"""
WIP method to predict column embeddings using remainder of row as context
"""
import pandas as pd
import fasttext
from scipy.spatial import distance
from itertools import product
import sys
import numpy as np
from configurations import *
from tuple_embedding_models import AutoEncoderTupleEmbedding, AutoEncoderTupleEmbeddingAdjusted
def produce_column_embeddings(table1, table2):
"""
    Loop through each column in each table and build dictionaries mapping
    column names (keys) to averaged embedded representations of those columns (values)
"""
table1_dict = {}
for col in table1.columns:
embedded_values = predict_column_embedding(table1, col)
embed_avg = np.average(np.array(embedded_values), axis=0)
table1_dict[col] = embed_avg
table2_dict = {}
for col in table2.columns:
embedded_values = predict_column_embedding(table2, col)
embed_avg = np.average(np.array(embedded_values), axis=0)
table2_dict[col] = embed_avg
return table1_dict, table2_dict
def predict_column_embedding(table, column):
"""
"""
print(f"Creating embedding for column: {column}")
tuple_embedding_model = AutoEncoderTupleEmbedding()
column_embedding_model = AutoEncoderTupleEmbeddingAdjusted()
# Drop rows which have nan values in the column we are embedding
table = table.dropna(subset=column)
    # Drop rows where every column other than the one being embedded is NaN
    table = table.dropna(how='all', subset=[col for col in table.columns if col != column])
    # Fill the remaining NaN values with a blank space
    table = table.fillna(' ')
    # Convert all values to strings
    table = table.astype(str)
    # Merge the text of every column except the held-out column into one string per row
    table["_merged_text"] = table[[col for col in table.columns if col != column]].agg(' '.join, axis=1)
# Extract held out column
held_out_col = table[column]
# Drop columns not in merged text
table = table.drop(columns=[col for col in table.columns if col != "_merged_text"])
# Preprocess tuple embedding model
tuple_embedding_model.preprocess(table["_merged_text"])
# Get tuple embeddings
print("Getting tuple embeddings")
tuple_embeddings = tuple_embedding_model.get_tuple_embedding(table["_merged_text"])
# Now, predict column embeddings by using column embedding model
column_embedding_model.preprocess(tuple_embeddings, held_out_col)
print("Getting column embeddings")
column_embeddings = column_embedding_model.get_tuple_embedding(tuple_embeddings)
return column_embeddings
def cos_sim(col1_embed, col2_embed):
cosine_similarity = 1 - distance.cdist(col1_embed.reshape(1,-1), col2_embed.reshape(1,-1), metric="cosine")
# print(cosine_similarity)
return cosine_similarity
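# Illustrative sanity check (not part of the original pipeline): cos_sim returns a
# 1x1 matrix, so the scalar similarity sits at index [0][0]. Identical vectors score
# 1.0 and orthogonal vectors score 0.0.
def _cos_sim_example():
    a = np.array([1.0, 0.0, 0.0])
    b = np.array([0.0, 1.0, 0.0])
    assert np.isclose(cos_sim(a, a)[0][0], 1.0)
    assert np.isclose(cos_sim(a, b)[0][0], 0.0)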
def generate_column_similarity(table1,table2):
# Delete below when done
table1 = table1.iloc[:, :2]
table2 = table2.iloc[:, :2]
###
column_compare_combos = list(product(table1.columns, table2.columns))
    # Retrieve column embeddings for each table
table1_dict, table2_dict = produce_column_embeddings(table1, table2)
cs_list = []
for item in column_compare_combos:
cs = cos_sim(table1_dict[item[0]], table2_dict[item[1]])
# print(cs)
cs_list.append(cs[0][0])
return cs_list,column_compare_combos
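# Hedged usage sketch (assumption: "table1.csv" and "table2.csv" are hypothetical
# inputs, not files referenced by the original code). It pairs every column of one
# table with every column of the other and prints the cosine similarity of their
# predicted embeddings.
def _generate_column_similarity_example():
    t1 = pd.read_csv("table1.csv")  # hypothetical input file
    t2 = pd.read_csv("table2.csv")  # hypothetical input file
    scores, pairs = generate_column_similarity(t1, t2)
    for (col_a, col_b), score in zip(pairs, scores):
        print(f"{col_a} <-> {col_b}: {score:.3f}")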
def make_prediction_df(table_names, table_files):
table1_name = table_names[0]
table2_name = table_names[1]
    dfa = pd.read_csv(table_files[0])
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
    # Check the default index after appending two columns (Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
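    # 0b00101101 sets bits 0, 2, 3 and 5 (LSB first), so rows 0, 2, 3, 5 are valid
    # and rows 1 and 4 are null, matching validids below and null_count == 2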
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid position is correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
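        # Mirror the null positions in the host copy so refvalues matches the
        # fillna(na) result compared against below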
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
            # Take rows of the key columns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # numerical columns are upcast to float in cudf.DataFrame.to_pandas(), which
    # casts NaN to 0 in non-float numerical columns, so normalize them here
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
q = q if isinstance(q, list) else [q]
assert_eq(
pdf.quantile(
q if isinstance(q, list) else [q], numeric_only=False
),
gdf.quantile(q, numeric_only=False),
)
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's hash of a string returns a different value on each run, which
    # sometimes makes enc_with_name_arr and enc_arr come out the same, and
    # there is no better way to force hash to return a stable value. So use an
    # integer name to get a constant value back from hash.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
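    # bins may be given either as a cudf.Series or as a host array; both should match numpy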
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
    # Cast to float64 because an all-None list would otherwise produce object dtype in pandas
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
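    # when inplace=True, set_index returns None, so compare the mutated frames instead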
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
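    # pandas >= 1.1 compares index freq in assert_frame_equal; disable that check
    # since cudf does not track freq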
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(gdf_new_name, pdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_multiindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # ignore_index was only added to pandas sort_index in 1.0, so emulate it below
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
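    # the host copy and the device copy should apply the same null fill value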
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
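    # per-column decimals given as a cudf.Series must be converted to pandas for the expected result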
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
for c in gdf.columns:
        assert np.array_equal(
            gdf[c].nullmask.to_array(), result[c].nullmask.to_array()
        )
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
    # Pandas treats `None` in object-dtype columns as True for some reason,
    # so replace it with `False`
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
    index = list(range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
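    # the DataFrame's reported size should equal the index size plus the total of all column sizes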
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
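    # empty inputs should default to float64, and explicit dtypes should be honored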
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
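    # scalar `values` should raise the same exception in both pandas and cudf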
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
except ValueError as e:
if str(e) == "Lengths must match.":
pytest.xfail(
not PANDAS_GE_110,
"https://github.com/pandas-dev/pandas/issues/34256",
)
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
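    # pass ddof explicitly for var/std so cudf and pandas use the same normalization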
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
# pandas insert doesn't support negative indexing
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
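    # convert both operands with nullable=True so cudf nulls round-trip as pandas <NA>
    # instead of being coerced to NaN before the comparison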
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
# Special Case: Assume RangeIndex size == 0
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
    # Pandas uses NaN and typecasts to float64 if there are missing values
    # after alignment, so typecast to float64 for the equality comparison
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": pd.Series([1, 2, 3, 2, 5]),
"b": pd.Series([1, 2, 3, 2, 5]),
}
),
NotImplementedError,
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_df_sr_mask_where(data, condition, other, error, inplace):
ps_where = data
gs_where = cudf.from_pandas(data)
ps_mask = ps_where.copy(deep=True)
gs_mask = gs_where.copy(deep=True)
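    # `where` replaces values where the condition is False, `mask` replaces values
    # where it is True; both are exercised below on the same inputs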
if hasattr(condition, "__cuda_array_interface__"):
if type(condition).__module__.split(".")[0] == "cupy":
ps_condition = cupy.asnumpy(condition)
else:
ps_condition = np.array(condition).astype("bool")
else:
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
if error is None:
expect_where = ps_where.where(
ps_condition, other=ps_other, inplace=inplace
)
got_where = gs_where.where(
gs_condition, other=gs_other, inplace=inplace
)
expect_mask = ps_mask.mask(
ps_condition, other=ps_other, inplace=inplace
)
got_mask = gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
if inplace:
expect_where = ps_where
got_where = gs_where
expect_mask = ps_mask
got_mask = gs_mask
if pd.api.types.is_categorical_dtype(expect_where):
np.testing.assert_array_equal(
expect_where.cat.codes,
got_where.cat.codes.astype(expect_where.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_where.cat.categories, got_where.cat.categories)
np.testing.assert_array_equal(
expect_mask.cat.codes,
got_mask.cat.codes.astype(expect_mask.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_mask.cat.categories, got_mask.cat.categories)
else:
assert_eq(
expect_where.fillna(-1),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1), got_mask.fillna(-1), check_dtype=False
)
else:
assert_exceptions_equal(
lfunc=ps_where.where,
rfunc=gs_where.where,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False
if error is NotImplementedError
else True,
)
assert_exceptions_equal(
lfunc=ps_mask.mask,
rfunc=gs_mask.mask,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False,
)
@pytest.mark.parametrize(
"data,condition,other,has_cat",
[
(
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
),
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
)
!= "a",
None,
None,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
"a",
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
"a",
True,
),
],
)
def test_df_string_cat_types_mask_where(data, condition, other, has_cat):
ps = data
gs = cudf.from_pandas(data)
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
expect_where = ps.where(ps_condition, other=ps_other)
got_where = gs.where(gs_condition, other=gs_other)
expect_mask = ps.mask(ps_condition, other=ps_other)
got_mask = gs.mask(gs_condition, other=gs_other)
if has_cat is None:
assert_eq(
expect_where.fillna(-1).astype("str"),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1).astype("str"),
got_mask.fillna(-1),
check_dtype=False,
)
else:
assert_eq(expect_where, got_where, check_dtype=False)
assert_eq(expect_mask, got_mask, check_dtype=False)
@pytest.mark.parametrize(
"data,expected_upcast_type,error",
[
(
pd.Series([random.random() for _ in range(10)], dtype="float32"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float16"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float64"),
np.dtype("float64"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float128"),
None,
NotImplementedError,
),
],
)
def test_from_pandas_unsupported_types(data, expected_upcast_type, error):
pdf = pd.DataFrame({"one_col": data})
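    # cudf has no float16 column type, so float16 input is expected to upcast to
    # float32, while float128 is expected to raise NotImplementedError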
if error == NotImplementedError:
with pytest.raises(error):
cudf.from_pandas(data)
with pytest.raises(error):
cudf.Series(data)
with pytest.raises(error):
cudf.from_pandas(pdf)
with pytest.raises(error):
cudf.DataFrame(pdf)
else:
df = cudf.from_pandas(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.Series(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.from_pandas(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
df = cudf.DataFrame(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
@pytest.mark.parametrize("nan_as_null", [True, False])
@pytest.mark.parametrize("index", [None, "a", ["a", "b"]])
def test_from_pandas_nan_as_null(nan_as_null, index):
data = [np.nan, 2.0, 3.0]
if index is None:
pdf = pd.DataFrame({"a": data, "b": data})
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
else:
pdf = pd.DataFrame({"a": data, "b": data}).set_index(index)
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = expected.set_index(index)
got = cudf.from_pandas(pdf, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_from_pandas_for_series_nan_as_null(nan_as_null):
data = [np.nan, 2.0, 3.0]
psr = pd.Series(data)
expected = cudf.Series(column.as_column(data, nan_as_null=nan_as_null))
got = cudf.from_pandas(psr, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_copy(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype="float", copy=copy),
pdf.astype(dtype="float", copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype="float", copy=copy),
psr.astype(dtype="float", copy=copy),
)
assert_eq(gsr, psr)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype(dtype="int64", copy=copy)
expected = psr.astype(dtype="int64", copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_dtype_dict(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype={"col1": "float"}, copy=copy),
pdf.astype(dtype={"col1": "float"}, copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype={None: "float"}, copy=copy),
psr.astype(dtype={None: "float"}, copy=copy),
)
assert_eq(gsr, psr)
assert_exceptions_equal(
lfunc=psr.astype,
rfunc=gsr.astype,
lfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
rfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype({None: "int64"}, copy=copy)
expected = psr.astype({None: "int64"}, copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize(
"data,columns",
[
([1, 2, 3, 100, 112, 35464], ["a"]),
(range(100), None),
([], None),
((-10, 21, 32, 32, 1, 2, 3), ["p"]),
((), None),
([[1, 2, 3], [1, 2, 3]], ["col1", "col2", "col3"]),
([range(100), range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3), (1, 2, 3)), ["tuple0", "tuple1", "tuple2"]),
([[1, 2, 3]], ["list col1", "list col2", "list col3"]),
([range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3),), ["k1", "k2", "k3"]),
],
)
def test_dataframe_init_1d_list(data, columns):
expect = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(data, columns=columns)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
expect = pd.DataFrame(data, columns=None)
actual = cudf.DataFrame(data, columns=None)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
@pytest.mark.parametrize(
"data,cols,index",
[
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
["a", "b", "c", "d"],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 20, 30, 10],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 1, 2, 3],
),
(np.array([11, 123, -2342, 232]), ["a"], [1, 2, 11, 12]),
(np.array([11, 123, -2342, 232]), ["a"], ["khsdjk", "a", "z", "kk"]),
(
cupy.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "z"],
["a", "z", "a", "z"],
),
(cupy.array([11, 123, -2342, 232]), ["z"], [0, 1, 1, 0]),
(cupy.array([11, 123, -2342, 232]), ["z"], [1, 2, 3, 4]),
(cupy.array([11, 123, -2342, 232]), ["z"], ["a", "z", "d", "e"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
],
)
def test_dataframe_init_from_arrays_cols(data, cols, index):
gd_data = data
if isinstance(data, cupy.core.ndarray):
# pandas can't handle cupy arrays in general
pd_data = data.get()
# additional test for building DataFrame with gpu array whose
# cuda array interface has no `descr` attribute
numba_data = cuda.as_cuda_array(data)
else:
pd_data = data
numba_data = None
# verify with columns & index
pdf = pd.DataFrame(pd_data, columns=cols, index=index)
gdf = cudf.DataFrame(gd_data, columns=cols, index=index)
assert_eq(pdf, gdf, check_dtype=False)
# verify with columns
pdf = pd.DataFrame(pd_data, columns=cols)
gdf = cudf.DataFrame(gd_data, columns=cols)
assert_eq(pdf, gdf, check_dtype=False)
pdf = pd.DataFrame(pd_data)
gdf = cudf.DataFrame(gd_data)
assert_eq(pdf, gdf, check_dtype=False)
if numba_data is not None:
gdf = cudf.DataFrame(numba_data)
assert_eq(pdf, gdf, check_dtype=False)
@pytest.mark.parametrize(
"col_data",
[
range(5),
["a", "b", "x", "y", "z"],
[1.0, 0.213, 0.34332],
["a"],
[1],
[0.2323],
[],
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar(col_data, assign_val):
pdf = pd.DataFrame({"a": col_data})
gdf = cudf.DataFrame({"a": col_data})
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"col_data",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar_with_scalar_cols(col_data, assign_val):
pdf = pd.DataFrame(
{
"a": cupy.asnumpy(col_data)
if isinstance(col_data, cupy.ndarray)
else col_data
},
index=["dummy_mandatory_index"],
)
gdf = cudf.DataFrame({"a": col_data}, index=["dummy_mandatory_index"])
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
def test_dataframe_info_basic():
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 10 entries, a to 1111
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 0 10 non-null float64
1 1 10 non-null float64
2 2 10 non-null float64
3 3 10 non-null float64
4 4 10 non-null float64
5 5 10 non-null float64
6 6 10 non-null float64
7 7 10 non-null float64
8 8 10 non-null float64
9 9 10 non-null float64
dtypes: float64(10)
memory usage: 859.0+ bytes
"""
)
df = pd.DataFrame(
np.random.randn(10, 10),
index=["a", "2", "3", "4", "5", "6", "7", "8", "100", "1111"],
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
def test_dataframe_info_verbose_mem_usage():
buffer = io.StringIO()
df = pd.DataFrame({"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]})
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Columns: 2 entries, a to b
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=False)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]},
index=["sdfdsf", "sdfsdfds", "dsfdf"],
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 3 entries, sdfdsf to dsfdf
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 91.0 bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True, memory_usage="deep")
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0 bytes
"""
)
df.info(buf=buffer, verbose=True, memory_usage="deep")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
def test_dataframe_info_null_counts():
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Dtype
--- ------ -----
0 int_col int64
1 text_col object
2 float_col float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
"""
)
df.info(buf=buffer, verbose=True, null_counts=False)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, verbose=True, max_cols=0)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 0 entries
Empty DataFrame"""
)
df.info(buf=buffer, verbose=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame(
{
"a": [1, 2, 3, None, 10, 11, 12, None],
"b": ["a", "b", "c", "sd", "sdf", "sd", None, None],
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Dtype
--- ------ -----
0 a int64
1 b object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
pd.options.display.max_info_rows = 2
df.info(buf=buffer, max_cols=2, null_counts=None)
pd.reset_option("display.max_info_rows")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 6 non-null int64
1 b 6 non-null object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
df.info(buf=buffer, max_cols=2, null_counts=None)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, null_counts=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
@pytest.mark.parametrize(
"data1",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize("rtol", [0, 0.01, 1e-05, 1e-08, 5e-1, 50.12])
@pytest.mark.parametrize("atol", [0, 0.01, 1e-05, 1e-08, 50.12])
def test_cudf_isclose(data1, data2, rtol, atol):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, rtol=rtol, atol=atol))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(data1, data2, rtol=rtol, atol=atol)
assert_eq(expected, actual)
actual = cudf.isclose(
cupy.array(data1), cupy.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
np.array(data1), np.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
pd.Series(data1), pd.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data1",
[
[
-1.9876543,
-2.9876654,
np.nan,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
np.nan,
-21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
np.nan,
np.nan,
np.nan,
21.1212,
],
],
)
@pytest.mark.parametrize("equal_nan", [True, False])
def test_cudf_isclose_nulls(data1, data2, equal_nan):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, equal_nan=equal_nan))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), equal_nan=equal_nan
)
assert_eq(expected, actual, check_dtype=False)
actual = cudf.isclose(data1, data2, equal_nan=equal_nan)
assert_eq(expected, actual, check_dtype=False)
def test_cudf_isclose_different_index():
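    # isclose aligns the two series on their indexes first, so values are compared
    # by label and labels missing from one side compare as False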
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 3, 4, 2],
)
expected = cudf.Series([True] * 6, index=s1.index)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 10, 4, 2],
)
expected = cudf.Series(
[True, True, True, False, True, True], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[100, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 100, 10, 4, 2],
)
expected = cudf.Series(
[False, True, True, False, True, False], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
def test_dataframe_to_dict_error():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [9, 5, 3]})
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df.to_dict()
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df["a"].to_dict()
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]}),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
}
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=[10, 20, 30, 40, 50, 60],
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=["a", "b", "c", "d", "e", "f"],
),
pd.DataFrame(index=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(columns=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(index=[10, 11, 12]),
pd.DataFrame(columns=[10, 11, 12]),
pd.DataFrame(),
pd.DataFrame({"one": [], "two": []}),
pd.DataFrame({2: [], 1: []}),
pd.DataFrame(
{
0: [1, 2, 3, 4, 5, 10],
1: ["abc", "def", "ghi", "xyz", "pqr", "abc"],
100: ["a", "b", "b", "x", "z", "a"],
},
index=[10, 20, 30, 40, 50, 60],
),
],
)
def test_dataframe_keys(df):
gdf = cudf.from_pandas(df)
assert_eq(df.keys(), gdf.keys())
@pytest.mark.parametrize(
"ps",
[
pd.Series([1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]),
pd.Series(["abc", "def", "ghi", "xyz", "pqr", "abc"]),
pd.Series(
[1, 2, 3, 4, 5, 10],
index=["abc", "def", "ghi", "xyz", "pqr", "abc"],
),
pd.Series(
["abc", "def", "ghi", "xyz", "pqr", "abc"],
index=[1, 2, 3, 4, 5, 10],
),
pd.Series(index=["a", "b", "c", "d", "e", "f"], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
pd.Series(dtype="float64"),
pd.Series([], dtype="float64"),
],
)
def test_series_keys(ps):
gds = cudf.from_pandas(ps)
if len(ps) == 0 and not isinstance(ps.index, pd.RangeIndex):
assert_eq(ps.keys().astype("float64"), gds.keys())
else:
assert_eq(ps.keys(), gds.keys())
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
pd.DataFrame(),
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
pd.DataFrame([]),
        pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
import pandas
from google.cloud import bigquery
from google_pandas_load import LoadConfig
from tests.context.loaders import gpl1, gpl2, gpl3, gpl4, gpl5
from tests.context.resources import project_id, bq_client, \
dataset_ref, dataset_name
from tests.utils import BaseClassTest, populate_dataset, \
populate, populate_bucket, populate_local_folder
class DataDeliveryTest(BaseClassTest):
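    # each test below moves data between two of the supported locations
    # ('query', 'bq', 'gs', 'local', 'dataframe') and checks the delivered content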
def test_query_to_bq(self):
l0 = [2, 3]
populate_dataset()
gpl3.load(
source='query',
destination='bq',
data_name='a0',
query='select 3 as x union all select 2 as x')
table_ref = dataset_ref.table(table_id='a0')
table = bq_client.get_table(table_ref)
df1 = bq_client.list_rows(table=table).to_dataframe()
l1 = sorted(list(df1.x))
self.assertEqual(l0, l1)
def test_bq_to_dataframe(self):
df0 = pandas.DataFrame(data={'x': ['data_a10_bq']})
populate()
df1 = gpl4.load(
source='bq',
destination='dataframe',
data_name='a10_bq')
self.assertTrue(gpl4.exist_in_bq('a10_bq'))
self.assertTrue(df0.equals(df1))
def test_gs_to_local(self):
populate_bucket()
gpl2.load(
source='gs',
destination='local',
data_name='a7')
self.assertEqual(len(gpl2.list_blob_uris('a7')), 1)
self.assertEqual(len(gpl2.list_local_file_paths('a7')), 1)
def test_local_to_dataframe(self):
l0 = ['data_a{}_local'.format(i) for i in range(10, 14)]
populate_local_folder()
df1 = gpl5.load(
source='local',
destination='dataframe',
data_name='a1')
l1 = sorted(list(df1.x))
self.assertEqual(l0, l1)
def test_query_to_dataframe(self):
df0 = pandas.DataFrame(data={'x': [1, 1]})
populate()
df1 = gpl2.load(
source='query',
destination='dataframe',
query='select 1 as x union all select 1 as x',
data_name='a1')
self.assertFalse(gpl2.exist_in_bq('a1'))
self.assertFalse(gpl2.exist_in_gs('a1'))
self.assertFalse(gpl2.exist_in_local('a1'))
self.assertTrue(df0.equals(df1))
def test_local_to_bq(self):
populate()
gpl3.load(
source='local',
destination='bq',
data_name='a',
bq_schema=[bigquery.SchemaField('x', 'STRING')])
self.assertTrue(gpl3.exist_in_local('a'))
self.assertFalse(gpl3.exist_in_gs('a'))
table_ref = dataset_ref.table(table_id='a')
table = bq_client.get_table(table_ref)
num_rows = table.num_rows
self.assertEqual(num_rows, 5)
def test_dataframe_to_gs(self):
df = pandas.DataFrame(data={'x': [1]})
gpl3.load(
source='dataframe',
destination='gs',
data_name='b',
dataframe=df)
self.assertFalse(gpl3.exist_in_local('b'))
self.assertEqual(len(gpl3.list_blob_uris('b')), 1)
def test_local_to_gs(self):
populate()
gpl1.load(
source='local',
destination='gs',
data_name='a1')
self.assertTrue(gpl1.exist_in_local('a1'))
self.assertEqual(len(gpl1.list_blob_uris('a1')), 4)
def test_dataframe_to_bq(self):
l0 = [3, 4, 7]
df0 = pandas.DataFrame(data={'x': l0})
populate()
gpl5.load(
source='dataframe',
destination='bq',
data_name='a',
dataframe=df0)
self.assertFalse(gpl5.exist_in_local('a'))
self.assertFalse(gpl5.exist_in_gs('a'))
self.assertTrue(gpl5.exist_in_bq('a'))
query = 'select * from `{}.{}.{}`'.format(
project_id, dataset_name, 'a')
df1 = bq_client.query(query).to_dataframe()
l1 = sorted(list(df1.x))
self.assertEqual(l0, l1)
def test_upload_download(self):
df0 = pandas.DataFrame(data={'x': [1], 'y': [3]})
populate()
gpl1.load(
source='dataframe',
destination='bq',
data_name='a9',
dataframe=df0)
query = 'select * from `{}.{}.{}`'.format(
project_id, dataset_name, 'a9')
df1 = gpl1.load(
source='query',
destination='dataframe',
query=query)
self.assertTrue(df0.equals(df1))
def test_download_upload(self):
df0 = pandas.DataFrame(data={'x': [3]})
df1 = gpl2.load(
source='query',
destination='dataframe',
query='select 3 as x')
self.assertTrue(df0.equals(df1))
gpl2.load(
source='dataframe',
destination='bq',
data_name='b8',
dataframe=df1)
query = 'select * from `{}.{}.{}`'.format(
project_id, dataset_name, 'b8')
df2 = bq_client.query(query).to_dataframe()
self.assertTrue(df0.equals(df2))
def test_mload(self):
populate()
config1 = LoadConfig(
source='dataframe',
destination='bq',
data_name='a10',
dataframe=pandas.DataFrame(data={'x': [3]}))
config2 = LoadConfig(
source='query',
destination='dataframe',
query='select 4 as y')
config3 = LoadConfig(
source='query',
destination='gs',
data_name='e0',
query='select 4 as y')
load_results = gpl5.mload([config1, config2, config3])
self.assertEqual(len(load_results), 3)
self.assertTrue(load_results[0] is None)
df2 = pandas.DataFrame(data={'y': [4]})
self.assertTrue(load_results[1].equals(df2))
self.assertTrue(load_results[2] is None)
def test_diamond(self):
df0 = pandas.DataFrame(data={'x': [3]})
query = 'select 3 as x'
populate()
df1 = gpl5.xload(
source='query',
destination='dataframe',
query=query).load_result
config = LoadConfig(
source='query',
destination='dataframe',
query=query)
df2 = gpl5.mload([config])[0]
self.assertTrue(df0.equals(df1))
self.assertTrue(df0.equals(df2))
def test_config_repeated(self):
        df0 = pandas.DataFrame(data={'x': [3]})
# -*- coding: utf-8 -*-
import os
import sys
import pandas as pd
import numpy as np
import json
# webscraping
import requests
import wget
from bs4 import BeautifulSoup
from selenium import webdriver
from datetime import datetime, timedelta
import time
from tqdm import tqdm
# if __package__:
# from ..imports import *
# else:
# # run as a script, use absolute import
# _i = os.path.dirname(os.path.dirname(os.path.abspath("..")))
# if _i not in sys.path:
# sys.path.insert(0, _i)
# from imports import *
"""Scrape weather data and weather information
"""
# find weather stations located in the named cities
def find_weather_stations(city_names: list, weather_json_file: str):
"""Find a list of weather stations in city_names list
"""
# load weather information
with open(weather_json_file, 'r') as f:
station_dict_list = json.load(f)
weather_stations_info = []
for city_name in city_names:
for i, station in enumerate(station_dict_list):
if city_name in station['city_name']:
weather_stations_info.append(station)
return weather_stations_info
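# Usage sketch (hypothetical city name and json path):
# stations = find_weather_stations(['Bangkok'], 'weather_station_info.json')
# each returned entry is a station dict containing at least 'city_name' and 'specific_url'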
def get_data_n_soup(browser, date_str, header_url, waittime=30):
    ''' Input: date as a string (YYYY-MM-DD)
    - Ask Selenium to open the website and execute its inner JavaScript.
    - Parse the rendered page with BeautifulSoup and look for the hourly table.
    - Parse the table into a pandas dataframe and drop empty rows.
    - Add a datetime column built from the date and the hourly timestamps.
    return: the daily weather dataframe
    '''
# url=f'https://www.wunderground.com/history/daily/th/bang-phut/VTBD/date/{date_str}'
url = header_url + date_str
#print(url)
browser.get(url)
time.sleep(waittime)
innerhtml = browser.execute_script("return document.body.innerHTML")
soup = BeautifulSoup(innerhtml, features="lxml")
#div_table = soup.find_all('table')
daily_df = pd.read_html(str(soup))
#print('table lenght ', len(daily_df))
daily_df = daily_df[-1]
#print('data lenght', len(daily_df))
daily_df = daily_df.dropna(how='all')
# add date columns
daily_df['datetime'] = pd.to_datetime(
date_str + ' ' + daily_df['Time'],
format="%Y-%m-%d %I:%M %p")
return daily_df
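# Usage sketch, reusing the example URL from the comment above
# (assumes a running Selenium browser):
# daily_df = get_data_n_soup(
#     browser, '2020-01-01',
#     'https://www.wunderground.com/history/daily/th/bang-phut/VTBD/date/')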
def convert_temp_col(data_df, temperature_col):
    # convert temperature strings in °F to Celsius as float
for col in temperature_col:
if col in data_df.columns:
data_series = data_df[col].copy()
#data_series = data_series.str.replace('°F', '')
            data_series = data_series.str.extract(r'(-?\d+\.?\d*)').iloc[:, 0]  # keep sign and decimals, not just leading digits
data_series = data_series.astype(float)
data_series = ((data_series - 32) * 5 / 9).round(2)
data_df[col] = data_series
data_df.columns = data_df.columns.str.replace(col, col + '(C)')
return data_df
def convert_wind_col(data_df, win_col):
# convert string wind speed and wind gust in mph to kph
for col in win_col:
if col in data_df.columns:
data_series = data_df[col].copy()
# remove unit in the data
data_series = data_series.str.extract(r'(\d+)').iloc[:,0]
#data_series = data_series.str.replace('mph', '')
#data_series = data_series.str.replace(',', '')
data_series = data_series.astype(float)
# convert the value
data_series = (data_series * 1.60934).round(0)
data_df[col] = data_series
data_df.columns = data_df.columns.str.replace(col, col + '(kmph)')
return data_df
def convert_pressure_col(data_df, pressure_col):
    # convert pressure strings in inches of mercury to hPa as float
for col in pressure_col:
if col in data_df.columns:
data_series = data_df[col].copy()
#data_series = data_series.str.replace('in', '')
            data_series = data_series.str.extract(r'(\d+\.?\d*)').iloc[:, 0]  # keep decimals (e.g. "29.92 in"), not just the integer part
data_series = data_series.astype(float)
# convert the value to hPa
data_series = (data_series * 33.8638).round(0)
data_df[col] = data_series
data_df.columns = data_df.columns.str.replace(col, col + '(hPa)')
return data_df
def convert_precip_col(data_df, precip_col):
# convert string precipitation in 'inch' to float and change unit
for col in precip_col:
if col in data_df.columns:
data_series = data_df[col].copy()
#data_series = data_series.str.replace('in', '')
            data_series = data_series.str.extract(r'(\d+\.?\d*)').iloc[:, 0]  # precipitation is fractional, so capture decimals too
data_series = data_series.astype(float)
# convert the value to hPa
data_series = (data_series * 25.4).round(2)
data_df[col] = data_series
data_df.columns = data_df.columns.str.replace(col, col + '(mm)')
return data_df
def convert_humidity_col(data_df, humidity_col):
# convert string temperature in F to celcious in float
for col in humidity_col:
if col in data_df.columns:
data_series = data_df[col].copy()
#data_series = data_series.str.replace('%', '')
data_series = data_series.str.extract(r'(\d+)').iloc[:,0]
data_series = data_series.astype(int)
data_df[col] = data_series
data_df.columns = data_df.columns.str.replace(col, col + '(%)')
return data_df
def convert_unit(data_df):
# convert string data into number by removing the text in the unit. Put the text in the columns name.
    # convert temperature and wind speed into the metric system
data_df.columns = data_df.columns.str.replace(' ', '_')
temperature_col = ['Temperature', 'Dew_Point']
wind_col = ['Wind_Speed', 'Wind_Gust']
pressure_col = ['Pressure']
humidity_col = ['Humidity']
precip_col = ['Precip.']
data_df = convert_temp_col(data_df, temperature_col)
data_df = convert_wind_col(data_df, wind_col)
data_df = convert_pressure_col(data_df, pressure_col)
data_df = convert_humidity_col(data_df, humidity_col)
data_df = convert_precip_col(data_df, precip_col)
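    # resulting column names carry the unit, e.g. 'Temperature' -> 'Temperature(C)',
    # 'Wind_Speed' -> 'Wind_Speed(kmph)', 'Pressure' -> 'Pressure(hPa)', 'Precip.' -> 'Precip.(mm)'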
return data_df
def scrape_weather(city_json, date_range):
    # scrape weather data for a city for each date in date_range
    # return the weather dataframe and a dataframe of the dates that failed
    # (the caller saves/appends the result to a file named after the city)
browser = webdriver.Firefox()
time.sleep(2)
weather = pd.DataFrame()
bad_date_df = pd.DataFrame()
# Build header URL
specific_url = city_json['specific_url']
header_url = 'https://www.wunderground.com/history/daily/' + specific_url + 'date/'
for i, date in tqdm(enumerate(date_range)):
try:
# obtain daily weather dataframe
daily_df = get_data_n_soup(
browser, date, header_url=header_url, waittime=10)
except:
            pass  # skip dates whose page failed to load or parse
else:
# good query
# convert unit of the data
daily_df = convert_unit(daily_df)
# combine the weather for each day
weather = pd.concat([weather, daily_df], axis=0, join='outer')
# except BaseException:
# # fail query,
# bad_date_df = pd.concat(
# [
# bad_date_df,
# pd.DataFrame(
# {
# 'header_url': header_url,
# 'date': date},
# index=[0])],
# ignore_index=True)
# else:
# if len(daily_df) == 0:
# # fail query,
# bad_date_df = pd.concat(
# [
# bad_date_df,
# pd.DataFrame(
# {
# 'header_url': header_url,
# 'date': date},
# index=[0])],
# ignore_index=True)
# else:
# good query
# convert unit of the data
# daily_df = convert_unit(daily_df)
# # combine the weather for each day
# weather = pd.concat([weather, daily_df], axis=0, join='outer')
browser.close()
try:
# sort weather value
weather = weather.sort_values('datetime')
except BaseException:
print(date_range, weather.columns)
return weather, bad_date_df
def fix_temperature(df, lowest_t: int = 5, highest_t: int = 65):
    # remove abnormal temperature readings from the weather data
idx = df[df['Temperature(C)'] < lowest_t].index
df.loc[idx, ['Temperature(C)', 'Dew_Point(C)', 'Humidity(%)']] = np.nan
idx = df[df['Temperature(C)'] > highest_t].index
df.loc[idx, ['Temperature(C)', 'Dew_Point(C)', 'Humidity(%)']] = np.nan
return df
def fix_pressure(df, lowest_t: int = 170, highest_t: int = 1500):
    # remove abnormal pressure readings from the weather data
idx = df[df['Pressure(hPa)'] < lowest_t].index
df.loc[idx, ['Pressure(hPa)']] = np.nan
idx = df[df['Pressure(hPa)'] > highest_t].index
df.loc[idx, ['Pressure(hPa)']] = np.nan
return df
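

# --- Hedged usage sketch (illustration only) ---------------------------------
# Minimal example of the outlier-cleaning helpers above on a toy frame; the
# column names mirror the ones produced by convert_unit(), the values are made up.
def _demo_fix_outliers():
    import pandas as pd
    toy = pd.DataFrame({
        'Temperature(C)': [20.0, 150.0, -40.0],
        'Dew_Point(C)': [10.0, 12.0, -45.0],
        'Humidity(%)': [50.0, 30.0, 80.0],
        'Pressure(hPa)': [1013.0, 80.0, 1020.0],
    })
    toy = fix_temperature(toy)   # rows at 150 C and -40 C get their temperature fields blanked
    toy = fix_pressure(toy)      # 80 hPa falls below the plausible range -> NaN
    print(toy)
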
def fill_missing_weather(df, limit: int = 12):
    # resample the timestamps onto a 30-minute grid and fill missing values
    # round datetime to the nearest 30 minutes
df['datetime'] = pd.to_datetime(df['datetime'])
df['datetime'] = df['datetime'].dt.round('30T')
df = df.sort_values('datetime')
df = df.drop_duplicates('datetime')
dates = df['datetime'].dropna().dt.date.unique()
# fill in the missing value
new_datetime = pd.date_range(
start=dates[0], end=dates[-1] + timedelta(days=1), freq='30T')
new_weather = pd.DataFrame(new_datetime[:-1], columns=['datetime'])
new_weather = new_weather.merge(df, on='datetime', how='outer')
new_weather = new_weather.fillna(method='ffill', limit=limit)
new_weather = new_weather.fillna(method='bfill', limit=limit)
new_weather = new_weather.set_index('datetime')
new_weather = new_weather.dropna(how='all').reset_index()
return new_weather
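

# --- Hedged usage sketch (illustration only) ---------------------------------
# fill_missing_weather() expects a 'datetime' column; the toy frame below shows
# the rounding to 30-minute marks and the forward/backward filling of gaps.
def _demo_fill_missing_weather():
    import pandas as pd
    toy = pd.DataFrame({
        'datetime': ['2021-01-01 00:05', '2021-01-01 03:40'],
        'Temperature(C)': [1.0, 4.0],
    })
    filled = fill_missing_weather(toy, limit=12)
    # timestamps are snapped to a 30-minute grid and gaps of up to `limit`
    # steps are filled from the nearest observation
    print(filled.head(10))
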
def update_weather(
city_json,
data_folder,
        start_date=datetime(2000, 10, 1),
        end_date=datetime.now()):
"""Update weather for the city specified by city_json and save.
"""
# read existing file
city_name = ('_').join(city_json['city_name'].split(' '))
current_filename = data_folder + city_name + '.csv'
    print('updating file:', current_filename)
    # obtain a list of existing dates if the file exists
if os.path.exists(current_filename):
df = pd.read_csv(current_filename)
df['datetime'] = pd.to_datetime(df['datetime'])
df = df.drop_duplicates('datetime')
        # find existing dates
ex_date = df['datetime'].dt.strftime('%Y-%m-%d').unique()
ex_date = set(ex_date)
else:
        df = pd.DataFrame()
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
from pathlib import Path
from itertools import islice
class My_dict(dict):
def __init__(self):
self = dict()
def add(self, key, value):
self[key] = value
class Df():
def __init__(self, raw_data_location):
df = pd.read_csv(raw_data_location, header = 0)
self.df = df
def tsp_df(self):
df = self.df
        df1 = pd.DataFrame()
import numpy as np
import pandas as pd
import streamlit as st
import importlib
import os
import sys
import time
def file_selector(folder_path='.'):
filenames = os.listdir(folder_path)
filenames_ = [f for f in filenames if f[-3:] == "txt"]
selected_filename = st.selectbox('Select a file', filenames_)
return os.path.join(folder_path, selected_filename)
st.header("Rocking Data Bytes")
modo = st.sidebar.radio("Modo", options=["Buscar contenido", "Subir contenido", "Configuración"], index=0)
if "METADATA.csv" in os.listdir(".") and "TAGS.csv" in os.listdir("."):
METADATA = pd.read_csv("./METADATA.csv", index_col=0)
TAGS = pd.read_csv("./TAGS.csv", index_col=0)
else:
METADATA = pd.DataFrame(np.zeros((1, 5)), index=["INIT"], columns=["TAG_{}".format(i) for i in range(1,6)])
METADATA.to_csv("./METADATA.csv")
TAGS = pd.DataFrame({"TAGS":["funciones", "machine learning", "visualizacion", "estadistica"]})
TAGS.to_csv("./TAGS.csv")
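
# NOTE (added for clarity; inferred from the initialisation above): METADATA.csv
# holds one row per stored snippet ("byte"), indexed by its name, with up to
# five tag columns TAG_1..TAG_5, while TAGS.csv holds a single "TAGS" column
# listing the allowed tag values. Both files are expected next to this script.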
if modo == "Buscar contenido":
METADATA = pd.read_csv("./METADATA.csv", index_col=0)
TAGS = pd.read_csv("./TAGS.csv", index_col=0)
search_tags = st.multiselect("Tags", options=[_[0] for _ in TAGS.values])
available_bytes = []
for byte in METADATA.index:
if sum([tag_ in METADATA.loc[byte].values for tag_ in search_tags]) == len(search_tags):
print(sum([tag_ in METADATA.loc[byte] for tag_ in search_tags]))
available_bytes.append(byte)
if search_tags == []:
selection = st.selectbox("Índice", options=METADATA.index[1:])
else:
selection = st.selectbox("Índice", options=available_bytes)
if st.button("Ver"):
importlib.import_module("{}".format(selection))
del sys.modules["{}".format(selection)]
elif modo == "Subir contenido":
METADATA = pd.read_csv("./METADATA.csv", index_col=0)
TAGS = pd.read_csv("./TAGS.csv", index_col=0)
nombre = st.text_input("Nombre")
tags = st.multiselect("Tags", options=[_[0] for _ in TAGS.values])
path = file_selector()
base = open(os.getcwd() + "/{}.py".format(nombre), "w", encoding="utf-8")
base.write("import streamlit as st\nwith st.echo():\n")
base.close()
flag_1 = nombre is not None
flag_2 = tags is not None
FLAGS = flag_1 + flag_2
if FLAGS == 2 and st.button("Guardar"):
temp = open(path, "r", encoding="utf-8")
base_ = open(os.getcwd() + "/{}.py".format(nombre), "a", encoding="utf-8")
for line in temp:
base_.write("\t"+line)
temp.close()
base_.close()
for i in range(5-len(tags)):
tags.extend([0])
print({nombre:tags})
METADATA.loc[nombre, :] = tags
METADATA.to_csv("./METADATA.csv")
else:
st.empty().text(" ")
st.info("Actualmente existen {} bytes almacenados y {} tags".format(METADATA.shape[0]-1, len(TAGS.values)))
st.empty().text(" ")
st.subheader("Metadata")
st.empty().text(" ")
if st.button("Resetear metadata"):
METADATA = pd.DataFrame(np.zeros((1, 5)), index=["INIT"], columns=["TAG_{}".format(i) for i in range(1, 6)])
METADATA.to_csv("./METADATA.csv")
if st.button("Ver metadata"):
st.write(METADATA.iloc[1:, :])
st.subheader("Tags")
new_tag = st.text_input("Agregar un nuevo tag")
if new_tag is not None and st.button("Agregar tag"):
redundancia = False
if new_tag in TAGS.values:
redundancia = True
if redundancia is False:
            TAGS = pd.concat([TAGS, pd.DataFrame({"TAGS": new_tag}, index=[0])])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 6 10:25:47 2019
@author: <NAME>
Input files: host and pathogen quantification tables (e.g. pathogen_quant_salmon.tsv, host_quantification_uniquely_mapped_htseq.tsv), raw read statistics, star statistics
Output file: tsv file
Description: Used to collect mapping and quantifications statistics for STAR, HTSeq, Salmon or HTSeq results.
"""
import argparse
import pandas as pd
# function to sum up number of mapped reads from quantification table
def mapping_stats(quantification_table_path,gene_attribute,organism):
# read quantification table
col_names = pd.read_csv(quantification_table_path, sep = '\t', nrows=0).columns
types_dict = {gene_attribute: str}
types_dict.update({col: float for col in col_names if col not in types_dict})
quantification_table = pd.read_csv(quantification_table_path, sep = '\t',index_col=0,dtype=types_dict)
quantification_table = quantification_table.fillna(0)
# initialize dict.
quant_sum = {}
for sample_name in quantification_table: # iterate over columns of quantification_table
if 'NumReads' in sample_name: # for each column (sample) with 'NumReads' sum up the number of reads and add into quant_sum dict.
quant_sum.update({sample_name:sum(quantification_table[sample_name])})
# create data frame from dict.
total_counts_pd = pd.DataFrame.from_dict(quant_sum,orient='index')
total_counts_pd.columns = [organism]
return total_counts_pd
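

# --- Hedged usage sketch (not executed by the pipeline) -----------------------
# Example of mapping_stats() on a tiny quantification table written to a
# temporary TSV; the sample columns containing 'NumReads' mirror what the
# function sums up. The file name below is just a placeholder.
def _demo_mapping_stats(tmp_path='demo_quant.tsv'):
    demo = pd.DataFrame({
        'gene_id': ['g1', 'g2'],
        'sampleA_NumReads': [10.0, 5.0],
        'sampleA_TPM': [1.0, 0.5],
    })
    demo.to_csv(tmp_path, sep='\t', index=False)
    totals = mapping_stats(tmp_path, 'gene_id', 'pathogen')
    print(totals)  # one row per *_NumReads column with the summed read counts
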
parser = argparse.ArgumentParser(description="""collects and generates mapping statistics""")
parser.add_argument("-q_p", "--quantification_table_pathogen", metavar='<quantification_table_pathogen>', default='.', help="Path to pathogen quantification table; Salmon and HTSeq")
parser.add_argument("-q_h", "--quantification_table_host", metavar='<quantification_table_host>', default='.', help="Path to host quantification table; Salmon and HTSeq")
parser.add_argument("-total_raw", "--total_no_raw_reads",metavar='<total_no_raw_reads>',default='.',help="Path to table with total number of raw reads for each sample; Salmon and STAR")
parser.add_argument("-total_processed", "--total_no_processed_reads", metavar='<total_no_processed_reads>', help="Path to table with total number of processed reads by STAR or Salmon")
parser.add_argument("-m_u", "--mapped_uniquely", metavar='<stats mapped uniquely >', default='.', help="Path to STAR mapping stats of uniquely mapped reads; STAR")
parser.add_argument("-m_m", "--mapped_multi", metavar='<stats multi mapped >', default='.', help="Path to STAR mapping stats of multi mapped reads; STAR")
parser.add_argument("-c_m", "--cross_mapped", metavar='<stats cross mapped >', default='.', help="Path to STAR mapping stats of cross_mapped reads; STAR")
parser.add_argument("-star", "--star_stats", metavar='<stats star >', default='.', help="Path to mapping statistics of STAR; HTSeq")
parser.add_argument("-star_pr", "--star_processed", metavar='<stats star_processed >', default='.',help="Path to STAR stats of processed reads; Salmon in alignment-based mode")
parser.add_argument("-a", "--gene_attribute", default='.', help="gene attribute used in quantification; Salmon and HTSeq")
parser.add_argument("-t", "--tool", metavar='<tool>', help="salmon, salmon_alignment, htseq or star")
parser.add_argument("-o", "--output_dir", metavar='<output>', help="output dir",default='.')
args = parser.parse_args()
# collect statistics for Salmon Selective Alignment mode
if args.tool == 'salmon':
# collect assigned pathogen reads
pathogen_total_counts = mapping_stats(args.quantification_table_pathogen,args.gene_attribute,'pathogen')
# collect assigned host reads
host_total_counts = mapping_stats(args.quantification_table_host,args.gene_attribute,'host')
# combine host and pathogen mapped reads
combined_total_mapped_reads = pd.concat([pathogen_total_counts, host_total_counts], axis=1)
# rename colnames - remove '_NumReads' suffix
new_index1 = [sample.split('_NumReads')[0] for sample in combined_total_mapped_reads.index]
combined_total_mapped_reads.index = new_index1
# calculate total mapped reads
combined_total_mapped_reads['total_mapped_reads'] = combined_total_mapped_reads.sum(axis=1)
if args.total_no_raw_reads.endswith('.tsv'): # if tsv table is defined in total_no_raw_reads argument
# read total number of raw reads
total_reads = pd.read_csv(args.total_no_raw_reads,sep="\t",index_col=0, names=['total_raw_reads'])
# read total number of reads processed by Salmon
processed_reads_salmon = pd.read_csv(args.total_no_processed_reads,sep="\t",index_col=0, names=['processed_reads'])
# combine statistics
results_df = pd.concat([combined_total_mapped_reads, processed_reads_salmon, total_reads], axis=1)
# calculate unmapped reads
results_df['unmapped_reads'] = results_df['processed_reads'] - results_df['total_mapped_reads']
# calculate trimmed reads
results_df['trimmed_reads'] = results_df['total_raw_reads'] - results_df['processed_reads']
else: # if tsv table is not defined in total_no_raw_reads argument
# read total number of reads processed by Salmon
processed_reads_salmon = pd.read_csv(args.total_no_processed_reads,sep="\t",index_col=0, names=['processed_reads'])
results_df = pd.concat([combined_total_mapped_reads, processed_reads_salmon], axis=1)
# calculate unmapped reads
results_df['unmapped_reads'] = results_df['processed_reads'] - results_df['total_mapped_reads']
# save results
results_df2 = results_df.sort_index()
results_df2.to_csv(args.output_dir, sep='\t')
# collect statistics for Salmon alignment-based mode
elif args.tool == 'salmon_alignment':
# collect assigned pathogen reads
pathogen_total_counts = mapping_stats(args.quantification_table_pathogen,args.gene_attribute,'pathogen')
# collect assigned host reads
host_total_counts = mapping_stats(args.quantification_table_host,args.gene_attribute,'host')
# combine host and pathogen mapped reads
combined_total_assigned_reads = pd.concat([pathogen_total_counts, host_total_counts], axis=1)
# rename colnames - remove '_NumReads' suffix
new_index1 = [sample.split('_NumReads')[0] for sample in combined_total_assigned_reads.index]
combined_total_assigned_reads.index = new_index1
# calculate total assigned reads
combined_total_assigned_reads['total_assigned_reads'] = combined_total_assigned_reads.sum(axis=1)
# extracted mapped reads from salmon log file
combined_total_mapped_reads = pd.read_csv(args.total_no_processed_reads,sep="\t",index_col=0, names=['total_mapped_reads'])
#read total number of reads processed by STAR
processed_reads_star = pd.read_csv(args.star_processed,sep="\t",index_col=0, names=['processed_reads'])
if args.total_no_raw_reads.endswith('.tsv'):
#read total number of raw reads
total_reads = pd.read_csv(args.total_no_raw_reads,sep="\t",index_col=0, names=['total_raw_reads'])
results_df = pd.concat([processed_reads_star, total_reads, combined_total_assigned_reads, combined_total_mapped_reads], axis=1)
# calculate unmapped reads
results_df['unmapped_reads'] = results_df['processed_reads'] - results_df['total_mapped_reads']
# calculate unassigned reads
results_df['unassigned_reads'] = results_df['total_mapped_reads'] - results_df['total_assigned_reads']
# calculate trimmed reads
results_df['trimmed_reads'] = results_df['total_raw_reads'] - results_df['processed_reads']
else:
results_df = pd.concat([processed_reads_star, combined_total_assigned_reads, combined_total_mapped_reads], axis=1)
# calculate unmapped reads
results_df['unmapped_reads'] = results_df['processed_reads'] - results_df['total_mapped_reads']
# calculate unassigned reads
results_df['unassigned_reads'] = results_df['total_mapped_reads'] - results_df['total_assigned_reads']
# save results
results_df2 = results_df.sort_index()
results_df2.to_csv(args.output_dir, sep='\t')
# collect statistics for HTSeq
elif args.tool == 'htseq':
# collect assigned pathogen reads
pathogen_total_counts = mapping_stats(args.quantification_table_pathogen,args.gene_attribute,'pathogen_assigned_reads')
# collect assigned host reads
host_total_counts = mapping_stats(args.quantification_table_host,args.gene_attribute,'host_assigned_reads')
combined_total_mapped_reads = pd.concat([pathogen_total_counts, host_total_counts], axis=1)
# rename colnames - remove '_NumReads' suffix
new_index1 = [sample.split('_NumReads')[0] for sample in combined_total_mapped_reads.index]
combined_total_mapped_reads.index = new_index1
# calculate total assigned reads
combined_total_mapped_reads['total_assigned_reads'] = combined_total_mapped_reads.sum(axis=1)
# read alignment statistics
star_stats = pd.read_csv(args.star_stats,sep="\t",index_col=0 )
# combine statistics
results_df = pd.concat([star_stats,combined_total_mapped_reads], axis=1)
# calculate unassigned host reads
results_df['unassigned_host_reads'] = results_df['host_uniquely_mapped_reads'] - results_df['host_assigned_reads']
# calculate unassigned pathogen reads
results_df['unassigned_pathogen_reads'] = results_df['pathogen_uniquely_mapped_reads'] - results_df['pathogen_assigned_reads']
# save results
results_df2 = results_df.sort_index()
results_df2.to_csv(args.output_dir, sep='\t')
# collect statistics for STAR
elif args.tool == 'star':
# read total number of uniquely mapped reads
mapped_uniquely = pd.read_csv(args.mapped_uniquely,sep="\t",index_col=0)
# read total number of multi-mapped reads
mapped_multi = pd.read_csv(args.mapped_multi,sep="\t",index_col=0)
# read total number of cross-mapped reads
    cross_mapped = pd.read_csv(args.cross_mapped, sep="\t", header=None, index_col=0, names=['cross_mapped_reads'])
#! /usr/bin/env python
import cyvcf2
import argparse
import sys
from collections import defaultdict, Counter
import pandas as pd
import signal
import numpy as np
from shutil import copyfile
import pyfaidx
from random import choice
from pyliftover import LiftOver
from Bio.Seq import reverse_complement
from mutyper import ancestor
def setup_ancestor(args):
"""utility for initializing an Ancestor object for use in different '
'subroutines"""
return ancestor.Ancestor(args.fasta, k=args.k, target=args.target,
strand_file=args.strand_file,
key_function=lambda x:
x.split(args.sep)[args.chrom_pos],
read_ahead=10000,
sequence_always_upper=(not args.strict))
def ancestral_fasta(args):
"""subroutine for ancestor subcommand
"""
# single chromosome fasta file for reference genome
ref = pyfaidx.Fasta(args.reference, read_ahead=10000)
# make a copy to build our ancestor for this chromosome
copyfile(args.reference, args.output)
anc = pyfaidx.Fasta(args.output, read_ahead=10000, mutable=True)
# reference genome for outgroup species (all chromosomes)
out = pyfaidx.Fasta(args.outgroup, read_ahead=10000)
# outgroup to reference alignment chain file
lo = LiftOver(args.chain)
# snps database for the same chromosome
vcf = cyvcf2.VCF(args.vcf)
# change regions outside of callability mask to all N bases
if args.bed:
if args.bed == '-':
bed = sys.stdin
else:
bed = open(args.bed, 'r')
last_end = 0
for line in bed:
chrom, start, end = line.rstrip().split('\t')[:3]
start = int(start)
anc[chrom][last_end:start] = 'N' * (start - last_end)
last_end = int(end)
anc[chrom][last_end:
len(anc[chrom])] = 'N' * (len(anc[chrom]) - last_end)
for variant in vcf:
# change variants that are not biallelic SNPs to N bases
if not (variant.is_snp and len(variant.ALT) == 1):
anc[chrom][variant.start:
variant.end] = 'N' * (variant.end - variant.start)
else:
out_coords = lo.convert_coordinate(variant.CHROM, variant.start)
# change ambiguously aligning sites to N bases
if out_coords is None or len(out_coords) != 1:
anc[chrom][variant.start] = 'N'
else:
if variant.REF != ref[chrom][variant.start].seq.upper():
raise ValueError(f'variant reference allele {variant.REF} '
f'mismatches reference sequence '
f'{ref[chrom][variant.start]}')
out_chromosome, out_position, out_strand = out_coords[0][:3]
out_allele = out[out_chromosome][out_position].seq
# if negative strand, take reverse complement base
if out_strand == '-':
out_allele = reverse_complement(out_allele)
# and finally, polarize
if out_allele.upper() == variant.ALT[0]:
anc[chrom][variant.start] = out_allele
elif out_allele.upper() != variant.REF:
# triallelic
anc[chrom][variant.start] = 'N'
def variants(args):
"""subroutine for variants subcommand
"""
ancestor = setup_ancestor(args)
vcf = cyvcf2.VCF(args.vcf)
vcf.add_info_to_header({'ID': 'mutation_type',
'Description': f'ancestral {args.k}-mer mutation '
'type',
'Type': 'Character', 'Number': '1'})
vcf_writer = cyvcf2.Writer('-', vcf)
vcf_writer.write_header()
for variant in vcf:
# biallelic snps only
if not (variant.is_snp and len(variant.ALT) == 1):
continue
# mutation type as ancestral kmer and derived kmer
anc_kmer, der_kmer = ancestor.mutation_type(variant.CHROM,
variant.start, variant.REF,
variant.ALT[0])
if anc_kmer is None or der_kmer is None:
continue
mutation_type = f'{anc_kmer}>{der_kmer}'
variant.INFO['mutation_type'] = mutation_type
# ancestral allele
AA = ancestor[variant.CHROM][variant.start].seq
# polarize genotypes (and associated INFO) if alternative allele is
# ancestral
if variant.ALT[0] == AA:
variant.INFO['AC'] = variant.INFO['AN'] - variant.INFO['AC']
variant.INFO['AF'] = variant.INFO['AC'] / variant.INFO['AN']
# cyvcf2 docs say we need to reassign genotypes like this for the
# change to propagate (can't just update indexwise)
if variant.ploidy == 2:
# diploid
variant.genotypes = [[int(not gt[0]), int(not gt[1]), gt[2]]
for gt in variant.genotypes]
elif variant.ploidy == 1:
# haploid
variant.genotypes = [[int(not gt[0]), gt[1]]
for gt in variant.genotypes]
else:
raise ValueError(f"invalid ploidy {variant.ploidy}")
elif not variant.REF == AA:
raise ValueError(f'ancestral allele {AA} is not equal to '
f'reference {variant.REF} or alternative '
f'{variant.ALT[0]}')
# set REF to ancestral allele and ALT to derived allele
variant.REF = anc_kmer[ancestor.target]
variant.ALT = der_kmer[ancestor.target]
vcf_writer.write_record(variant)
# this line required to exit on a SIGTERM in a pipe, e.g. from head
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def targets(args):
"""subroutine for targets subcommand
"""
ancestor = setup_ancestor(args)
if args.bed == '-':
args.bed = sys.stdin
sizes = ancestor.targets(args.bed)
try:
for kmer in sorted(sizes):
print(f'{kmer}\t{sizes[kmer]}')
except BrokenPipeError:
pass
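

# --- Hedged illustration (pure Python, independent of the mutyper objects) ----
# A mutation type label is "<ancestral k-mer>><derived k-mer>", exactly as it is
# assembled in variants() above; e.g. for k=3, a C>T change in an ACG context is
# recorded as "ACG>ATG". The sketch below builds that label by hand.
def _demo_mutation_type_label(context='ACG', target=1, derived_base='T'):
    derived = context[:target] + derived_base + context[target + 1:]
    return f'{context}>{derived}'
# _demo_mutation_type_label() -> 'ACG>ATG'
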
def spectra(args):
"""subroutine for spectra subcommand
"""
vcf = cyvcf2.VCF(args.vcf, gts012=True)
if args.population:
spectra_data = Counter()
for variant in vcf:
spectra_data[variant.INFO['mutation_type']] += 1
spectra = pd.DataFrame(spectra_data,
['population']).reindex(sorted(spectra_data),
axis='columns')
try:
print(spectra.to_csv(sep='\t', index=False))
except BrokenPipeError:
pass
else:
spectra_data = defaultdict(lambda: np.zeros_like(vcf.samples,
dtype=int))
if args.randomize:
for variant in vcf:
random_haplotype = choice([x for x, y in enumerate(variant.gt_types)
for _ in range(y)])
spectra_data[variant.INFO['mutation_type']][random_haplotype] += 1.
else:
for variant in vcf:
if variant.ploidy == 1:
# haploid ALT are coded as 2 (homozygous ALT)
variant.gt_types[variant.gt_types == 2] = 1
spectra_data[variant.INFO['mutation_type']] += variant.gt_types
spectra = pd.DataFrame(spectra_data,
vcf.samples).reindex(sorted(spectra_data),
axis='columns')
try:
print(spectra.to_csv(sep='\t', index=True,
index_label='sample'))
except BrokenPipeError:
pass
def ksfs(args):
"""subroutine for ksfs subcommand
"""
vcf = cyvcf2.VCF(args.vcf)
ksfs_data = defaultdict(lambda: Counter())
AN = None
for variant in vcf:
# AN must be the same for all sites (no missing genotypes)
if AN is not None and variant.INFO['AN'] != AN:
raise ValueError(f'different AN {variant.INFO["AN"]} and {AN}'
' indicates missing genotypes')
AN = variant.INFO['AN']
ksfs_data[variant.INFO['mutation_type']][variant.INFO['AC']] += 1
# exclude fixed sites AC=0, AC=AN
index = range(1, AN)
for mutation_type in sorted(ksfs_data):
ksfs_data[mutation_type] = [ksfs_data[mutation_type][ac]
for ac in index]
    ksfs = pd.DataFrame(ksfs_data, index)
# -*- coding: utf-8 -*-
"""experiment0_baseline_nn
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1fD-X5sxQmLpGu1VhKF6I73vLX7swU2kV
"""
"""### Imports"""
import numpy as np
import matplotlib.pyplot as plt
import random
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, precision_score
from sklearn.metrics import recall_score, classification_report
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
import pandas as pd
from keras.utils.vis_utils import plot_model
from keras.preprocessing.image import ImageDataGenerator
def plot_train(train_history,
train=True,
validation=True,
acc_name='Model accuracy',
loss_name='Model loss'):
legend = []
fig, ax = plt.subplots()
fig.tight_layout()
fig = plt.gcf()
fig.set_size_inches(18.5, 10.5)
# summarize history for accuracy
if train:
plt.plot(history.history['acc'])
legend.append('train')
if validation:
plt.plot(history.history['val_acc'])
legend.append('validation')
plt.grid()
plt.title("Training Accuracy")
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(legend, loc='upper left')
plt.xticks((np.arange(len(history.history["acc"]))),
(np.arange(len(history.history["acc"]))+1))
fig.savefig(acc_name+".png", dpi=100)
plt.show()
fig.tight_layout()
fig = plt.gcf()
fig.set_size_inches(18.5, 10.5)
legend = []
# summarize history for loss
if train:
plt.plot(history.history['loss'])
legend.append('train')
if validation:
plt.plot(history.history['val_loss'])
legend.append('validation')
plt.grid()
plt.title("Training Loss")
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(legend, loc='upper left')
plt.xticks((np.arange(len(history.history["acc"]))),
(np.arange(len(history.history["acc"]))+1))
fig.savefig(loss_name+".png", dpi=100)
plt.show()
return
def global_evaluation(true_labels, predicted_labels, avg='macro'):
f1 = f1_score(true_labels, predicted_labels, average=avg)
precision = precision_score(true_labels, predicted_labels, average=avg)
recall = recall_score(true_labels, predicted_labels, average=avg)
accuracy = accuracy_score(true_labels, predicted_labels)
evaluation_table = pd.DataFrame({'Metric':["F1-score", "Precision", "Recall", "Accuracy"], "Value":[f1, precision, recall, accuracy] })
return evaluation_table
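

# --- Hedged usage sketch -------------------------------------------------------
# Tiny example of global_evaluation() on made-up labels; the macro averages are
# taken over the classes that actually appear in the toy vectors.
def _demo_global_evaluation():
    y_true = [0, 1, 2, 2, 1, 0]
    y_pred = [0, 1, 2, 1, 1, 0]
    print(global_evaluation(y_true, y_pred, avg='macro'))
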
def micro_evaluation(true_labels, predicted_labels, labels=list(range(10)), avg=None):
f1 = f1_score(true_labels, predicted_labels, average=avg)
precision = precision_score(true_labels, predicted_labels, average=avg)
recall = recall_score(true_labels, predicted_labels, average=avg)
    evaluation_table = pd.DataFrame({'Labels': labels, "precision": precision, "recall": recall, "f1": f1})
#! /usr/bin/env python
# -*- coding: utf-8 -*-
""" Aplicación para resolver el Challenge Python L1
Desarrollado por <NAME>
version 1.0
Bitacora:
fecha: 2021-02-27 observacion: Version 1 por: <NAME>
"""
import requests
import pandas as pd
import json
import hashlib
from datetime import datetime as dt
import sqlite3
from os import path,remove #validar si existen archivos
import logging
import argparse # agregar parametros por script
# To run the unit tests add the -T flag; to write a log file add the -L flag
# example
# cd %project path%
# python.exe pythonchallenge.py -L
parser = argparse.ArgumentParser()
parser.add_argument("-L", "--logs", help="Crea archivo de log en la carpeta /logs/ ", action="store_true")
parser.add_argument("-T", "--Test", help="Ejecuta los test unitarios", action="store_true" )
args = parser.parse_args()
logs=args.logs
unitTest=args.Test
def ahora():
return str(dt.now().strftime("%Y-%m-%d %H:%M:%S:%f"))
archivoLog=ahora().replace('-','').replace(':','').replace(' ','')+'.log'
fichero_log='./logs/'+archivoLog
if logs:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s : %(levelname)s : %(message)s',
filemode = 'w',
filename = fichero_log,
)
logging.debug('Inicio {0}'.format(ahora()))
# 1. From https://rapidapi.com/apilayernet/api/rest-countries-v1, obtain all existing regions.
def extraeRegiones():
"""
Retorna las regiones
>>> regiones = extraeRegiones()
>>> respuesta= {'', 'Polar', 'Africa', 'Europe', 'Asia', 'Americas', 'Oceania'}
>>> [ True for x in regiones if x in respuesta ]
[True, True, True, True, True, True, True]
"""
url = "https://restcountries-v1.p.rapidapi.com/all"
headers = {
'x-rapidapi-key': "<KEY>",
'x-rapidapi-host': "restcountries-v1.p.rapidapi.com"
}
response = requests.request("GET", url, headers=headers)
respuestaJson = response.json()
    df = pd.DataFrame(data=respuestaJson)
import numpy as np
import pandas as pd
import random as random
import pickle
def formatRank_german(df):
tmp = pd.DataFrame()
tmp['y']=df.sort_values('y_pred',ascending=False).index
tmp['y_pred']=tmp.index
tmp['g']=df.sort_values('y_pred',ascending=False).reset_index()['g']
return tmp
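

# --- Hedged usage sketch -------------------------------------------------------
# formatRank_german() turns predicted scores into a rank table: 'y' holds the
# original index ordered by descending score, 'y_pred' the new rank position and
# 'g' the group flag. The toy values below are made up.
def _demo_formatRank_german():
    toy = pd.DataFrame({'y_pred': [0.2, 0.9, 0.5], 'g': [0, 1, 0]})
    print(formatRank_german(toy))
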
def formatRank_compas(df):
tmp = pd.DataFrame()
tmp['y']=df.sort_values('y_pred').index
tmp['y_pred']=tmp.index
tmp['g']=df.sort_values('y_pred')['g']
return tmp
def readFA_IRData(inpath, filename, funct):
return funct(pd.read_pickle(inpath+filename))
def getAllFA_IRData(inpath, funct):
d ={}
d['cb'] = readFA_IRData(inpath, 'ColorblindRanking.pickle', funct)
d['base'] = d['cb'].copy()
d['base']['y_pred']=d['base']['y']
d['feld'] = readFA_IRData(inpath, 'FeldmanRanking.pickle', funct)
d['feld']['y'] = d['cb']['y']
d['fair1'] = readFA_IRData(inpath, 'FairRanking01PercentProtected.pickle', funct)
d['fair2'] = readFA_IRData(inpath, 'FairRanking02PercentProtected.pickle', funct)
d['fair3'] = readFA_IRData(inpath, 'FairRanking03PercentProtected.pickle', funct)
d['fair4'] = readFA_IRData(inpath, 'FairRanking04PercentProtected.pickle', funct)
d['fair5'] = readFA_IRData(inpath, 'FairRanking05PercentProtected.pickle', funct)
d['fair6'] = readFA_IRData(inpath, 'FairRanking06PercentProtected.pickle', funct)
d['fair7'] = readFA_IRData(inpath, 'FairRanking07PercentProtected.pickle', funct)
d['fair8'] = readFA_IRData(inpath, 'FairRanking08PercentProtected.pickle', funct)
d['fair9'] = readFA_IRData(inpath, 'FairRanking09PercentProtected.pickle', funct)
return d
def plainFA_IRData(inpath):
d ={}
d['cb'] = pd.read_pickle(inpath+'ColorblindRanking.pickle')
d['base'] = d['cb'].copy()
d['base']['y_pred']=d['base']['y']
d['feld'] = pd.read_pickle(inpath+'FeldmanRanking.pickle')
d['feld']['y'] = d['cb']['y']
d['fair1'] = pd.read_pickle(inpath+'FairRanking01PercentProtected.pickle')
d['fair2'] = pd.read_pickle(inpath+'FairRanking02PercentProtected.pickle')
    d['fair3'] = pd.read_pickle(inpath+'FairRanking03PercentProtected.pickle')
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import plotly.graph_objs as go
import pandas as pd
import sqlite3
from dash.dependencies import Input, Output, State
import time
# import datetime
from datetime import datetime
from pandas import Series
from scipy import stats
from scipy.stats import norm
from numpy import arange,array,ones
import dash_table
app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
app.config['suppress_callback_exceptions']=True
current_year = datetime.now().year
current_day = datetime.now().day
today = time.strftime("%Y-%m-%d")
dayofyear = time.strftime("%j")
dayofyear = int(dayofyear)
# daily normal temperatures
df_norms_max = pd.read_csv('./daily_normal_max.csv')
df_norms_min = pd.read_csv('./daily_normal_min.csv')
df_norms_max_ly = pd.read_csv('./daily_normal_max_ly.csv')
df_norms_min_ly = pd.read_csv('./daily_normal_min_ly.csv')
df_norms_avg = pd.read_csv('./daily_normal_avg.csv')
df_norms_avg_ly = pd.read_csv('./daily_normal_avg_ly.csv')
df_old = pd.read_csv('./stapleton.csv').round(1)
df_old['DATE'] = pd.to_datetime(df_old['DATE'])
df_old = df_old.set_index('DATE')
df_new = pd.read_csv('https://www.ncei.noaa.gov/access/services/data/v1?dataset=daily-summaries&dataTypes=TMAX,TMIN&stations=USW00023062&startDate=2019-01-01&endDate=' + today + '&units=standard').round(1)
df_new['DATE'] = pd.to_datetime(df_new['DATE'])
df_new = df_new.set_index('DATE')
df_new['AVG'] = (df_new['TMAX'] + df_new['TMIN']) / 2
# print(df_new)
if current_year % 4 == 0:
df_new['MXNRM'] = df_norms_max_ly['DLY-TMAX-NORMAL'][0:len(df_new)].values
df_new['MNNRM'] = df_norms_min_ly['DLY-TMIN-NORMAL'][0:len(df_new)].values
else:
df_new['MXNRM'] = df_norms_max['DLY-TMAX-NORMAL'][0:len(df_new)].values
df_new['MNNRM'] = df_norms_min['DLY-TMIN-NORMAL'][0:len(df_new)].values
df_new['AVGNRM'] = (df_new['MXNRM'] + df_new['MNNRM']) / 2
df = pd.concat([df_old, df_new], ignore_index=False)
df_ya_max = df.resample('Y').mean()
# record high and low
record_max = df.loc[df['TMAX'].idxmax()]
record_min = df.loc[df['TMIN'].idxmin()]
df_ya_max = df.resample('Y').mean()
df_da = df_ya_max.groupby((df_ya_max.index.year//10)*10).mean()
# removes final year in df
df5 = df_ya_max[:-1]
# removes final decade in decade averages
df10 = df_da[0:-1]
# filters for completed years in current decade
current_year_decade = current_year%10
current_year_indexer = current_year_decade + 1
# current year decade avg current decade
df_da_cd = (df5[-(current_year_indexer):]).mean()
df_da_cd['combined'] = (df_da_cd['TMAX'] + df_da_cd['TMIN']) / 2
df5['combined'] = (df5['TMAX'] + df5['TMIN']) / 2
# current year 90- degree days
cy90 = df_new[df_new['TMAX']>=90]
# add current decade to decade list
df10.loc['2010'] = df_da_cd
df10 = df10.round(1)
df10 = df10.reset_index()
# current year stats
cy_max = df_new.loc[df_new['TMAX'].idxmax()]
cy_min = df_new.loc[df_new['TMIN'].idxmin()]
cy_max_mean = df_new['TMAX'].mean()
cy_min_mean = df_new['TMIN'].mean()
# filters all AVGT data for 5 year moving average
allavg_rolling = df['AVG'].rolling(window=1825)
allavg_rolling_mean = allavg_rolling.mean()
# filters all MAXT data for 5 year moving average
allmax_rolling = df['TMAX'].rolling(window=1825)
allmax_rolling_mean = allmax_rolling.mean()
# filters all MINT data for 5 year moving average
allmin_rolling = df['TMIN'].rolling(window=1825)
allmin_rolling_mean = allmin_rolling.mean()
# all_min_temp_mean = allmin_rolling.mean()
# sorts annual mean temps
annual_max_mean_rankings = df5['TMAX'].sort_values(axis=0, ascending=False)
annual_min_mean_rankings = df5['TMIN'].sort_values(axis=0, ascending=False)
annual_combined_rankings = df5['combined'].sort_values(axis=0, ascending=False)
drl = annual_max_mean_rankings.size
acr = pd.DataFrame({'YEAR':annual_combined_rankings.index.year, 'AVG TEMP':annual_combined_rankings.values})
acr = acr.round(1)
maxdt = pd.DataFrame({'YEAR':annual_max_mean_rankings.index.year, 'MAX TEMP':annual_max_mean_rankings.values})
maxdt = maxdt.round(1)
mindt = pd.DataFrame({'YEAR':annual_min_mean_rankings.index.year, 'MIN TEMP':annual_min_mean_rankings.values})
mindt = mindt.round(1)
startyr = 1950
presentyr = datetime.now().year
year_count = presentyr-startyr
print(year_count)
# linear fit for Avg Max Temps
def annual_min_fit():
xi = arange(0,year_count)
slope, intercept, r_value, p_value, std_err = stats.linregress(xi,df5["TMIN"])
return (slope*xi+intercept)
def annual_max_fit():
xi = arange(0,year_count)
slope, intercept, r_value, p_value, std_err = stats.linregress(xi,df5["TMAX"])
return (slope*xi+intercept)
def all_avg_temp_fit():
xi = arange(0,year_count)
slope, intercept, r_value, p_value, std_err = stats.linregress(xi,df5["AVG"])
return (slope*xi+intercept)
def all_max_temp_fit():
xi = arange(0,year_count)
slope, intercept, r_value, p_value, std_err = stats.linregress(xi,df5["TMAX"])
return (slope*xi+intercept)
def all_min_temp_fit():
xi = arange(0,year_count)
slope, intercept, r_value, p_value, std_err = stats.linregress(xi,df5["TMIN"])
return (slope*xi+intercept)
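

# --- Hedged illustration of the trend-line helpers above -----------------------
# Each *_fit() function regresses an annual-mean series on its year index and
# returns the fitted line; the standalone sketch below applies the same pattern
# to made-up numbers so it can run without the full station record.
def _demo_trend_line():
    yi = [10.0, 10.5, 10.2, 11.0, 11.3]
    xi = arange(0, len(yi))
    slope, intercept, r_value, p_value, std_err = stats.linregress(xi, yi)
    return slope * xi + intercept
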
def generate_table(acr, max_rows=10):
return html.Table (
[html.Tr([html.Th(col) for col in acr.columns])] +
[html.Tr([
html.Td(acr.iloc[i][col]) for col in acr.columns
]) for i in range(min(len(acr), max_rows))]
)
def generate_table_maxdt(maxdt, max_rows=10):
return html.Table (
[html.Tr([html.Th(col) for col in maxdt.columns])] +
[html.Tr([
html.Td(maxdt.iloc[i][col]) for col in maxdt.columns
]) for i in range(min(len(maxdt), max_rows))]
)
def generate_table_mindt(mindt, max_rows=10):
return html.Table (
[html.Tr([html.Th(col) for col in mindt.columns])] +
[html.Tr([
html.Td(mindt.iloc[i][col]) for col in mindt.columns
]) for i in range(min(len(mindt), max_rows))]
)
# year list for dropdown selector
year = []
for YEAR in df.index.year.unique():
year.append({'label':(YEAR), 'value':YEAR})
body = dbc.Container([
dbc.Row([
dbc.Col(
html.Div(
className='app-header',
children=[
html.Div('DENVER TEMPERATURE RECORD', className="app-header--title"),
]
),
),
]),
dbc.Row([
dbc.Col(
html.H5('1950-PRESENT', style={'text-align':'center'})
)]
),
dbc.Row([
dbc.Col(
html.Div(
html.H6('DAILY TEMPERATURES'),
style={'text-align':'center'}
),
)
],
justify='around',
),
dbc.Row([
dbc.Col(
html.Div([
dcc.Graph(id='graph1'),
]),
width={'size':6}
),
dbc.Col(
html.Div([
dcc.Graph(id='graph2'),
]),
width={'size':6}
),
],
justify='around',
),
dbc.Row([
dbc.Col(
html.H5('SELECT YEAR', style={'text-align':'center'})
),
dbc.Col(
html.H6('Data Updated', style={'text-align':'center'})
),
dbc.Col(
html.H5('SELECT PARAMETER', style={'text-align':'center'})
),
]),
dbc.Row([
dbc.Col(
dcc.Dropdown(id='year-picker', options=year
),
width = {'size': 3}),
dbc.Col(
html.H5('{}-{}-{}'.format(df.index[-1].year,df.index[-1].month,df.index[-1].day), style={'text-align': 'center'}),
width = {'size': 3}),
dbc.Col(
dcc.RadioItems(id='param', options=[
{'label':'MAX TEMP','value':'TMAX'},
{'label':'MIN TEMP','value':'TMIN'},
{'label':'AVG TEMP','value':'AVG'},
]),
width = {'size': 3}),
],
justify='around',
),
html.Div(
className="stats",
children=[
dbc.Row([
dbc.Col(
html.Div(
html.H5(id='stats',style={'text-align':'center'}),
),
),
]),
]),
dbc.Row([
dbc.Col(
html.Div([
html.H6(id='yearly-high/low')
]),
width={'size':6},
style={'text-align':'center'}
),
dbc.Col(
html.Div([
html.H6(id='mean-max/min'),
]),
width={'size':6},
style={'text-align':'center'}
),
]),
dbc.Row([
dbc.Col(
html.Div([
html.H6(id='days-above-100/below-0')
]),
width={'size':6},
style={'text-align':'center'}
),
dbc.Col(
html.Div([
html.H6(id='days-above-90/high-below-0'),
]),
width={'size':6},
style={'text-align':'center'}
),
]),
dbc.Row([
dbc.Col(
html.Div([
html.H6(id='days-above-80/below-32')
]),
width={'size':6},
style={'text-align':'center'}
),
dbc.Col(
html.Div([
html.H6(id='days-above-normal/below-normal'),
]),
width={'size':6},
style={'text-align':'center'}
),
]),
dbc.Row([
dbc.Col(
html.H5('Select Data', style={'text-align':'center'})
)]
),
dbc.Row([
dbc.Col(
dcc.RadioItems(id='selection', options=[
{'label':'Decade Rankings','value':'decades'},
{'label':'100 Degree Days','value':'100-degrees'},
{'label':'90 Degree Days','value':'90-degrees'},
]),
width = {'size': 5,'display': 'inline-block'},
),
],
justify='center',
),
dbc.Row([
dbc.Col(
dash_table.DataTable(
id='temptable',
columns=[{}],
data=[{}],
sorting=True,
style_cell={'textAlign': 'center'},
style_as_list_view=True,
style_table={
'maxHeight': '450',
'overflowY': 'scroll'
},
),
),
dbc.Col(
dcc.Graph(id='bar'),
),
]),
dbc.Row([
dbc.Col(
html.H4('YEARLY RANKINGS', style={'text-align':'center'})
)]
),
dbc.Row([
dbc.Col(
html.H6('Select Parameters', style={'text-align':'center'})
)]
),
dbc.Row([
dbc.Col(
dcc.RadioItems(id='rankings', options=[
{'label':'Avg Daily Temp','value':'acr'},
{'label':'Max Daily Temp','value':'max_dt'},
{'label':'Min Daily Temp','value':'min_dt'},
]),
width = {'size': 4,'display': 'inline-block'}),
],
justify='center',
),
dbc.Row([
dbc.Col(
html.Div(id='table-container'),
),
dbc.Col(
dcc.Graph(id='yearly-rankings-bar'),
),
]),
dbc.Row([
dbc.Col(
html.H4('1950-Present, Complete Record', style={'text-align':'center'})
)]
),
dbc.Row(
[
dbc.Col(
html.Div([
dcc.Graph(id='all-avg-temps',
figure = {
'data': [
{
'x' : df.index,
'y' : allavg_rolling_mean,
'mode' : 'lines + markers',
'name' : 'Avg Temp'
},
{
'x' : df5.index,
'y' : all_avg_temp_fit(),
'name' : 'trend'
}
],
'layout': go.Layout(
xaxis = {'title': 'Date'},
yaxis = {'title': 'Temp'},
hovermode = 'closest',
height = 700
),
}
),
]),
width = {'size': 10, 'offset':1},
),
]
),
dbc.Row(
[
dbc.Col(
html.H5('Avg Temps 1950-Present, 5 Year Moving Avg', style={'height':50, 'text-align': 'center'}),
),
],
align = 'around'
),
dbc.Row(
[
dbc.Col(
html.Div([
dcc.Graph(id='all-max-temps',
figure = {
'data': [
{
'x' : df.index,
'y' : allmax_rolling_mean,
'mode' : 'lines + markers',
'name' : 'Max Temp'
},
{
'x' : df5.index,
'y' : all_max_temp_fit(),
'name' : 'trend'
}
],
'layout': go.Layout(
xaxis = {'title': 'Date'},
yaxis = {'title': 'Temp'},
hovermode = 'closest',
height = 700
),
}
),
]),
width = {'size': 10, 'offset':1},
),
],
),
dbc.Row(
[
dbc.Col(
html.H5('Max Temps 1950-Present, 5 Year Moving Avg', style={'height':50, 'text-align': 'center'}),
),
],
align = 'around'
),
dbc.Row(
[
dbc.Col(
html.Div([
dcc.Graph(id='all-min-temps',
figure = {
'data': [
{
'x' : df.index,
'y' : allmin_rolling_mean,
'mode' : 'lines + markers',
'name' : 'Min Temp'
},
{
'x' : df5.index,
'y' : all_min_temp_fit(),
'name' : 'trend'
},
],
'layout': go.Layout(
xaxis = {'title': 'Date'},
yaxis = {'title': 'Temp'},
hovermode = 'closest',
height = 700
),
}
),
]),
width = {'size': 10, 'offset':1},
),
]
),
dbc.Row(
[
dbc.Col(
html.H5('Min Temps 1950-Present, 5 Year Moving Avg', style={'height':50, 'text-align': 'center'}),
),
],
align = 'around'
),
])
@app.callback(Output('graph1', 'figure'),
[Input('year-picker', 'value'),
Input('param', 'value')])
def update_figure(selected_year, param):
filtered_year = df[df.index.year == selected_year]
traces = []
year_param_max = filtered_year['TMAX']
year_param_min = filtered_year['TMIN']
year_param_avg = filtered_year['AVG']
if param == 'TMAX':
traces.append(go.Scatter(
y = year_param_max,
name = param,
line = {'color':'red'}
))
traces.append(go.Scatter(
y = df_norms_max['DLY-TMAX-NORMAL'],
name = "Normal Max T",
line = {'color':'black'}
))
elif param == 'TMIN':
traces.append(go.Scatter(
y = year_param_min,
name = param,
line = {'color':'dodgerblue'}
))
traces.append(go.Scatter(
y = df_norms_min['DLY-TMIN-NORMAL'],
name = "Normal Min T",
line = {'color':'black'}
))
elif param == 'AVG':
traces.append(go.Scatter(
y = year_param_avg,
name = param,
line = {'color':'black'}
))
traces.append(go.Scatter(
y = df_norms_avg['DLY-AVG-NORMAL'],
name = "Normal Avg T",
line = {'color':'black'}
))
return {
'data': traces,
'layout': go.Layout(
xaxis = {'title': 'DAY'},
yaxis = {'title': 'TEMP'},
hovermode = 'closest',
title = 'Daily Temps',
height = 400,
)
}
@app.callback(Output('graph2', 'figure'),
[Input('year-picker', 'value'),
Input('param', 'value')])
def update_figure_a(selected_year, param):
traces = []
filtered_year = df[df.index.year == selected_year]
year_param_max = filtered_year['TMAX']
year_param_min = filtered_year['TMIN']
year_param_avg = filtered_year['AVG']
normal_max_diff = year_param_max - filtered_year['MXNRM']
normal_min_diff = year_param_min - filtered_year['MNNRM']
normal_avg_diff = year_param_avg - filtered_year['AVGNRM']
colorscale_max = ((((normal_max_diff.max() - normal_max_diff.min()) - normal_max_diff.max()) / (normal_max_diff.max() - normal_max_diff.min())))
colorscale_min = ((((normal_min_diff.max() - normal_min_diff.min()) - normal_min_diff.max()) / (normal_min_diff.max() - normal_min_diff.min())))
colorscale_avg = ((((normal_avg_diff.max() - normal_avg_diff.min()) - normal_avg_diff.max()) / (normal_avg_diff.max() - normal_avg_diff.min())))
if param == 'TMAX':
traces.append(go.Heatmap(
y=year_param_max.index.day,
x=year_param_max.index.month,
z=normal_max_diff,
colorscale=[[0, 'blue'],[colorscale_max, 'white'], [1, 'red']]
))
elif param == 'TMIN':
traces.append(go.Heatmap(
y=year_param_min.index.day,
x=year_param_min.index.month,
z=normal_min_diff,
colorscale=[[0, 'blue'],[colorscale_min, 'white'], [1, 'red']]
))
elif param == 'AVG':
traces.append(go.Heatmap(
y=year_param_avg.index.day,
x=year_param_avg.index.month,
z=normal_avg_diff,
colorscale=[[0, 'blue'],[colorscale_avg, 'white'], [1, 'red']]
))
return {
'data': traces,
'layout': go.Layout(
title='{} Departure From Norm'.format(param),
xaxis={'title':'MONTH'},
yaxis={'title':'DAY'},
height= 400
)
}
@app.callback(Output('stats', 'children'),
[Input('year-picker', 'value')])
def update_layout_a(selected_year):
return 'Stats for {}'.format(selected_year)
@app.callback(Output('yearly-high/low', 'children'),
[Input('year-picker', 'value'),
Input('param', 'value')])
def update_layout_b(selected_year, param):
filtered_year = df[df.index.year == selected_year]
yearly_max = filtered_year.loc[filtered_year['TMAX'].idxmax()]
yearly_min = filtered_year.loc[filtered_year['TMIN'].idxmin()]
if param == 'TMAX':
return 'Yearly High: {}'.format(yearly_max['TMAX'])
elif param == 'TMIN':
return 'Yearly Low: {}'.format(yearly_min['TMIN'])
@app.callback(Output('mean-max/min', 'children'),
[Input('year-picker', 'value'),
Input('param', 'value')])
def update_layout_c(selected_year, param):
filtered_year = df[df.index.year == selected_year]
if param == 'TMAX':
return 'Mean Max Temp: {:,.1f}'.format(filtered_year['TMAX'].mean())
elif param == 'TMIN':
return 'Mean Min Temp: {:,.1f}'.format(filtered_year['TMIN'].mean())
@app.callback(Output('days-above-100/below-0', 'children'),
[Input('year-picker', 'value'),
Input('param', 'value')])
def update_layout_d(selected_year, param):
filtered_year = df[df.index.year == selected_year]
da_hundred = (filtered_year['TMAX'] >= 100).sum()
da_below_zero = (filtered_year['TMIN'] < 0).sum()
if param == 'TMAX':
return '100 Degree Days: {} - Normal: 1'.format(da_hundred)
elif param == 'TMIN':
return 'Days Below 0: {} - Normal: 6.7'.format(da_below_zero)
@app.callback(Output('days-above-90/high-below-0', 'children'),
[Input('year-picker', 'value'),
Input('param', 'value')])
def update_layout_e(selected_year, param):
filtered_year = df[df.index.year == selected_year]
da_ninety = (filtered_year['TMAX'] >= 90).sum()
da_high_below_32 = (filtered_year['TMAX'] < 32).sum()
if param == 'TMAX':
return '90 Degree Days: {} - Normal: 30.6'.format(da_ninety)
elif param == 'TMIN':
return 'Days High Below 32: {} - Normal: 21'.format(da_high_below_32)
@app.callback(Output('days-above-80/below-32', 'children'),
[Input('year-picker', 'value'),
Input('param', 'value')])
def update_layout_f(selected_year, param):
filtered_year = df[df.index.year == selected_year]
da_80 = (filtered_year['TMAX'] >= 80).sum()
da_below_32 = (filtered_year['TMIN'] < 32).sum()
if param == 'TMAX':
return '80 Degree Days: {} - Normal: 95.5'.format(da_80)
elif param == 'TMIN':
return 'Days Below 32: {} - Normal: 156.5'.format(da_below_32)
@app.callback(Output('days-above-normal/below-normal', 'children'),
[Input('year-picker', 'value'),
Input('param', 'value')])
def update_layout_g(selected_year, param):
filtered_year = df[df.index.year == selected_year]
dmaxan = 0
dminan = 0
i = 0
df_norms_max.loc[i]['DLY-TMAX-NORMAL']
if param == 'TMAX':
while i < filtered_year["TMAX"].count():
if filtered_year.iloc[i]['TMAX'] > df_norms_max.iloc[i]['DLY-TMAX-NORMAL']:
dmaxan = dmaxan + 1
i = i + 1
else: i = i + 1
return 'Days High Above Normal: {}/{}'.format(dmaxan, i)
elif param == 'TMIN':
while i < filtered_year["TMIN"].count():
if filtered_year.iloc[i]['TMIN'] < df_norms_min.iloc[i]['DLY-TMIN-NORMAL']:
dminan = dminan + 1
i = i + 1
else: i = i + 1
return 'Days Low Below Normal: {}/{}'.format(dminan, i)
@app.callback(Output('temptable', 'columns'),
[Input('selection', 'value')])
def update_table_a(selection):
df_100 = df[df['TMAX']>=100]
df_100_count = df_100.resample('Y').count()['TMAX']
df_100 = pd.DataFrame({'DATE':df_100_count.index.year, '100 Degree Days':df_100_count.values})
df_90 = df[df['TMAX']>=90]
df_90_count = df_90.resample('Y').count()['TMAX']
# convert series to dataframe
df_90 = pd.DataFrame({'DATE':df_90_count.index.year, '90 Degree Days':df_90_count.values})
# print(df10)
if selection == 'decades':
return [{'name': i, 'id': i} for i in df10.columns]
elif selection == '100-degrees':
return [{'name': i, 'id': i} for i in df_100.columns]
elif selection == '90-degrees':
return [{'name': i, 'id': i} for i in df_90.columns]
@app.callback(Output('temptable', 'data'),
[Input('selection', 'value')])
def create_table_b(selection):
df_100 = df[df['TMAX']>=100]
df_100_count = df_100.resample('Y').count()['TMAX']
df_100 = pd.DataFrame({'DATE':df_100_count.index.year, '100 Degree Days':df_100_count.values})
df_90 = df[df['TMAX']>=90]
df_90_count = df_90.resample('Y').count()['TMAX']
df_90 = pd.DataFrame({'DATE':df_90_count.index.year, '90 Degree Days':df_90_count.values})
if selection == 'decades':
return df10.to_dict('records')
elif selection == '100-degrees':
return df_100.to_dict('records')
elif selection == '90-degrees':
return df_90.to_dict('records')
@app.callback(Output('bar', 'figure'),
[Input('selection', 'value')])
def update_figure_b(selection):
print(selection)
df_100 = df[df['TMAX']>=100]
df_100_count = df_100.resample('Y').count()['TMAX']
    df_100 = pd.DataFrame({'DATE':df_100_count.index, '100 Degree Days':df_100_count.values})
import os
import datetime
import pandas as pd
from dataactcore.config import CONFIG_BROKER
from dataactcore.scripts import load_duns_exec_comp
from dataactcore.models.domainModels import DUNS
from dataactcore.utils.duns import DUNS_COLUMNS, EXCLUDE_FROM_API
def mock_get_duns_props_from_sam(duns_list):
""" Mock function for get_duns_props as we can't connect to the SAM service """
request_cols = [col for col in DUNS_COLUMNS if col not in EXCLUDE_FROM_API]
columns = request_cols
    results = pd.DataFrame(columns=columns)
from __future__ import print_function
import os
import pandas as pd
import xgboost as xgb
import time
import shutil
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
import numpy as np
from sklearn.utils import shuffle
def archive_results(filename,results,algo,script):
"""
:type algo: basestring
:type script: basestring
:type results: DataFrame
"""
#assert results == pd.DataFrame
now=time.localtime()[0:5]
dirname='../archive'
subdirfmt='%4d-%02d-%02d-%02d-%02d'
subdir=subdirfmt %now
if not os.path.exists(os.path.join(dirname,str(algo))):
os.mkdir(os.path.join(dirname,str(algo)))
dir_to_create=os.path.join(dirname,str(algo),subdir)
if not os.path.exists(dir_to_create):
os.mkdir(dir_to_create)
os.chdir(dir_to_create)
results.to_csv(filename,index=False,float_format='%.6f')
shutil.copy2(script,'.')
return
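

# --- Hedged usage sketch (the file names below are placeholders) ---------------
# archive_results() snapshots a submission plus the generating script into a
# dated folder under ../archive/<algo>/ and, as a side effect, chdir()s there.
def _demo_archive_results():
    demo = pd.DataFrame({'patient_id': [1, 2], 'predict_screener': [0.1, 0.9]})
    archive_results('demo_submission.csv', demo, 'xgboost_demo', __file__)
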
os.chdir('/users/cuoco/home/KC/cervical-cancer-screening/src')
trainfile=('../input/patients_train.csv.gz')
testfile=('../input/patients_test.csv.gz')
train=pd.read_csv(trainfile,low_memory=False )
test=pd.read_csv(testfile,low_memory=False )
train_ex_file=('../input/train_patients_to_exclude.csv.gz')
train_ex=pd.read_csv(train_ex_file,low_memory=False)
train=train[train.patient_id.isin(train_ex.patient_id)==False]
test_ex_file=('../input/test_patients_to_exclude.csv.gz')
test_ex=pd.read_csv(test_ex_file,low_memory=False)
test=test[test.patient_id.isin(test_ex.patient_id)==False]
print(train.shape,test.shape)
surgical=pd.read_csv('../features/surgical_pap.csv.gz')
diagnosis=pd.read_csv('../features/diagnosis_hpv.csv.gz')
procedure_cervi=pd.read_csv('../features/procedure_cervi.csv.gz')
procedure_hpv=pd.read_csv('../features/procedure_hpv.csv.gz')
procedure_vaccine=pd.read_csv('../features/procedure_vaccine.csv.gz')
procedure_vagi=pd.read_csv('../features/procedure_vagi.csv.gz')
procedure_plan_type=pd.read_csv('../features/procedure_plan_type.csv.gz')
rx_payment=pd.read_csv('../features/rx_payment.csv.gz')
train_pract_screen_ratio=pd.read_csv('../features/train_pract_screen_ratio.csv.gz')
test_pract_screen_ratio=pd.read_csv('../features/test_pract_screen_ratio.csv.gz')
visits=pd.read_csv('../features/visits.csv.gz')
#
print(train.shape,test.shape)
train=pd.merge(train,visits, on='patient_id',how='left')
test=pd.merge(test,visits, on='patient_id',how='left')
print('after merging visits')
train=pd.merge(train,surgical, on='patient_id',how='left')
test=pd.merge(test,surgical, on='patient_id',how='left')
print('after merging surgical')
print(train.shape,test.shape)
train=pd.merge(train,diagnosis, on='patient_id',how='left')
test=pd.merge(test,diagnosis, on='patient_id',how='left')
print('after merging diagnosis')
print(train.shape,test.shape)
train=pd.merge(train,procedure_cervi, on='patient_id',how='left')
test=pd.merge(test,procedure_cervi, on='patient_id',how='left')
train=pd.merge(train,procedure_hpv, on='patient_id',how='left')
test=pd.merge(test,procedure_hpv, on='patient_id',how='left')
#train=pd.merge(train,procedure_vaccine, on='patient_id',how='left')
#test=pd.merge(test,procedure_vaccine, on='patient_id',how='left')
train=pd.merge(train,procedure_vagi, on='patient_id',how='left')
test=pd.merge(test,procedure_vagi, on='patient_id',how='left')
#train=pd.merge(train,procedure_plan_type, on='patient_id',how='left')
#test=pd.merge(test,procedure_plan_type, on='patient_id',how='left')
print('after merging procedure')
print(train.shape,test.shape)
#train=pd.merge(train,rx_payment, on='patient_id',how='left')
#test=pd.merge(test,rx_payment, on='patient_id',how='left')
#print('after merging rx_payment')
print(train.shape,test.shape)
train=pd.merge(train,train_pract_screen_ratio, on='patient_id',how='left')
test=pd.merge(test,test_pract_screen_ratio, on='patient_id',how='left')
print('after merging pract_scree_ratio')
print(train.shape,test.shape)
###############################################################################################
def preprocess_data(train,test):
y=train['is_screener']
id_test=test['patient_id']
train=train.drop(['patient_id','is_screener'],axis=1)
test=test.drop(['patient_id'],axis=1)
for f in train.columns:
if train[f].dtype == 'object':
print(f)
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train[f].values) + list(test[f].values))
train[f] = lbl.transform(list(train[f].values))
test[f] = lbl.transform(list(test[f].values))
return id_test,test,train,y
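

# --- Hedged illustration (not used by the pipeline below) ----------------------
# preprocess_data() label-encodes every object column jointly across train and
# test so the integer codes agree; the toy frames below show the idea.
def _demo_preprocess_data():
    toy_train = pd.DataFrame({'patient_id': [1, 2], 'is_screener': [0, 1], 'state': ['CA', 'NY']})
    toy_test = pd.DataFrame({'patient_id': [3], 'state': ['CA']})
    ids, test_enc, train_enc, target = preprocess_data(toy_train, toy_test)
    print(train_enc, test_enc, sep='\n')
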
##prepare a sparse matrix
train=train.fillna(0)
test=test.fillna(0)
id_test,test,train,y=preprocess_data(train,test)
#print(train.columns)
print(train.shape,test.shape)
#print(train.columns)
X=np.asarray(train)
y=np.asarray(y)
X_test=np.asarray(test)
X,y=shuffle(X,y,random_state=9)
X_train,X_val,y_train,y_val = train_test_split(X,y,test_size=0.2,random_state=17)
from sklearn import preprocessing
#scl=decomposition.PCA(n_components=30,whiten=True)
#scl=preprocessing.RobustScaler()
#X_train=scl.fit_transform(X_train)
#X_val=scl.transform(X_val)
#X_test=scl.transform(X_test)
dval=xgb.DMatrix(data=X_val,label=y_val)
dtrain=xgb.DMatrix(data=X_train,label=y_train)
DTest=xgb.DMatrix(data=X_test)
watchlist = [(dval,'eval'), (dtrain,'train')]
params = {"objective": "binary:logistic",
"eta": 0.01,
"eta_decay":0.5,
"max_depth": 12,
"silent":1,
"subsample": 0.9,
"colsample_bytree": 0.65,
"seed": 1193,
"booster": "gbtree",
"nthread":-1,
"eval_metric":'auc'
}
#
clf = xgb.train(params, dtrain, num_boost_round=400, evals=watchlist, early_stopping_rounds=10,verbose_eval=True, maximize= False)
predictions=clf.predict(DTest,ntree_limit=clf.best_ntree_limit)
score=clf.best_score
model='XGBOOST_onselected-features'
#
# predict on test set
submission='%s_score_%03f.csv' %(model,score)
# create submission file
preds = pd.DataFrame({"patient_id": id_test, 'predict_screener': predictions})
import numpy as np # deal with data
import pandas as pd # deal with data
import re # regular expression
from bs4 import BeautifulSoup # parse the review HTML
from nltk.corpus import stopwords # Import the stop word list
from gensim.models import word2vec # use word2vec (skip-gram model) to build word feature vectors
from sklearn.model_selection import train_test_split # split the data into train and test sets
import torch
from torch.utils.data import Dataset,TensorDataset
import torch.nn as nn
from tqdm import tqdm
from RQ1 import model
from collections import Counter
from sklearn import metrics
from RQ1.utils import file_opt
import jsonlines
from matplotlib import pyplot as plt
import seaborn as sns
import config
def review_to_wordlist(review,remove_stop_words=False):
    # 1. remove HTML
reivew_text=BeautifulSoup(review,'lxml').get_text()
    # 2. remove non-letters
latters_only=re.sub("[^a-zA-Z]",' ',reivew_text)
#3.Convert to lower case,split into individual words
words=latters_only.lower().split()
#4.Remove stop words
if remove_stop_words:
stop=set(stopwords.words('english'))
words=[w for w in words if not w in stop]
    # 5. return a list of words
return words
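

# --- Hedged usage sketch --------------------------------------------------------
# review_to_wordlist() strips markup and non-letters and lower-cases the text;
# with remove_stop_words=True it also drops English stopwords (this needs the
# NLTK stopword corpus to be available locally).
def _demo_review_to_wordlist():
    text = "<p>This widget crashes when I click Save!</p>"
    print(review_to_wordlist(text))                          # keeps stopwords
    print(review_to_wordlist(text, remove_stop_words=True))  # drops them
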
# build an average feature vector for a single list of words
def makeFeatureVec(words,model,num_features):
featureVec = np.zeros((num_features,), dtype="float32")
nwords=0
index2word_set = set(model.wv.index2word) #get name
for word in words:
        if word in index2word_set: # if the word is in the vocabulary, add its feature vector
nwords+=1
featureVec=np.add(featureVec,model[word])
if nwords == 0:
pass
else:
featureVec=np.divide(featureVec,nwords) # average each featureVector
return featureVec
# average feature vectors for all reviews
def getAvgFeatureVecs(reviews,model,num_features):
counter=0
reviewFeatureVecs = np.zeros((len(reviews), num_features), dtype="float32") #features size=len(reviews) X num_features
for review in tqdm(reviews): # loop each review(word)
vector = makeFeatureVec(review,model,num_features)
reviewFeatureVecs[counter]=vector # get each word's featureVectors
counter+=1
return reviewFeatureVecs
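

# --- Hedged usage sketch (toy corpus; parameters are illustrative only) ----------
# Builds a tiny word2vec model and averages its vectors with getAvgFeatureVecs();
# the `size`/`index2word` API assumed here matches the gensim 3.x calls used
# elsewhere in this module.
def _demo_feature_vectors():
    sentences = [['app', 'crashes', 'on', 'save'], ['add', 'dark', 'mode']]
    toy_model = word2vec.Word2Vec(sentences, size=10, min_count=1)
    vecs = getAvgFeatureVecs(sentences, toy_model, 10)
    print(vecs.shape)  # (2, 10)
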
def nag_sample_balance_data(dataset):
print("********* Negative Sample Balance ***********")
bugs = dataset.loc[dataset["label"]==0]
features = dataset.loc[dataset["label"]==1]
others = dataset.loc[dataset["label"]==2]
min_len = min(len(bugs), len(features), len(others))
bugs_b = bugs.sample(n=min_len, random_state=1)
feature_b = features.sample(n=min_len, random_state=1)
other_b = others.sample(n=min_len, random_state=1)
balanced_data = pd.concat([bugs_b, feature_b, other_b])
balanced_data = balanced_data.sample(frac=1, random_state=1)
balanced_data = balanced_data.reset_index(drop=True)
print("欠采样后数据集中各类的分布情况:{}".format(Counter(balanced_data["label"])))
return balanced_data
def binary_nag_sample_balance_data(dataset):
print("********* Negative Sample Balance ***********")
bugs = dataset.loc[dataset["label"]==0]
features = dataset.loc[dataset["label"]==1]
# others = dataset.loc[train_data["label"]==2]
min_len = min(len(bugs), len(features))
bugs_b = bugs.sample(n=min_len, random_state=1)
feature_b = features.sample(n=min_len, random_state=1)
# other_b = others.sample(n=min_len, random_state=1)
balanced_data = pd.concat([bugs_b, feature_b])
balanced_data = balanced_data.sample(frac=1, random_state=1)
balanced_data = balanced_data.reset_index(drop=True)
print("欠采样后数据集中各类的分布情况:{}".format(Counter(balanced_data["label"])))
return balanced_data
def k_split(X, y, k, total_k):
fold_size = len(X) // total_k
X_test = X[k*fold_size:(k+1)*fold_size]
X_train = np.concatenate((X[:k*fold_size], X[(k+1)*fold_size:]), axis=0)
y_test = y[k*fold_size:(k+1)*fold_size]
y_train = np.concatenate((y[:k*fold_size], y[(k+1)*fold_size:]), axis=0)
return X_train, X_test, y_train, y_test
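# Minimal usage sketch (hypothetical arrays, commented out): manual 10-fold split where fold k
# becomes the test set and the remaining folds are concatenated as the training set.
# X, y = np.arange(100), np.arange(100)
# X_tr, X_te, y_tr, y_te = k_split(X, y, k=0, total_k=10)  # X_te holds the first 10 samples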
def t2v(file_path, repo, time_delt, epoch, lr, batch, text_d, time_dim):
train_data = pd.read_csv(file_path)
train_data['datetime'] = pd.to_datetime(train_data['datetime'])
train_data['timedelta'] = train_data['datetime'] - train_data['datetime'][0]
train_data['timedelta'] = train_data.timedelta.dt.days // time_delt
    train_data = nag_sample_balance_data(train_data) # undersample to balance classes
train_data = train_data.sample(frac=1, random_state=1).reset_index(drop=True)
lstm = model.t2v_LSTM(time_dim)
optimizer = torch.optim.Adam(lstm.parameters(), lr=lr) # optimize all cnn parameters
loss_func = nn.CrossEntropyLoss() # loss function is CrossEntropyLoss
total_k = 10
for k in tqdm(range(total_k)):
X = train_data.timedelta.values
y = np.array(train_data['label'])
X_train, X_test, y_train, y_test = k_split(X, y, k, total_k)
X_test = torch.from_numpy(X_test).view(-1, 1, 1)
X_train = torch.from_numpy(np.array(X_train, dtype=np.float32)).view(-1, 1,1)
y_train = torch.from_numpy(np.array(y_train, dtype=np.int64)).view(-1, 1)
        deal_traindata1 = TensorDataset(X_train, y_train)  # wrap feature tensors and labels together
        load_train1 = torch.utils.data.DataLoader(dataset=deal_traindata1, batch_size=batch,
                                                  shuffle=False)  # load the data in batches
        for e in range(epoch):
            for step, (x_time, label) in enumerate(load_train1):
                label = label.view(-1)  # CrossEntropyLoss expects a 1-D target tensor
x_time = x_time.float()
output = lstm(x_time)
optimizer.zero_grad() # clear gradients for this training step
loss = loss_func(output, label)
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
X_test = X_test.float()
output_test = lstm(X_test) # test model and print:loss and accuracy
pred_y = torch.max(output_test, 1)[1].data.numpy()
pred_y_np = output_test.detach().numpy()
        true_y = np.zeros((len(y_test), 3), dtype=int)  # np.int was removed in newer NumPy; use the builtin int
for i in range(len(y_test)):
true_y[i][y_test[i]] = 1
fpr, tpr, _ = metrics.roc_curve(true_y.ravel(), pred_y_np.ravel())
roc_auc = metrics.auc(fpr, tpr)
print(metrics.classification_report(y_test, pred_y))
precision = metrics.precision_score(y_test, pred_y, average="macro")
recall = metrics.recall_score(y_test, pred_y, average="macro")
f1 = metrics.f1_score(y_test, pred_y, average="macro")
result = {"repository": repo, "precision": precision, "recall": recall, "f1": f1, "auc": roc_auc, "fpr": fpr, "tpr": tpr}
roc = {"repository": repo, "fpr": fpr.tolist(), "tpr": tpr.tolist()}
return result, roc
def lstm(file_path, repo, time_delt, epoch, lr, batch, text_d, time_dim):
train_data = pd.read_csv(file_path)
train_data['datetime'] = pd.to_datetime(train_data['datetime'])
train_data['timedelta'] = train_data['datetime'] - train_data['datetime'][0]
train_data['timedelta'] = train_data.timedelta.dt.days // time_delt
    train_data = nag_sample_balance_data(train_data) # undersample to balance classes
train_data = train_data.sample(frac=1, random_state=1).reset_index(drop=True)
lstm = model.single_LSTM(time_dim)
optimizer = torch.optim.Adam(lstm.parameters(), lr=lr) # optimize all cnn parameters
loss_func = nn.CrossEntropyLoss() # loss function is CrossEntropyLoss
total_k = 10
for k in tqdm(range(total_k)):
X = train_data.timedelta.values
y = np.array(train_data['label'])
X_train, X_test, y_train, y_test = k_split(X, y, k, total_k)
X_test = torch.from_numpy(X_test).view(-1, 1, 1)
X_train = torch.from_numpy(np.array(X_train, dtype=np.float32)).view(-1, 1,1)
y_train = torch.from_numpy(np.array(y_train, dtype=np.int64)).view(-1, 1)
        deal_traindata1 = TensorDataset(X_train, y_train)  # wrap feature tensors and labels together
        load_train1 = torch.utils.data.DataLoader(dataset=deal_traindata1, batch_size=batch,
                                                  shuffle=False)  # load the data in batches
        for e in range(epoch):
            for step, (x_time, label) in enumerate(load_train1):
                label = label.view(-1)  # CrossEntropyLoss expects a 1-D target tensor
x_time = x_time.float()
output = lstm(x_time)
optimizer.zero_grad() # clear gradients for this training step
loss = loss_func(output, label)
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
X_test = X_test.float()
output_test = lstm(X_test) # test model and print:loss and accuracy
pred_y = torch.max(output_test, 1)[1].data.numpy()
pred_y_np = output_test.detach().numpy()
        true_y = np.zeros((len(y_test), 3), dtype=int)
for i in range(len(y_test)):
true_y[i][y_test[i]] = 1
fpr, tpr, _ = metrics.roc_curve(true_y.ravel(), pred_y_np.ravel())
roc_auc = metrics.auc(fpr, tpr)
print(metrics.classification_report(y_test, pred_y))
precision = metrics.precision_score(y_test, pred_y, average="macro")
recall = metrics.recall_score(y_test, pred_y, average="macro")
f1 = metrics.f1_score(y_test, pred_y, average="macro")
result = {"repository": repo, "precision": precision, "recall": recall, "f1": f1, "auc": roc_auc, "fpr": fpr, "tpr": tpr}
roc = {"repository": repo, "fpr": fpr.tolist(), "tpr": tpr.tolist()}
return result, roc
def zeroR(file_path, repo, time_delt, epoch, lr, batch, text_d, time_dim):
train_data = pd.read_csv(file_path)
train_data['datetime'] = pd.to_datetime(train_data['datetime'])
train_data['timedelta'] = train_data['datetime'] - train_data['datetime'][0]
train_data['timedelta'] = train_data.timedelta.dt.days // time_delt
    train_data = nag_sample_balance_data(train_data) # undersample to balance classes
train_data = train_data.sample(frac=1, random_state=1).reset_index(drop=True)
X = train_data.timedelta.values
y = np.array(train_data['label'])
pred_y = np.zeros((len(y),1), dtype=int) # ZeroR
    true_y = np.zeros((len(y), 3), dtype=int)
for i in range(len(y)):
true_y[i][y[i]] = 1
pred_y_np = np.concatenate( (np.ones([len(y), 1], dtype=int), np.zeros([len(y), 2], dtype=int)), axis=1 )
fpr, tpr, _ = metrics.roc_curve(true_y.ravel(), pred_y_np.ravel())
roc_auc = metrics.auc(fpr, tpr)
print(metrics.classification_report(y, pred_y))
precision = metrics.precision_score(y, pred_y, average="macro")
recall = metrics.recall_score(y, pred_y, average="macro")
f1 = metrics.f1_score(y, pred_y, average="macro")
result = {"repository": repo, "precision": precision, "recall": recall, "f1": f1, "auc": roc_auc, "fpr": fpr, "tpr": tpr}
roc = {"repository": repo, "fpr": fpr.tolist(), "tpr": tpr.tolist()}
return result, roc
def random_guessing(file_path, repo, time_delt, epoch, lr, batch, text_d, time_dim):
train_data = pd.read_csv(file_path)
train_data['datetime'] = pd.to_datetime(train_data['datetime'])
train_data['timedelta'] = train_data['datetime'] - train_data['datetime'][0]
train_data['timedelta'] = train_data.timedelta.dt.days // time_delt
    train_data = nag_sample_balance_data(train_data) # undersample to balance classes
train_data = train_data.sample(frac=1, random_state=1).reset_index(drop=True)
X = train_data.timedelta.values
y = np.array(train_data['label'])
    pred_y = np.random.randint(3, size=(len(y), 1))  # random guessing
    pred_y_np = np.full([len(y), 3], 1/3)
    true_y = np.zeros((len(y), 3), dtype=int)
for i in range(len(y)):
true_y[i][y[i]] = 1
fpr, tpr, _ = metrics.roc_curve(true_y.ravel(), pred_y_np.ravel())
roc_auc = metrics.auc(fpr, tpr)
precision = metrics.precision_score(y, pred_y, average="macro")
recall = metrics.recall_score(y, pred_y, average="macro")
f1 = metrics.f1_score(y, pred_y, average="macro")
result = {"repository": repo, "precision": precision, "recall": recall, "f1": f1, "auc": roc_auc, "fpr": fpr, "tpr": tpr}
roc = {"repository": repo, "fpr": fpr.tolist(), "tpr": tpr.tolist()}
return result, roc
def plot_roc():
with jsonlines.open("./results/t2v_result_roc.jsonl", mode="r") as reader:
for r in reader:
roc_t2v = r
with jsonlines.open("./results/lstm_result_roc.jsonl", mode="r") as reader:
for r in reader:
roc_lstm = r
with jsonlines.open("./results/zeroR_result_roc.jsonl", mode="r") as reader:
for r in reader:
roc_zeroR = r
with jsonlines.open("./results/random_result_roc.jsonl", mode="r") as reader:
for r in reader:
roc_random = r
for i in range(len(roc_t2v)):
plt.plot(roc_t2v[i]["fpr"], roc_t2v[i]["tpr"], label=roc_t2v[i]["repository"])
plt.plot(roc_random[0]["fpr"], roc_random[0]["tpr"], label="random guessing & zeroR", linestyle="--")
plt.show()
def main():
result = pd.DataFrame(columns=["repository", "precision", "recall", "f1", "auc", "fpr", "tpr"])
result_lstm = pd.DataFrame(columns=["repository", "precision", "recall", "f1", "auc", "fpr", "tpr"])
result_zeroR = pd.DataFrame(columns=["repository", "precision", "recall", "f1", "auc", "fpr", "tpr"])
result_random = pd.DataFrame(columns=["repository", "precision", "recall", "f1", "auc", "fpr", "tpr"])
roc_t2v, roc_lstm, roc_zeroR, roc_random = [], [], [], []
repo_list = file_opt.read_txt(config.code_path + "/resource/repo_list.txt")
for repo in repo_list:
file_path = config.data_path + "/" + repo + "/" + repo.replace("/", "_") + "_t2v.csv"
time_delt = 30
epoch = 20
lr = 1e-5
batch = 16
text_d = 100
time_d = 40
result = result.append(t2v(file_path, repo, time_delt, epoch, lr, batch, text_d, time_d)[0], ignore_index=True)
        result_lstm = result_lstm.append(lstm(file_path, repo, time_delt, epoch, lr, batch, text_d, time_d)[0], ignore_index=True)
result_zeroR = result_zeroR.append(zeroR(file_path, repo, time_delt, epoch, lr, batch, text_d, time_d)[0], ignore_index=True)
result_random = result_random.append(random_guessing(file_path, repo, time_delt, epoch, lr, batch, text_d, time_d)[0], ignore_index=True)
roc_t2v.append(t2v(file_path, repo, time_delt, epoch, lr, batch, text_d, time_d)[1])
roc_lstm.append(lstm(file_path, repo, time_delt, epoch, lr, batch, text_d, time_d)[1])
roc_zeroR.append(zeroR(file_path, repo, time_delt, epoch, lr, batch, text_d, time_d)[1])
roc_random.append(random_guessing(file_path, repo, time_delt, epoch, lr, batch, text_d, time_d)[1])
result.to_csv("./results/t2v_result.csv")
result_lstm.to_csv("./results/lstm_result.csv")
result_zeroR.to_csv("./results/zeroR_result.csv")
result_random.to_csv("./results/random_result.csv")
with jsonlines.open("./results/t2v_result_roc.jsonl", mode="w") as writer:
writer.write(roc_t2v)
with jsonlines.open("./results/lstm_result_roc.jsonl", mode="w") as writer:
writer.write(roc_lstm)
with jsonlines.open("./results/zeroR_result_roc.jsonl", mode="w") as writer:
writer.write(roc_zeroR)
with jsonlines.open("./results/random_result_roc.jsonl", mode="w") as writer:
writer.write(roc_random)
def plot_f1():
result = pd.read_csv("./results/t2v_result.csv")
result_lstm = pd.read_csv("./results/lstm_result.csv")
result_zeroR = pd.read_csv("./results/zeroR_result.csv")
result_random = pd.read_csv("./results/random_result.csv")
print(result["f1"].describe())
print(result_lstm["f1"].describe())
print(result_zeroR["f1"].describe())
print(result_random["f1"].describe())
df = pd.concat([result[["f1"]], result_lstm[["f1"]], result_zeroR[["f1"]], result_random[["f1"]]], axis=1)
df.columns = ["Time2Vec-LSTM","LSTM", "ZeroR", "Random Guessing"]
plt.figure(figsize=[4,3])
df.boxplot(column=["Time2Vec-LSTM", "LSTM", "ZeroR", "Random Guessing"])
plt.show()
df_avg_f1 = pd.DataFrame(columns=["Time2Vec", "LSTM", "ZeroR", "Random Guessing"])
df_avg_f1 = df_avg_f1.append({"Time2Vec": result["precision"].mean(),
"LSTM": result_lstm["precision"].mean(),
"ZeroR": result_zeroR["precision"].mean(),
"Random Guessing": result_random["precision"].mean()}
,ignore_index=True)
df_avg_f1 = df_avg_f1.append({"Time2Vec": result["recall"].mean(),
"LSTM": result_lstm["recall"].mean(),
"ZeroR": result_zeroR["recall"].mean(),
"Random Guessing": result_random["recall"].mean()}
,ignore_index=True)
df_avg_f1 = df_avg_f1.append({"Time2Vec": result["f1"].mean(),
"LSTM": result_lstm["f1"].mean(),
"ZeroR": result_zeroR["f1"].mean(),
"Random Guessing": result_random["f1"].mean()}
,ignore_index=True)
df_avg_f1 = df_avg_f1.append({"Time2Vec": result["auc"].mean(),
"LSTM": result_lstm["auc"].mean(),
"ZeroR": result_zeroR["auc"].mean(),
"Random Guessing": result_random["auc"].mean()}
,ignore_index=True)
df_avg_f1.index = ['Precision', 'Recall', 'F1-measure', 'AUC']
df_avg_f1.plot(kind='bar', rot=0, fontsize=14)
plt.show()
def plot_auc_box():
with jsonlines.open("./results/t2v_result_roc.jsonl", mode="r") as reader:
for r in reader:
roc_t2v = r
with jsonlines.open("./results/zeroR_result_roc.jsonl", mode="r") as reader:
for r in reader:
roc_zeroR = r
with jsonlines.open("./results/random_result_roc.jsonl", mode="r") as reader:
for r in reader:
roc_random = r
result = pd.read_csv("./results/t2v_result.csv")
result_zeroR = pd.read_csv("./results/zeroR_result.csv")
result_random = pd.read_csv("./results/random_result.csv")
    auc_pd = pd.DataFrame(columns=["T2V", "ZeroR", "Random Guessing"])
"""
Code to create the baseline decision variables
"""
import pandas as pd
import time
from baseline_ajustes import variables_decision_nacional, variables_decision_exp
from limpieza_masters import limpieza_data, ajustar_tarifario
from output import guardar_outputs
# Load the data. Returns a dictionary of DataFrames
start_time = time.time()
sheet_names = ['master_demanda', 'master_tarifario', 'master_homologacion']
data = limpieza_data('input/datamaster_baseline.xlsx', sheet_names, is_baseline=True)
# Run the baseline functions. Note that domestic (Nal) and export (Exp) demand come from the same worksheet
## Domestic (Nacional)
demanda_nal = data['master_demanda'].loc[data['master_demanda']['id_ciudad_origen'] != 'CGNA_PORT']
demanda_exp = data['master_demanda'].loc[data['master_demanda']['id_ciudad_origen'] == 'CGNA_PORT']
# Validate that the tariff table has no duplicates
data['master_tarifario'] = ajustar_tarifario(data['master_tarifario'])
decision_nal, demanda_nal_omitida = variables_decision_nacional(demanda_nal, data['master_tarifario'],
data['master_homologacion'])
decision_exp, demanda_exp_omitida = variables_decision_exp(demanda_exp,data['master_tarifario'], factor_eficiencia=0.78)
# Concatenate the domestic and export decision files and save them to output/
pd.concat([decision_nal, decision_exp])
# -*- coding: utf-8 -*-
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# Author: <EMAIL>
from mlhub.pkg import mlask, mlcat
MOVIELENS = '100k' # Select Movielens data size: 100k, 1m, 10m, or 20m.
TOPK = 10 # Top k items to recommend.
TITLEN = 45 # Truncation of titles in printing to screen.
SMPLS = 10 # Number of observations to display.
MOVDISP = 5 # Number of movies to display for a specific user.
mlcat("Microsoft Recommenders Best Practice", """\
Welcome to a demo of the Microsoft open source Recommendations toolkit.
This is a Microsoft open source project though not a supported product.
Pull requests are most welcome.
This demo runs several recommender algorithms on the traditional MovieLens
benchmark dataset which is freely available from
https://grouplens.org/datasets/movielens/.
""")
# ----------------------------------------------------------------------
# Setup
# ----------------------------------------------------------------------
import sys
stderr = sys.stderr
stdout = sys.stdout
devnull = open('/dev/null', 'w')
sys.stderr = devnull
# Import the required libraries.
import os
import time
import itertools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
import recutils
import imdb
import urllib.request
from shutil import copyfile
from reco_utils.recommender.sar.sar_singlenode import SARSingleNode
from reco_utils.dataset import movielens
from reco_utils.dataset.python_splitters import python_random_split
from reco_utils.evaluation.python_evaluation import (
map_at_k,
ndcg_at_k,
precision_at_k,
recall_at_k
)
sys.stderr = stderr
mlask()
print("\nSystem version: {}".format(sys.version))
print("Pandas version: {}\n".format(pd.__version__))
mlcat("SAR Algorithm","""\
SAR, the smart adaptive recommendation algorithm, is a fast algorithm for
personalized recommendations based on user history using collaborative
filtering. It produces easily explainable and interpretable recommendations
and handles "cold item" and "semi-cold user" scenarios.
The training data schema is:
<User ID> <Item ID> <Time> [<Event Type>] [<Event Weight>].
Each observation is an interaction between a user and item (e.g., a movie
watched on a streaming site or an item clicked on an e-commerce website).
The MovieLens dataset records movie ratings provided by viewers. The ratings
are treated as the event weights. The smaller of the available datasets is
used, consisting of 100K users.
The dataset is being loaded. Once loaded we can review the first
few observations.
""")
data = movielens.load_pandas_df(size = MOVIELENS,
header = ['UserId',
'MovieId',
'Rating',
'Timestamp']
)
# Convert float precision to 32-bit to reduce memory consumption.
data.loc[:, 'Rating'] = data['Rating'].astype(np.float32)
# Load the movie title index.
titles = pd.read_table('titles.txt',
sep = '|',
header = None,
encoding = "ISO-8859-1")
titles = titles.loc[:, 0:1]
titles.columns = ["MovieId", "MovieTitle"]
mlask(end="\n")
mlcat("Sample Ratings", """\
Below we illustrate the ratings that a number of users have provided for
specific movies. Note that the Rating column will be treated as the Event
Weight and we are not displaying the Time column. From the 100,000 events
in the dataset we will be partitioning the data into training and test
subsets. The model is built from the training dataset.
""")
# Illustrative sample output. Rating is really a 1-5 integer and not a
# float so be sure to display as an integer rather than a
# float. Decide not to display Timestamp unless we convert to
# something understandable.
# TODO Replace truncated movie title with ... to be more informative.
smpl = pd.merge(data, titles, on="MovieId").sample(SMPLS)
smpl['MovieTitle'] = smpl['MovieTitle'].str[:TITLEN]
smpl['Rating'] = pd.to_numeric(smpl['Rating'], downcast='integer')
del smpl['Timestamp'] # Drop the column from printing.
print(smpl.to_string())
# Create train and test datasets.
train, test = python_random_split(data)
# Create a model object.
header = {
"col_user" : "UserId",
"col_item" : "MovieId",
"col_rating" : "Rating",
"col_timestamp" : "Timestamp",
}
model = SARSingleNode(remove_seen = True,
similarity_type = "jaccard",
time_decay_coefficient = 30,
time_now = None,
timedecay_formula = True,
**header
)
start_time = time.time()
model.fit(train)
train_time = time.time() - start_time
start_time = time.time()
topk = model.recommend_k_items(test)
test_time = time.time() - start_time
# TODO: remove this call when the model returns same type as input
topk['UserId'] = pd.to_numeric(topk['UserId'])
topk['MovieId'] = pd.to_numeric(topk['MovieId'])
import unittest
from unittest.mock import patch, PropertyMock
import time
import mt5_correlation.correlation as correlation
import pandas as pd
from datetime import datetime, timedelta
from test_mt5 import Symbol
import random
import os
class TestCorrelation(unittest.TestCase):
# Mock symbols. 4 Symbols, 3 visible.
mock_symbols = [Symbol(name='SYMBOL1', visible=True),
Symbol(name='SYMBOL2', visible=True),
Symbol(name='SYMBOL3', visible=False),
Symbol(name='SYMBOL4', visible=True),
Symbol(name='SYMBOL5', visible=True)]
# Start and end date for price data and mock prices: base; correlated; and uncorrelated.
start_date = None
end_date = None
price_columns = None
mock_base_prices = None
mock_correlated_prices = None
mock_uncorrelated_prices = None
def setUp(self):
"""
        Creates some price data for use in tests.
:return:
"""
# Start and end date for price data and mock price dataframes. One for: base; correlated; uncorrelated and
# different dates.
self.start_date = datetime(2021, 1, 1, 1, 5, 0)
self.end_date = datetime(2021, 1, 1, 11, 30, 0)
self.price_columns = ['time', 'close']
self.mock_base_prices = pd.DataFrame(columns=self.price_columns)
self.mock_correlated_prices = pd.DataFrame(columns=self.price_columns)
self.mock_uncorrelated_prices = pd.DataFrame(columns=self.price_columns)
self.mock_correlated_different_dates = pd.DataFrame(columns=self.price_columns)
self.mock_inverse_correlated_prices = pd.DataFrame(columns=self.price_columns)
# Build the price data for the test. One price every 5 minutes for 500 rows. Base will use min for price,
# correlated will use min + 5 and uncorrelated will use random
for date in (self.start_date + timedelta(minutes=m) for m in range(0, 500*5, 5)):
self.mock_base_prices = self.mock_base_prices.append(pd.DataFrame(columns=self.price_columns,
data=[[date, date.minute]]))
self.mock_correlated_prices = \
self.mock_correlated_prices.append(pd.DataFrame(columns=self.price_columns,
data=[[date, date.minute + 5]]))
self.mock_uncorrelated_prices = \
self.mock_uncorrelated_prices.append(pd.DataFrame(columns=self.price_columns,
data=[[date, random.randint(0, 1000000)]]))
self.mock_correlated_different_dates = \
self.mock_correlated_different_dates.append(pd.DataFrame(columns=self.price_columns,
data=[[date + timedelta(minutes=100),
date.minute + 5]]))
self.mock_inverse_correlated_prices = \
self.mock_inverse_correlated_prices.append(pd.DataFrame(columns=self.price_columns,
data=[[date, (date.minute + 5) * -1]]))
@patch('mt5_correlation.mt5.MetaTrader5')
def test_calculate(self, mock):
"""
Test the calculate method. Uses mock for MT5 symbols and prices.
:param mock:
:return:
"""
# Mock symbol return values
mock.symbols_get.return_value = self.mock_symbols
# Correlation class
cor = correlation.Correlation(monitoring_threshold=1, monitor_inverse=True)
# Calculate for price data. We should have 100% matching dates in sets. Get prices should be called 4 times.
# We don't have a SYMBOL3 as this is set as not visible. Correlations should be as follows:
# SYMBOL1:SYMBOL2 should be fully correlated (1)
# SYMBOL1:SYMBOL4 should be uncorrelated (0)
# SYMBOL1:SYMBOL5 should be negatively correlated
# SYMBOL2:SYMBOL5 should be negatively correlated
# We will not use p_value as the last set uses random numbers so p value will not be useful.
mock.copy_rates_range.side_effect = [self.mock_base_prices, self.mock_correlated_prices,
self.mock_uncorrelated_prices, self.mock_inverse_correlated_prices]
cor.calculate(date_from=self.start_date, date_to=self.end_date, timeframe=5, min_prices=100,
max_set_size_diff_pct=100, overlap_pct=100, max_p_value=1)
# Test the output. We should have 6 rows. S1:S2 c=1, S1:S4 c<1, S1:S5 c=-1, S2:S5 c=-1. We are not checking
# S2:S4 or S4:S5
self.assertEqual(len(cor.coefficient_data.index), 6, "There should be six correlations rows calculated.")
self.assertEqual(cor.get_base_coefficient('SYMBOL1', 'SYMBOL2'), 1,
"The correlation for SYMBOL1:SYMBOL2 should be 1.")
self.assertTrue(cor.get_base_coefficient('SYMBOL1', 'SYMBOL4') < 1,
"The correlation for SYMBOL1:SYMBOL4 should be <1.")
self.assertEqual(cor.get_base_coefficient('SYMBOL1', 'SYMBOL5'), -1,
"The correlation for SYMBOL1:SYMBOL5 should be -1.")
self.assertEqual(cor.get_base_coefficient('SYMBOL2', 'SYMBOL5'), -1,
"The correlation for SYMBOL2:SYMBOL5 should be -1.")
# Monitoring threshold is 1 and we are monitoring inverse. Get filtered correlations. There should be 3 (S1:S2,
# S1:S5 and S2:S5)
self.assertEqual(len(cor.filtered_coefficient_data.index), 3,
"There should be 3 rows in filtered coefficient data when we are monitoring inverse "
"correlations.")
# Now aren't monitoring inverse correlations. There should only be one correlation when filtered
cor.monitor_inverse = False
self.assertEqual(len(cor.filtered_coefficient_data.index), 1,
"There should be only 1 rows in filtered coefficient data when we are not monitoring inverse "
"correlations.")
# Now were going to recalculate, but this time SYMBOL1:SYMBOL2 will have non overlapping dates and coefficient
# should be None. There shouldn't be a row. We should have correlations for S1:S4, S1:S5 and S4:S5
mock.copy_rates_range.side_effect = [self.mock_base_prices, self.mock_correlated_different_dates,
self.mock_correlated_prices, self.mock_correlated_prices]
cor.calculate(date_from=self.start_date, date_to=self.end_date, timeframe=5, min_prices=100,
max_set_size_diff_pct=100, overlap_pct=100, max_p_value=1)
self.assertEqual(len(cor.coefficient_data.index), 3, "There should be three correlations rows calculated.")
self.assertEqual(cor.coefficient_data.iloc[0, 2], 1, "The correlation for SYMBOL1:SYMBOL4 should be 1.")
self.assertEqual(cor.coefficient_data.iloc[1, 2], 1, "The correlation for SYMBOL1:SYMBOL5 should be 1.")
self.assertEqual(cor.coefficient_data.iloc[2, 2], 1, "The correlation for SYMBOL4:SYMBOL5 should be 1.")
# Get the price data used to calculate the coefficients for symbol 1. It should match mock_base_prices.
price_data = cor.get_price_data('SYMBOL1')
self.assertTrue(price_data.equals(self.mock_base_prices), "Price data returned post calculation should match "
"mock price data.")
def test_calculate_coefficient(self):
"""
Tests the coefficient calculation.
:return:
"""
# Correlation class
cor = correlation.Correlation()
# Test 2 correlated sets
coefficient = cor.calculate_coefficient(self.mock_base_prices, self.mock_correlated_prices)
self.assertEqual(coefficient, 1, "Coefficient should be 1.")
# Test 2 uncorrelated sets. Set p value to 1 to force correlation to be returned.
coefficient = cor.calculate_coefficient(self.mock_base_prices, self.mock_uncorrelated_prices, max_p_value=1)
self.assertTrue(coefficient < 1, "Coefficient should be < 1.")
        # Test 2 sets where prices don't overlap
        coefficient = cor.calculate_coefficient(self.mock_base_prices, self.mock_correlated_different_dates)
        self.assertTrue(coefficient < 1, "Coefficient should be less than 1.")
# Test 2 inversely correlated sets
coefficient = cor.calculate_coefficient(self.mock_base_prices, self.mock_inverse_correlated_prices)
self.assertEqual(coefficient, -1, "Coefficient should be -1.")
@patch('mt5_correlation.mt5.MetaTrader5')
def test_get_ticks(self, mock):
"""
Test that caching works. For the purpose of this test, we can use price data rather than tick data.
Mock 2 different sets of prices. Get three times. Base, One within cache threshold and one outside. Set 1
should match set 2 but differ from set 3.
:param mock:
:return:
"""
# Correlation class to test
cor = correlation.Correlation()
# Mock the tick data to contain 2 different sets. Then get twice. They should match as the data was cached.
mock.copy_ticks_range.side_effect = [self.mock_base_prices, self.mock_correlated_prices]
# We need to start and stop the monitor as this will set the cache time
cor.start_monitor(interval=10, calculation_params={'from': 10, 'min_prices': 0, 'max_set_size_diff_pct': 0,
'overlap_pct': 0, 'max_p_value': 1}, cache_time=3)
cor.stop_monitor()
# Get the ticks within cache time and check that they match
base_ticks = cor.get_ticks('SYMBOL1', None, None)
cached_ticks = cor.get_ticks('SYMBOL1', None, None)
self.assertTrue(base_ticks.equals(cached_ticks),
"Both sets of tick data should match as set 2 came from cache.")
# Wait 3 seconds
time.sleep(3)
# Retrieve again. This one should be different as the cache has expired.
non_cached_ticks = cor.get_ticks('SYMBOL1', None, None)
self.assertTrue(not base_ticks.equals(non_cached_ticks),
"Both sets of tick data should differ as cached data had expired.")
@patch('mt5_correlation.mt5.MetaTrader5')
def test_start_monitor(self, mock):
"""
Test that starting the monitor and running for 2 seconds produces two sets of coefficient history when using an
interval of 1 second.
:param mock:
:return:
"""
# Mock symbol return values
mock.symbols_get.return_value = self.mock_symbols
# Create correlation class. We will set a divergence threshold so that we can test status.
cor = correlation.Correlation(divergence_threshold=0.8, monitor_inverse=True)
# Calculate for price data. We should have 100% matching dates in sets. Get prices should be called 4 times.
        # We don't have a SYMBOL3 as this is set as not visible. All pairs should be correlated for the purpose of this
        # test.
mock.copy_rates_range.side_effect = [self.mock_base_prices, self.mock_correlated_prices,
self.mock_correlated_prices, self.mock_inverse_correlated_prices]
cor.calculate(date_from=self.start_date, date_to=self.end_date, timeframe=5, min_prices=100,
max_set_size_diff_pct=100, overlap_pct=100, max_p_value=1)
# We will build some tick data for each symbol and patch it in. Tick data will be from 10 seconds ago to now.
# We only need to patch in one set of tick data for each symbol as it will be cached.
columns = ['time', 'ask']
starttime = datetime.now() - timedelta(seconds=10)
tick_data_s1 = pd.DataFrame(columns=columns)
tick_data_s2 = pd.DataFrame(columns=columns)
tick_data_s4 = pd.DataFrame(columns=columns)
tick_data_s5 = pd.DataFrame(columns=columns)
now = datetime.now()
price_base = 1
while starttime < now:
tick_data_s1 = tick_data_s1.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * 0.5]]))
            tick_data_s2 = tick_data_s2.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * 0.1]]))
            tick_data_s4 = tick_data_s4.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * 0.25]]))
            tick_data_s5 = tick_data_s5.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * -0.25]]))
starttime = starttime + timedelta(milliseconds=10*random.randint(0, 100))
price_base += 1
# Patch it in
mock.copy_ticks_range.side_effect = [tick_data_s1, tick_data_s2, tick_data_s4, tick_data_s5]
        # Start the monitor. Run every second. Use ~10 and ~5 seconds of data. We're not testing the overlap and price
# data quality metrics here as that is set elsewhere so these can be set to not take effect. Set cache level
# high and don't use autosave. Timer runs in a separate thread so test can continue after it has started.
cor.start_monitor(interval=1, calculation_params=[{'from': 0.66, 'min_prices': 0,
'max_set_size_diff_pct': 0, 'overlap_pct': 0,
'max_p_value': 1},
{'from': 0.33, 'min_prices': 0,
'max_set_size_diff_pct': 0, 'overlap_pct': 0,
'max_p_value': 1}], cache_time=100, autosave=False)
# Wait 2 seconds so timer runs twice
time.sleep(2)
# Stop the monitor
cor.stop_monitor()
# We should have 2 coefficients calculated for each symbol pair (6), for each date_from value (2),
# for each run (2) so 24 in total.
self.assertEqual(len(cor.coefficient_history.index), 24)
# We should have 2 coefficients calculated for a single symbol pair and timeframe
self.assertEqual(len(cor.get_coefficient_history({'Symbol 1': 'SYMBOL1', 'Symbol 2': 'SYMBOL2',
'Timeframe': 0.66})),
2, "We should have 2 history records for SYMBOL1:SYMBOL2 using the 0.66 min timeframe.")
# The status should be DIVERGED for SYMBOL1:SYMBOL2 and CORRELATED for SYMBOL1:SYMBOL4 and SYMBOL2:SYMBOL4.
self.assertTrue(cor.get_last_status('SYMBOL1', 'SYMBOL2') == correlation.STATUS_DIVERGED)
self.assertTrue(cor.get_last_status('SYMBOL1', 'SYMBOL4') == correlation.STATUS_CORRELATED)
self.assertTrue(cor.get_last_status('SYMBOL2', 'SYMBOL4') == correlation.STATUS_CORRELATED)
# We are monitoring inverse correlations, status for SYMBOL1:SYMBOL5 should be DIVERGED
self.assertTrue(cor.get_last_status('SYMBOL2', 'SYMBOL5') == correlation.STATUS_DIVERGED)
@patch('mt5_correlation.mt5.MetaTrader5')
def test_load_and_save(self, mock):
"""Calculate and run monitor for a few seconds. Store the data. Save it, load it then compare against stored
data."""
# Correlation class
cor = correlation.Correlation()
# Patch symbol and price data, then calculate
mock.symbols_get.return_value = self.mock_symbols
mock.copy_rates_range.side_effect = [self.mock_base_prices, self.mock_correlated_prices,
self.mock_correlated_prices, self.mock_inverse_correlated_prices]
cor.calculate(date_from=self.start_date, date_to=self.end_date, timeframe=5, min_prices=100,
max_set_size_diff_pct=100, overlap_pct=100, max_p_value=1)
# Patch the tick data
columns = ['time', 'ask']
starttime = datetime.now() - timedelta(seconds=10)
tick_data_s1 = pd.DataFrame(columns=columns)
tick_data_s3 = pd.DataFrame(columns=columns)
tick_data_s4 = pd.DataFrame(columns=columns)
now = datetime.now()
price_base = 1
while starttime < now:
tick_data_s1 = tick_data_s1.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * 0.5]]))
            tick_data_s3 = tick_data_s3.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * 0.1]]))
import datetime
import numpy as np
import pandas as pd
import pandas.testing as pdt
from cape_privacy.pandas import dtypes
from cape_privacy.pandas.transformations import DateTruncation
from cape_privacy.pandas.transformations import NumericRounding
def _make_apply_numeric_rounding(input, expected_output, ctype, dtype):
transform = NumericRounding(dtype=ctype, precision=1)
df = pd.DataFrame({"amount": input}).astype(dtype)
expected = pd.DataFrame({"amount": expected_output}).astype(dtype)
df["amount"] = transform(df.amount)
return df, expected
def _make_apply_datetruncation(frequency, input_date, expected_date):
transform = DateTruncation(frequency=frequency)
    df = pd.DataFrame({"date": [input_date]})
    # Completed by analogy with the numeric-rounding helper above.
    expected = pd.DataFrame({"date": [expected_date]})
    df["date"] = transform(df.date)
    return df, expected
# -*- coding: utf-8 -*-
# Copyright 2017 The <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import sys
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import csv
import ffn
from keras.callbacks import TensorBoard, ModelCheckpoint
from windpuller import WindPuller
from dataset import DataSet
from feature import extract_from_file, extract_all_features
def read_ultimate(path, input_shape):
ultimate_features = np.load(path + "ultimate_feature." + str(input_shape[0]) +'.npy')
ultimate_features = np.reshape(ultimate_features, [-1, input_shape[0], input_shape[1]])
ultimate_labels = np.load(path + "ultimate_label." + str(input_shape[0]) +'.npy')
# ultimate_labels = np.reshape(ultimate_labels, [-1, 1])
train_set = DataSet(ultimate_features, ultimate_labels)
test_features = np.load(path + "ultimate_feature.test." + str(input_shape[0]) +'.npy')
test_features = np.reshape(test_features, [-1, input_shape[0], input_shape[1]])
test_labels = np.load(path + "ultimate_label.test." + str(input_shape[0]) +'.npy')
# test_labels = np.reshape(test_labels, [-1, 1])
test_set = DataSet(test_features, test_labels)
return train_set, test_set
def read_feature(path, input_shape, prefix):
ultimate_features = np.load("%s/%s_feature.%s.npy" % (path, prefix,
str(input_shape[0])))
ultimate_features = np.reshape(ultimate_features,
[-1, input_shape[0], input_shape[1]])
ultimate_labels = np.load("%s/%s_label.%s.npy" % (path, prefix,
str(input_shape[0])))
# ultimate_labels = np.reshape(ultimate_labels, [-1, 1])
train_set = DataSet(ultimate_features, ultimate_labels)
test_features = np.load("%s/%s_feature.test.%s.npy" %
(path, prefix, str(input_shape[0])))
test_features = np.reshape(test_features,
[-1, input_shape[0], input_shape[1]])
test_labels = np.load("%s/%s_label.test.%s.npy" % (path, prefix,
str(input_shape[0])))
# test_labels = np.reshape(test_labels, [-1, 1])
test_set = DataSet(test_features, test_labels)
return train_set, test_set
def calculate_cumulative_return(labels, pred):
cr = []
if len(labels) <= 0:
return cr
cr.append(1. * (1. + labels[0] * pred[0]))
for l in range(1, len(labels)):
cr.append(cr[l-1] * (1 + labels[l] * pred[l]))
cap = np.array(cr)
cr = cap - 1
return cr, cap
def calculate_cumulative_return_cost(labels, pred, fee=0.0002):
    '''Compute cumulative returns with transaction costs; starting capital is 1.
    params:
        labels: actual daily returns
        pred: predicted daily position in [0, 1]
    returns:
        cr: cumulative return series
        cap: capital curve
    '''
n = len(labels)
cap = np.ones(n+1)
for i in range(1, n+1):
cap[i] = cap[i-1] * (1 + labels[i-1] * pred[i-1] - np.abs(pred[i-1]) * fee)
cr = cap - 1
return cr, cap
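# Worked sketch (hypothetical numbers): with daily returns [0.01, -0.02], full long positions
# [1, 1] and the default fee of 0.0002, capital evolves as
#   cap[1] = 1.0    * (1 + 0.01 - 0.0002) = 1.0098
#   cap[2] = 1.0098 * (1 - 0.02 - 0.0002) = 0.9894...
# cr, cap = calculate_cumulative_return_cost(np.array([0.01, -0.02]), np.array([1.0, 1.0]))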
def plot_returns(df, f, n, title, output_dir):
    '''Plot the capital curve.
    '''
df.rename(columns={'Strategy':'策略',
'Benchmark':'指数'}).plot(figsize=(24, 16))
plt.xlabel('时间')
plt.ylabel('累计收益率')
plt.title('%s %s_%s择时做多策略vs买入持有策略累计收益率' % (title, f, n),
fontsize=22)
fig_dir = os.path.join(output_dir, '资金曲线')
if not(os.path.exists(fig_dir)):
os.mkdir(fig_dir)
fig_path = os.path.join(output_dir, '资金曲线',
'%s_%s' % (n, title))
plt.savefig(fig_path)
plt.close()
    print(' Finished plotting the capital curve\n')
print('-'*30)
def calc_perf(output, f, n, key, output_dir):
    '''Compute performance statistics, plot the capital curve and generate an investment report.
    '''
    # 1. Data preprocessing
# df = output.set_index(keys='date')
df = output.copy(deep=False)
df.index = pd.to_datetime(df.index)
df.drop(['Close', 'Pct_change', 'Position'], axis=1, inplace=True)
df['Strategy'] = df['Cum_return'] + 1
df['Benchmark'] = df['Buy_hold'] + 1
df.drop(['Cum_return', 'Buy_hold'], axis=1, inplace=True)
if key == 'train':
title = '训练集'
else:
title = '测试集'
    # 2. Plot the capital curve
plt.rcParams.update({'font.size': 18})
plot_returns(df, f, n, title, output_dir)
    # 3. Plot the correlation heatmap of the strategy and the benchmark
returns = df.to_returns().dropna()
returns.rename(columns={'Strategy':'策略',
'Benchmark':'指数'}).plot_corr_heatmap()
plt.title('%s相关系数热度图' % n)
cor_plt_dir = os.path.join(output_dir, '相关系数')
if not(os.path.exists(cor_plt_dir)):
os.mkdir(cor_plt_dir)
cor_plt_path = os.path.join(cor_plt_dir,
'%s_%s' % (n, title))
plt.savefig(cor_plt_path)
plt.close()
    print('Finished plotting the correlation heatmap')
    print('-'*30)
    # 4. Compute strategy performance
perf = df.calc_stats()
result = dict()
result['天数'] = df.shape[0] - 1
result['起始日期'] = perf['Strategy'].start.strftime('%Y-%m-%d')
result['截至日期'] = perf['Strategy'].end.strftime('%Y-%m-%d')
result['收益率'] = perf['Strategy'].total_return
result['年化收益率'] = perf['Strategy'].cagr
result['今年收益率'] = perf['Strategy'].ytd
result['最近6个月收益率'] = perf['Strategy'].six_month
result['最近3个月收益率'] = perf['Strategy'].three_month
result['当月收益率'] = perf['Strategy'].mtd
result['最大回撤'] = perf['Strategy'].max_drawdown
details = perf['Strategy'].drawdown_details
if details is None:
result['最大回撤周期数'] = 0
result['最长未创新高周期数'] = 0
result['平均回撤周期数'] = 0
else:
result['最大回撤周期数'] = \
int(details[details['drawdown'] == result['最大回撤']]['days'])
result['最长未创新高周期数'] = \
perf['Strategy'].drawdown_details['days'].max()
result['平均回撤周期数'] = perf['Strategy'].avg_drawdown_days
try:
result['夏普比率'] = perf['Strategy'].daily_sharpe
except ZeroDivisionError as e:
        print('Sharpe ratio denominator is zero!')
result['夏普比率'] = np.nan
result['最好日收益率'] = perf['Strategy'].best_day
result['最差日收益率'] = perf['Strategy'].worst_day
result['最好月收益率'] = perf['Strategy'].best_month
result['最差月收益率'] = perf['Strategy'].worst_month
result['最好年收益率'] = perf['Strategy'].best_year
result['最差年收益率'] = perf['Strategy'].worst_year
if (output.Position != 0).sum() != 0:
result['胜率'] = (output.Pct_change[output.Position != 0] > 0).sum() / (
            (output.Position != 0).sum())  # long-only win rate
result['交易次数'] = (output.Position != 0).sum()
result['满仓次数'] = (output.Position.abs() == 1).sum()
result['平均仓位'] = np.abs(output.Position.mean())
result['交易频率'] = result['天数'] / result['交易次数']
result['满仓频率'] = result['天数'] / result['满仓次数']
else:
result['胜率'] = np.nan
result['交易次数'] = 0
result['满仓次数'] = 0
result['平均仓位'] = 0
result['交易频率'] = np.nan
result['满仓频率'] = np.nan
    # 5. Write the result dict to a CSV file
result_dir = os.path.join(output_dir, '投资报告')
if not(os.path.exists(result_dir)):
os.mkdir(result_dir)
result_path = os.path.join(result_dir,
'%s_%s.csv' % (n, title))
with open(result_path, 'w') as csv_file:
csv.writer(csv_file).writerows(result.items())
def evaluate_model(model_path, code, output_dir, input_shape=[30, 61]):
extract_from_file("dataset/%s.csv" % code, output_dir, code)
train_set, test_set = read_feature(output_dir, input_shape, code)
saved_wp = WindPuller(input_shape).load_model(model_path)
scores = saved_wp.evaluate(test_set.images, test_set.labels, verbose=0)
print('Test loss:', scores[0])
print('test accuracy:', scores[1])
pred = saved_wp.predict(test_set.images, 1024)
[cr, cap] = calculate_cumulative_return(test_set.labels, pred)
# Output to a csv file
# Read in the date, close from original data file.
days_for_test = 700
tmp = pd.read_csv('dataset/%s.csv' % code, delimiter='\t')
# tmp.columns = ['date', 'open', 'high', 'low', 'close', 'volume']
date = tmp['date'][-days_for_test:]
close = tmp['close'][-days_for_test:]
output = pd.DataFrame({'Return': test_set.labels,
'Position': pred.reshape(-1),
'Capital': cap.reshape(-1),
'Close': close.values},
index=date,
columns=['Close', 'Return', 'Position', 'Capital'])
output.to_csv('output/%s.csv' % code)
def test_model(model_path="model.30.best", extract_all=True,
days_for_test=False):
'''
    1. Extract features for every instrument in the dataset;
    2. Read the training and validation sets;
    3. Load the trained model and predict on both sets;
    4. Plot and save the corresponding capital curves.
    '''
    # 1. Feature extraction
data_dir = './dataset/'
output_dir = './output09/'
feature_dir = './stock_features/'
if not(os.path.exists(output_dir)):
os.mkdir(output_dir)
    # Only extract features for the test period
    if days_for_test == False:
        # The test set starts on 2017-09-01
df = pd.read_csv('dataset/000001.csv', index_col='date',
parse_dates=True)
days_for_test = df.shape[0] - df.index.get_loc('2017-09-01')
extract_all_features(data_dir, feature_dir, days_for_test)
    # 2. Read the features
input_shape = [30, 61]
file_list = os.listdir(data_dir)
if extract_all == True:
column_names = [s.split(sep='.')[0] for s in file_list]
else:
        # Otherwise only test 3 indices
column_names = ['000016', '000300', '000905']
wp = WindPuller(input_shape).load_model(model_path)
for f in column_names:
train_set, test_set = read_feature(feature_dir, input_shape, f)
data_set = {'train': train_set, 'test': test_set}
tmp = pd.read_csv('dataset/%s.csv' % f)
for key in data_set:
            # 3. Predict on the train/validation set, then plot and save the results
            print('Processing %s_%s\n' % (f, key))
val = data_set[key]
pred = wp.predict(val.images, 1024)
[cr, cap] = calculate_cumulative_return_cost(val.labels, pred)
            # Set the data range depending on the train/validation split
if key == 'train':
index = range(input_shape[0]-1, input_shape[0] + pred.shape[0])
elif key == 'test':
index = range(tmp.shape[0]-days_for_test-1, tmp.shape[0])
            # 1) Save the capital-curve data
date = tmp['date'].iloc[index]
close = tmp['close'].iloc[index]
buy_hold = close / close.iloc[0] - 1
# DEBUG:
#print('date shape:\t', date.shape)
#print('close shape:\t', close.shape)
#print('buy_hold shape:\t', buy_hold.shape)
#print('Pct_change shape:\t', val.labels.shape)
#print('Position shape:\t', pred.shape)
output = pd.DataFrame({'Close': close.values,
'Pct_change': np.concatenate(([np.nan],
val.labels)),
'Position':
np.concatenate(([np.nan],
pred.reshape(-1))),
'Cum_return': cr.reshape(-1),
'Buy_hold': buy_hold.values},
index=date,
columns=['Close', 'Pct_change',
'Position', 'Cum_return',
'Buy_hold'])
names = pd.read_csv('指数名称.csv',
dtype={'code':np.str, 'name':np.str},
engine='python')
names.set_index('code', inplace=True)
names = names.to_dict()['name']
n = names[f]
            # Write to file
cap_line_dir = os.path.join(output_dir, 'stocks')
if not(os.path.exists(cap_line_dir)):
os.mkdir(cap_line_dir)
cap_line_f = os.path.join(cap_line_dir, '%s_%s.csv' % (n, key))
output.to_csv(cap_line_f)
            # 2) Compute performance, plot the capital curve and generate the investment report
            print('Computing strategy performance for %s_%s_%s\n' % (f, n, key))
            calc_perf(output, f, n, key, output_dir)
            print('Done')
print('='*50)
def predict_tomorrow(model_path="model.30.best", extract_all=False):
'''
    1. Extract features for each instrument in the 3 index datasets;
    2. Read the single-row validation set;
    3. Load the trained model and predict the signal on the validation set;
    4. Save the signal results.
    '''
    # 1. Feature extraction
data_dir = './newdata/'
output_dir = './output09/'
feature_dir = './stock_features/'
if not(os.path.exists(output_dir)):
os.mkdir(output_dir)
    # The test set starts on 2017-09-01
df = pd.read_csv('dataset/000300.csv', index_col='date',
parse_dates=True)
days_for_test = df.shape[0] - df.index.get_loc('2017-09-01')
extract_all_features(data_dir, feature_dir, days_for_test, extract_all)
# 2. 读取特征
input_shape = [30, 61]
file_list = os.listdir(data_dir)
if extract_all == True:
column_names = [s.split(sep='.')[0] for s in file_list]
else:
        # Otherwise only test 3 indices
column_names = ['000016', '000300', '000905']
    # Load the trained model
wp = WindPuller(input_shape).load_model(model_path)
for f in column_names:
_, test_set = read_feature(feature_dir, input_shape, f)
        tmp = pd.read_csv('dataset/%s.csv' % f)
import base64
import io
import textwrap
import dash
import dash_core_components as dcc
import dash_html_components as html
import gunicorn
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import flask
import pandas as pd
import urllib.parse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import numpy as np
import math
import scipy.stats
import dash_table
from dash_table.Format import Format, Scheme
from colour import Color
import dash_bootstrap_components as dbc
# from waitress import serve
external_stylesheets = [dbc.themes.BOOTSTRAP, 'https://codepen.io/chriddyp/pen/bWLwgP.css',
"https://codepen.io/sutharson/pen/dyYzEGZ.css",
"https://fonts.googleapis.com/css2?family=Raleway&display=swap",
"https://codepen.io/chriddyp/pen/brPBPO.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# "external_url": "https://codepen.io/chriddyp/pen/brPBPO.css"
# https://raw.githubusercontent.com/aaml-analytics/pca-explorer/master/LoadingStatusStyleSheet.css
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
tabs_styles = {'height': '40px', 'font-family': 'Raleway', 'fontSize': 14}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'Weight': 'bold'
}
tab_selected_style = {
'borderTop': '3px solid #333333',
'borderBottom': '1px solid #d6d6d6 ',
'backgroundColor': '#f6f6f6',
'color': '#333333',
# 'fontColor': '#004a4a',
'fontWeight': 'bold',
'padding': '6px'
}
# APP ABOUT DESCRIPTION
MOF_tool_about = textwrap.wrap(' These tools aim to provide a reproducible and consistent data visualisation platform '
'where experimental and computational researchers can use big data and statistical '
'analysis to find the best materials for specific applications. Principal Component '
'Analysis (PCA) is a dimension reduction technique that can be used to reduce a large '
'set of observable variables to a smaller set of latent variables that still contain '
'most of the information in the large set (feature extraction). This is done by '
'transforming a number of (possibly) correlated variables into some number of orthogonal '
'(uncorrelated) variables called principal components to find the directions of maximal '
'variance. PCA can be used to ease data visualisation by having fewer dimensions to plot '
'or be used as a pre-processing step before using another Machine Learning (ML)'
' algorithm for regression '
'and classification tasks. PCA can be used to improve an ML algorithm performance, '
'reduce overfitting and reduce noise in data.',
width=50)
Scree_plot_about = textwrap.wrap(' The Principal Component Analysis Visualisation Tools run PCA for the user and '
                                 'populate a Scree plot. This plot allows the user to determine if PCA is suitable '
                                 'for their dataset and whether they can accept an X% drop in explained variance '
                                 'in exchange for fewer dimensions.', width=50)
Feature_correlation_filter = textwrap.wrap("Feature correlation heatmaps provide users with feature analysis and "
"feature principal component analysis. This tool will allow users to see the"
" correlation between variables and the"
" covariances/correlations between original variables and the "
"principal components (loadings)."
, width=50)
plots_analysis = textwrap.wrap('Users can keep all variables as features or drop certain variables to produce a '
'Biplot, cos2 plot and contribution plot. The score plot is used to look for clusters, '
'trends, and outliers in the first two principal components. The loading plot is used to'
' visually interpret the first two principal components. The biplot overlays the score '
'plot and the loading plot on the same graph. The squared cosine (cos2) plot shows '
'the importance of a component for a given observation i.e. measures '
'how much a variable is represented in a component. The contribution plot contains the '
'contributions (%) of the variables to the principal components', width=50, )
data_table_download = textwrap.wrap("The user's inputs from the 'Plots' tab determine the output of the data tables."
" The user can download the scores, eigenvalues, explained variance, "
"cumulative explained variance, loadings, "
"cos2 and contributions from the populated data tables. "
"Note: Wait for user inputs to be"
" computed (faded tab app will return to the original colour) before downloading the"
" data tables. ", width=50)
MOF_GH = textwrap.wrap(" to explore AAML's sample data and read more on"
" AAML's Principal Component Analysis Visualisation Tool Manual, FAQ's & Troubleshooting"
" on GitHub... ", width=50)
####################
# APP LAYOUT #
####################
fig = go.Figure()
fig1 = go.Figure()
app.layout = html.Div([
html.Div([
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/UOC.png',
height='35', width='140', style={'display': 'inline-block', 'padding-left': '1%'}),
html.Img(src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/A2ML-logo.png',
height='50', width='125', style={'float': 'right', 'display': 'inline-block', 'padding-right': '2%'}),
html.H1("Principal Component Analysis Visualisation Tools",
style={'display': 'inline-block', 'padding-left': '11%', 'text-align': 'center', 'fontSize': 36,
'color': 'white', 'font-family': 'Raleway'}),
html.H1("...", style={'fontColor': '#3c3c3c', 'fontSize': 6})
], style={'backgroundColor': '#333333'}),
html.Div([html.A('Refresh', href='/')], style={}),
html.Div([
html.H2("Upload Data", style={'fontSize': 24, 'font-family': 'Raleway', 'color': '#333333'}, ),
html.H3("Upload .txt, .csv or .xls files to starting exploring data...", style={'fontSize': 16,
'font-family': 'Raleway'}),
dcc.Store(id='csv-data', storage_type='session', data=None),
html.Div([dcc.Upload(
id='data-table-upload',
children=html.Div([html.Button('Upload File')],
style={'height': "60px", 'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
}),
multiple=False
),
html.Div(id='output-data-upload'),
]), ], style={'display': 'inline-block', 'padding-left': '1%', }),
html.Div([dcc.Tabs([
dcc.Tab(label='About', style=tab_style, selected_style=tab_selected_style,
children=[html.Div([html.H2(" What are AAML's Principal Component Analysis Visualisation Tools?",
style={'fontSize': 18, 'font-family': 'Raleway', 'font-weight': 'bold'
}),
html.Div([' '.join(MOF_tool_about)]
, style={'font-family': 'Raleway'}),
html.H2(["Scree Plot"],
style={'fontSize': 18,
'font-family': 'Raleway', 'font-weight': 'bold'}),
html.Div([' '.join(Scree_plot_about)], style={'font-family': 'Raleway'}),
html.H2(["Feature Correlation"], style={'fontSize': 18,
'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(Feature_correlation_filter)], style={'font-family': 'Raleway', }),
html.H2(["Plots"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(plots_analysis)], style={'font-family': 'Raleway'}),
html.H2(["Data tables"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(data_table_download)], style={'font-family': 'Raleway'}),
# ADD LINK
html.Div([html.Plaintext(
[' Click ', html.A('here ',
href='https://github.com/aaml-analytics/pca-explorer')],
style={'display': 'inline-block',
'fontSize': 14, 'font-family': 'Raleway'}),
html.Div([' '.join(MOF_GH)], style={'display': 'inline-block',
'fontSize': 14,
'font-family': 'Raleway'}),
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof'
'-explorer/master/github.png',
height='40', width='40',
style={'display': 'inline-block', 'float': "right"
})
]
, style={'display': 'inline-block'})
], style={'backgroundColor': '#ffffff', 'padding-left': '1%'}
)]),
dcc.Tab(label='Scree Plot', style=tab_style, selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='PC-Eigen-plot')
],
style={'display': 'inline-block',
'width': '49%'}),
html.Div([dcc.Graph(id='PC-Var-plot')
], style={'display': 'inline-block', 'float': 'right',
'width': '49%'}),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:", dcc.RadioItems(
id='outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '49%', 'padding-left': '1%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-scree',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Label(["You should attempt to use at least..."
, html.Div(id='var-output-container-filter')])
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["As a rule of thumb for the Scree Plot"
" Eigenvalues, the point where the slope of the curve "
"is clearly "
"leveling off (the elbow), indicates the number of "
"components that "
"should be retained as significant."])
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Feature correlation', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([html.Div([dcc.Graph(id='PC-feature-heatmap')
], style={'width': '47%',
'display': 'inline-block',
'float': 'right'}),
html.Div([dcc.Graph(id='feature-heatmap')
], style={'width': '51%',
'display': 'inline-block',
'float': 'left'}),
html.Div([html.Label(["Loading colour bar range:"
, html.Div(
id='color-range-container')])
], style={
'fontSize': 12,
'float': 'right',
'width': '100%',
'padding-left': '85%'}
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='PC-feature-outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label(
["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-heatmap',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([html.Label(["Select color scale:",
dcc.RadioItems(
id='colorscale',
options=[{'label': i, 'value': i}
for i in
['Viridis', 'Plasma']],
value='Plasma'
)]),
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("There are usually two ways multicollinearity, "
"which is when there are a number of variables "
"that are highly correlated, is dealt with:"),
html.P("1) Use PCA to obtain a set of orthogonal ("
"not correlated) variables to analyse."),
html.P("2) Use correlation of determination (R²) to "
"determine which variables are highly "
"correlated and use only 1 in analysis. "
"Cut off for highly correlated variables "
"is ~0.7."),
html.P(
"In any case, it depends on the machine learning algorithm you may apply later. For correlation robust algorithms,"
" such as Random Forest, correlation of features will not be a concern. For non-correlation robust algorithms such as Linear Discriminant Analysis, "
"all high correlation variables should be removed.")
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["Note: Data has been standardised (scale)"])
], style={'padding-left': '1%'})
])
]),
dcc.Tab(label='Plots', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([html.P("Selecting Features")], style={'padding-left': '1%',
'font-weight': 'bold'}),
html.Div([
html.P("Input here affects all plots, datatables and downloadable data output"),
html.Label([
"Would you like to analyse all variables or choose custom variables to "
"analyse:",
dcc.RadioItems(
id='all-custom-choice',
options=[{'label': 'All',
'value': 'All'},
{'label': 'Custom',
'value': 'Custom'}],
value='All'
)])
], style={'padding-left': '1%'}),
html.Div([
html.P("For custom variables input variables you would not like as features in your PCA:"),
html.Label(
[
"Note: Only input numerical variables (non-numerical variables have already "
"been removed from your dataframe)",
dcc.Dropdown(id='feature-input',
multi=True,
)])
], style={'padding': 10, 'padding-left': '1%'}),
]), dcc.Tabs(id='sub-tabs1', style=tabs_styles,
children=[
dcc.Tab(label='Biplot (Scores + loadings)', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='biplot', figure=fig)
], style={'height': '100%', 'width': '75%',
'padding-left': '20%'},
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-biplot',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-biplot',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([
html.Label([
"Graph Update to show either loadings (Loading Plot) or "
"scores and loadings (Biplot):",
dcc.RadioItems(
id='customvar-graph-update',
options=[{'label': 'Biplot',
'value': 'Biplot'},
{'label': 'Loadings',
'value': 'Loadings'}],
value='Biplot')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix. PCA is an unsupervised machine learning technique - it only "
"looks at the input features and does not take "
"into account the output or the target"
" (response) variable.")],
style={'padding-left': '1%'}),
html.Div([
html.P("For variables you have dropped..."),
html.Label([
"Would you like to introduce a first target variable"
" into your data visualisation?"
" (Graph type must be Biplot): "
"",
dcc.RadioItems(
id='radio-target-item',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select first target variable for color scale of scores: ",
dcc.Dropdown(
id='color-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Would you like to introduce a second target variable"
" into your data visualisation??"
" (Graph type must be Biplot):",
dcc.RadioItems(
id='radio-target-item-second',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select second target variable for size scale of scores:",
dcc.Dropdown(
id='size-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([html.Label(["Size range:"
, html.Div(
id='size-second-target-container')])
], style={'display': 'inline-block',
'float': 'right',
'padding-right': '5%'}
),
html.Div([
html.Br(),
html.P(
"A loading plot shows how "
"strongly each characteristic (variable)"
" influences a principal component. The angles between the vectors"
" tell us how characteristics correlate with one another: "),
html.P("1) When two vectors are close, forming a small angle, the two "
"variables they represent are positively correlated. "),
html.P(
"2) If they meet each other at 90°, they are not likely to be correlated. "),
html.P(
"3) When they diverge and form a large angle (close to 180°), they are negative correlated."),
html.P(
"The Score Plot involves the projection of the data onto the PCs in two dimensions."
"The plot contains the original data but in the rotated (PC) coordinate system"),
html.P(
"A biplot merges a score plot and loading plot together.")
], style={'padding-left': '1%'}
),
]),
dcc.Tab(label='Cos2', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='cos2-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-cos2',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'padding-left': '1%',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-cos2',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The squared cosine shows the importance of a "
"component for a given observation i.e. "
"measures "
" how much a variable is represented in a "
"component")
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Contribution', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='contrib-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-contrib',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
], style={'padding-left': '1%'})
], style={'display': 'inline-block',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-contrib',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The contribution plot contains the "
"contributions (in percentage) of the "
"variables to the principal components")
], style={'padding-left': '1%'}),
])
])
]),
dcc.Tab(label='Data tables', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([
html.Label(
["Note: Input in 'Plots' tab will provide output of data tables and the"
" downloadable PCA data"])
], style={'font-weight': 'bold', 'padding-left': '1%'}),
html.Div([html.A(
'Download PCA Data (scores for each principal component)',
id='download-link',
href="",
target="_blank"
)], style={'padding-left': '1%'}),
html.Div([html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(id="eigenA-outlier",
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])], style={'padding-left': '1%',
'display': 'inline-block', 'width': '49%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-data-table',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Div([
html.Label(["Correlation between Features"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-correlation',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-correlation-container'),
]),
html.Div([html.A(
'Download Feature Correlation data',
id='download-link-correlation',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Eigen Analysis of the correlation matrix"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-eigenA',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-eigenA-container'),
]),
html.Div([html.A(
'Download Eigen Analysis data',
id='download-link-eigenA',
href="",
download='Eigen_Analysis_data.csv',
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Loadings (Feature and PC correlation) from PCA"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-loadings',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-loadings-container'),
]),
html.Div([html.A(
'Download Loadings data',
id='download-link-loadings',
download='Loadings_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Cos2 from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-cos2',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-cos2-container'),
]),
html.Div([html.A(
'Download Cos2 data',
id='download-link-cos2',
download='Cos2_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Contributions from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-contrib',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-contrib-container'),
]),
html.Div([html.A(
'Download Contributions data',
id='download-link-contrib',
download='Contributions_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
])])
])
], style={'font-family': 'Raleway'})])
# READ FILE
def parse_contents(contents, filename):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
        if 'csv' in filename:
            # Assume that the user uploaded a CSV file
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
            df = df.fillna(0)
        elif 'xls' in filename:
            # Assume that the user uploaded an Excel file
            df = pd.read_excel(io.BytesIO(decoded))
            df = df.fillna(0)
        elif 'txt' in filename or 'tsv' in filename:
            # Assume that the user uploaded a whitespace-delimited text file
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), delimiter=r'\s+')
            df = df.fillna(0)
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
return df
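# A minimal, hypothetical sketch (not called anywhere in the app) showing how
# parse_contents() can be exercised directly: dcc.Upload hands a file over as
# "content_type,base64-payload", which is why the function splits on the first
# comma and base64-decodes the remainder. The sample CSV and the helper name
# below are illustrative assumptions only.
def _example_parse_contents():
    import base64 as _b64
    sample_csv = "a,b\n1,2\n3,4\n"
    contents = "data:text/csv;base64," + _b64.b64encode(sample_csv.encode('utf-8')).decode('utf-8')
    return parse_contents(contents, "sample.csv")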
@app.callback(Output('csv-data', 'data'),
[Input('data-table-upload', 'contents')],
[State('data-table-upload', 'filename')])
def parse_uploaded_file(contents, filename):
if not filename:
return dash.no_update
df = parse_contents(contents, filename)
    df = df.fillna(0)
return df.to_json(date_format='iso', orient='split')
@app.callback(Output('PC-Var-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_var_scree_plot(outlier, matrix_type, data):
    """Scree plot: cumulative proportion of explained variance."""
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
data = Var_dff
elif outlier == 'Yes' and matrix_type == 'Correlation':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
data = Var_dff_outlier
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
data = Var_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
data = Var_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Cumulative Proportion of Explained Variance'],
mode='lines', line=dict(color='Red')))
return {'data': traces,
            'layout': go.Layout(title='<b>Scree Plot: Cumulative Proportion of Explained Variance</b>',
titlefont=dict(family='Helvetica', size=16),
xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True
}, yaxis={'title': 'Cumulative Explained Variance',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True,
'range': [0, 100]},
hovermode='closest', font=dict(family="Helvetica"), template="simple_white")
}
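# Illustrative sketch (not wired into the callbacks above): every callback
# repeats the same outlier rule, keeping only rows whose numerical features all
# have |z-score| < 3. The helper name and signature below are assumptions for
# illustration; the callbacks keep their inline versions.
def zscore_outlier_mask(dff, threshold=3.0):
    """Boolean mask of rows whose columns all have |z-score| below threshold."""
    import numpy as _np
    import scipy.stats as _scipy_stats
    abs_z = _np.abs(_scipy_stats.zscore(dff))
    return (abs_z < threshold).all(axis=1)
# Example use: outlier_dff = dff[zscore_outlier_mask(dff)]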
@app.callback(
Output('var-output-container-filter', 'children'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_output(outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int)
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_covar)
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
@app.callback(Output('PC-Eigen-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')]
)
def update_eigen_scree_plot(outlier, matrix_type, data):
    """Scree plot: eigenvalues of each principal component."""
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
data = Eigen_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
data = Eigen_dff_outlier
elif outlier == 'No' and matrix_type == "Covariance":
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
data = Eigen_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1)
data = Eigen_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Eigenvalues'], mode='lines'))
return {'data': traces,
'layout': go.Layout(title='<b>Scree Plot Eigenvalues</b>', xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True},
titlefont=dict(family='Helvetica', size=16),
yaxis={'title': 'Eigenvalues', 'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True}, hovermode='closest',
font=dict(family="Helvetica"), template="simple_white", )
}
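# Illustrative sketch (not wired into the callbacks): the scree, eigenvalue and
# heatmap callbacks all repeat the same four-way branch. The shared recipe is to
# optionally drop rows with any |z-score| >= 3, standardise the features when a
# correlation matrix is requested (leave raw values for a covariance matrix),
# fit PCA on all components, and take loadings as components.T * sqrt(explained
# variance). The helper name and return layout below are assumptions only.
def run_pca(dff, matrix_type='Correlation', remove_outliers=False):
    import numpy as _np
    import pandas as _pd
    import scipy.stats as _scipy_stats
    from sklearn.decomposition import PCA as _PCA
    from sklearn.preprocessing import StandardScaler as _StandardScaler
    data = dff
    if remove_outliers:
        data = dff[(_np.abs(_scipy_stats.zscore(dff)) < 3).all(axis=1)]
    features = list(data.columns)
    x = data.loc[:, features].values
    if matrix_type == 'Correlation':
        # correlation-matrix PCA is PCA on standardised features
        x = _StandardScaler().fit_transform(x)
    pca = _PCA(n_components=len(features))
    scores = _pd.DataFrame(pca.fit_transform(x),
                           columns=['PC' + str(i + 1) for i in range(len(features))])
    loadings = _pd.DataFrame(pca.components_.T * _np.sqrt(pca.explained_variance_),
                             index=features, columns=scores.columns)
    return scores, loadings, pca.explained_variance_ratio_, pca.explained_variance_
# Example: scores, loadings, var_ratio, eigenvalues = run_pca(dff, matrix_type, outlier == 'Yes')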
def round_up(n, decimals=0):
multiplier = 10 ** decimals
return math.ceil(n * multiplier) / multiplier
def round_down(n, decimals=0):
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
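# Quick illustrative examples of the rounding helpers used for the colour-bar
# range reported in the heatmap callback below (values chosen arbitrarily):
#     round_up(0.1234, 2)   -> 0.13
#     round_down(0.1299, 2) -> 0.12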
@app.callback([Output('PC-feature-heatmap', 'figure'),
Output('color-range-container', 'children')],
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input("matrix-type-heatmap", "value"),
Input('csv-data', 'data')]
)
def update_pc_feature_heatmap(outlier, colorscale, matrix_type, data):
    """Heatmap of loadings (correlation between features and principal components)."""
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
# INCLUDING OUTLIERS
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
    # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
    # explained variance of the principal components
# print(pca.explained_variance_ratio_)
# Explained variance tells us how much information (variance) can be attributed to each of the principal components
    # loading of each feature on the principal components
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
# OUTLIERS REMOVED
z_scores_hm = scipy.stats.zscore(dff)
abs_z_scores_hm = np.abs(z_scores_hm)
filtered_entries_hm = (abs_z_scores_hm < 3).all(axis=1)
outlier_dff_hm = dff[filtered_entries_hm]
features1_outlier_hm = outlier_dff_hm.columns
features_outlier2 = list(features1_outlier_hm)
outlier_names1_hm = df[filtered_entries_hm]
outlier_names_hm = outlier_names1_hm.iloc[:, 0]
x_outlier_hm = outlier_dff_hm.loc[:, features_outlier2].values
# Separating out the target (if any)
# Standardizing the features
x_outlier_hm = StandardScaler().fit_transform(x_outlier_hm)
pca_outlier_hm = PCA(n_components=len(features_outlier2))
principalComponents_outlier_hm = pca_outlier_hm.fit_transform(x_outlier_hm)
principalDf_outlier_hm = pd.DataFrame(data=principalComponents_outlier_hm
, columns=['PC' + str(i + 1) for i in range(len(features_outlier2))])
    # combining principal components and target
finalDf_outlier_hm = pd.concat([outlier_names_hm, principalDf_outlier_hm], axis=1)
dfff_outlier_hm = finalDf_outlier_hm
# calculating loading
loading_outlier_hm = pca_outlier_hm.components_.T * np.sqrt(pca_outlier_hm.explained_variance_)
loading_df_outlier_hm = pd.DataFrame(data=loading_outlier_hm[0:, 0:], index=features_outlier2,
columns=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])])
loading_dff_outlier_hm = loading_df_outlier_hm.T
# COVAR MATRIX
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
# COVAR MATRIX OUTLIERS REMOVED
if outlier == 'No' and matrix_type == "Correlation":
data = loading_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_dff_outlier_hm
elif outlier == 'No' and matrix_type == "Covariance":
data = loading_dff_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
data = loading_dff_outlier_covar
    size_range = [round_up(data.values.min(), 2), round_down(data.values.max(), 2)]
traces.append(go.Heatmap(
        z=data, x=data.columns, y=data.index,  # axis labels come from the selected loading matrix
colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell is a loading: the correlation between a feature and a principal component
colorbar={"title": "Loading",
# 'tickvals': [round_up(data.values.min(), 2),
# round_up((data.values.min() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down((data.values.max() + data.values.min())/2,2),
# round_down((data.values.max() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down(data.values.max(),2), ]
}
))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>PC and Feature Correlation Analysis</b>'),
xaxis=dict(title_text='Features', title_standoff=50),
titlefont=dict(family='Helvetica', size=16),
hovermode='closest', margin={'b': 110, 't': 50, 'l': 75},
font=dict(family="Helvetica", size=11),
annotations=[
dict(x=-0.16, y=0.5, showarrow=False, text="Principal Components",
xref='paper', yref='paper', textangle=-90,
font=dict(size=12))]
),
}, '{}'.format(size_range)
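# Illustrative sketch (not the app's own code): the Cos2 and Contribution tabs
# described in the layout are conventionally derived straight from the loading
# matrix: cos2 is the squared loading, and a variable's contribution to a
# component is its cos2 expressed as a percentage of that component's column
# total. The helper name below is an assumption for illustration.
def cos2_and_contributions(loadings_df):
    """loadings_df: features x PCs loading matrix -> (cos2 DataFrame, contributions in %)."""
    cos2 = loadings_df ** 2
    contributions = cos2.div(cos2.sum(axis=0), axis=1) * 100
    return cos2, contributions
# Example: cos2_df, contrib_df = cos2_and_contributions(loading_df)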
@app.callback(Output('feature-heatmap', 'figure'),
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input('csv-data', 'data')])
def update_feature_heatmap(outlier, colorscale, data):
    """Heatmap of feature-to-feature R² (coefficient of determination)."""
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson', )
r2_dff = correlation_dff * correlation_dff
data = r2_dff
feat = features
elif outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
data = r2_dff_outlier
feat = features_outlier
traces.append(go.Heatmap(
z=data, x=feat, y=feat, colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell is the coefficient of determination (R²) between two features
colorbar={"title": "R²", 'tickvals': [0, 0.2, 0.4, 0.6, 0.8, 1]}))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>Feature Correlation Analysis</b>', y=0.97, x=0.6),
xaxis={},
titlefont=dict(family='Helvetica', size=16),
yaxis={},
hovermode='closest', margin={'b': 110, 't': 50, 'l': 180, 'r': 50},
font=dict(family="Helvetica", size=11)),
}
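# Illustrative sketch (not wired into the callbacks): the layout text quotes
# ~0.7 as a rule-of-thumb cut-off for flagging highly correlated feature pairs
# in the R² matrix plotted above. A minimal way to list such pairs; the
# function name and default cut-off argument are assumptions for illustration.
def highly_correlated_pairs(dff, cutoff=0.7):
    """Return (feature_a, feature_b, R²) tuples where R² exceeds the cut-off."""
    r2 = dff.corr(method='pearson') ** 2
    cols = list(r2.columns)
    pairs = []
    for i in range(len(cols)):
        for j in range(i + 1, len(cols)):
            if r2.iloc[i, j] > cutoff:
                pairs.append((cols[i], cols[j], float(r2.iloc[i, j])))
    return pairs
# Example: highly_correlated_pairs(dff)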
@app.callback(Output('feature-input', 'options'),
[Input('all-custom-choice', 'value'),
Input('csv-data', 'data')])
def activate_input(all_custom, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
options = []
elif all_custom == 'Custom':
options = [{'label': i, 'value': i} for i in dff.columns]
return options
@app.callback(Output('color-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')], )
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
@app.callback(Output('size-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item-second', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')])
def populate_size_dropdown(input, target, outlier, graph_type, matrix_type, data):
    """Populate the second (size-scale) target-variable dropdown."""
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
# resume covar matrix...
@app.callback(Output('biplot', 'figure'),
[
Input('outlier-value-biplot', 'value'),
Input('feature-input', 'value'),
Input('customvar-graph-update', 'value'),
Input('color-scale-scores', 'value'),
Input('radio-target-item', 'value'),
Input('size-scale-scores', 'value'),
Input('radio-target-item-second', 'value'),
Input('all-custom-choice', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_graph_custom(outlier, input, graph_update, color, target, size, target2, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_dff = pd.concat([zero_scale_df, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
        # correlation-matrix PCA (the outlier-filtered features were standardised above)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# calculating loading
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, line_group_scale_df_covar], axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
# COVARIANCE MATRIX OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
trace2_all = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, showscale=False, size=12,
line=dict(width=0.5, color='DarkSlateGrey'),
),
)
####################################################################################################
# INCLUDE THIS
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_covar
variance = Var_outlier_scale_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], line=dict(color="#4f4f4f"),
name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text',
textposition='bottom right', textfont=dict(size=12)
)
lists[counter] = trace1_all
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2_all)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == 'Custom':
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
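# a row is treated as an outlier (and dropped) when any numeric feature has an absolute z-score of 3 or more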
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_dff = pd.concat([zero_scale_input_df, line_group_scale_input_df], axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, line_group_scale_input_df_covar], axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
# COVARIANCE MATRIX OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
variance = Var_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
variance = Var_scale_input_outlier_covar
trace2 = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
marker_color=dat[color] if target == 'Yes' else None,
marker_size=dat[size] if target2 == 'Yes' else 12,
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, colorscale='Plasma',
sizeref=max(dat[size]) / (15 ** 2) if target2 == 'Yes' else None,
sizemode='area',
showscale=True if target == 'Yes' else False,
line=dict(width=0.5, color='DarkSlateGrey'),
colorbar=dict(title=dict(text=color if target == 'Yes' else None,
font=dict(family='Helvetica'),
side='right'), ypad=0),
),
)
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_scale_input_outlier_line_graph
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'],
line=dict(color="#666666" if target == 'Yes' else '#4f4f4f'), name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
)
lists[counter] = trace1
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(
Output('size-second-target-container', 'children'),
[Input('size-scale-scores', 'value'),
Input('outlier-value-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_output(size, outlier, data):
if not data:
return dash.no_update
if size is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
z_scores_dff_size = scipy.stats.zscore(dff)
abs_z_scores_dff_size = np.abs(z_scores_dff_size)
filtered_entries_dff_size = (abs_z_scores_dff_size < 3).all(axis=1)
dff_target_outlier_size = dff[filtered_entries_dff_size]
if outlier == 'Yes':
size_range = [round(dff_target_outlier_size[size].min(), 2), round(dff_target_outlier_size[size].max(), 2)]
elif outlier == 'No':
size_range = [round(dff[size].min(), 2), round(dff[size].max(), 2)]
return '{}'.format(size_range)
@app.callback(Output('cos2-plot', 'figure'),
[
Input('outlier-value-cos2', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-cos2", "value"),
Input('csv-data', 'data')
])
def update_cos2_plot(outlier, input, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df['cos2'] = (loading_scale_df["PC1"] ** 2) + (loading_scale_df["PC2"] ** 2)
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_df.iloc[:, 2], columns=['cos2'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
# uses covariance matrix
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["cos2"] = (loading_outlier_scale_df["PC1"] ** 2) + (
loading_outlier_scale_df["PC2"] ** 2)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_df_color = pd.DataFrame(data=loading_outlier_scale_df.iloc[:, 2], columns=['cos2'])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, zero_outlier_scale_df_color, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
loading_scale_line_graph_sort = loading_scale_line_graph.sort_values(by='cos2')
loading_outlier_scale_line_graph_sort = loading_outlier_scale_line_graph.sort_values(by='cos2')
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df_covar['cos2'] = (loading_scale_df_covar["PC1"] ** 2) + (loading_scale_df_covar["PC2"] ** 2)
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_df_color_covar = pd.DataFrame(data=loading_scale_df_covar.iloc[:, 2], columns=['cos2'])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, zero_scale_df_color_covar, line_group_scale_df_covar],
axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
loading_scale_line_graph_sort_covar = loading_scale_line_graph_covar.sort_values(by='cos2')
# COVARIANCE MATRIX WITH OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
# combining principal components and target
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df_covar["cos2"] = (loading_outlier_scale_df_covar["PC1"] ** 2) + (
loading_outlier_scale_df_covar["PC2"] ** 2)
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_df_color_covar = pd.DataFrame(data=loading_outlier_scale_df_covar.iloc[:, 2],
columns=['cos2'])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, zero_outlier_scale_df_color_covar,
line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
loading_outlier_scale_line_graph_sort_covar = loading_outlier_scale_line_graph_covar.sort_values(by='cos2')
# scaling data
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph_sort
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph_sort
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_sort_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_sort_covar
variance = Var_outlier_scale_covar
N = len(data['cos2'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
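# build one hex colour per distinct cos2 value, shading from light blue (low cos2) to dark blue (high cos2)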
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter = 0
counter_color = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], mode='lines+text',
name=i, line=dict(color=colorscale[counter_color]),
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
textposition='bottom right', textfont=dict(size=12)
)
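# trace2_all below is an invisible two-point scatter (marker opacity 0); it exists only to render the Cos2 colorbar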
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers',
hoverinfo='skip',
marker=dict(showscale=True, opacity=0,
color=[data["cos2"].min(), data["cos2"].max()],
colorscale=colorscale,
colorbar=dict(title=dict(text="Cos2",
side='right'), ypad=0)
), )
lists[counter] = trace1_all
counter = counter + 1
counter_color = counter_color + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)), mirror=True,
ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)), mirror=True,
ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == "Custom":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# # x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df["cos2"] = (loading_scale_input_df["PC1"] ** 2) + (loading_scale_input_df["PC2"] ** 2)
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_df_color = pd.DataFrame(data=loading_scale_input_df.iloc[:, 2], columns=['cos2'])
zero_scale_input_dff = pd.concat([zero_scale_input_df, zero_scale_input_df_color, line_group_scale_input_df],
axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# # x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df["cos2"] = (loading_scale_input_outlier_df["PC1"] ** 2) + \
(loading_scale_input_outlier_df["PC2"] ** 2)
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color = pd.DataFrame(data=loading_scale_input_outlier_df.iloc[:, 2],
columns=['cos2'])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, zero_scale_input_outlier_df_color,
line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
loading_scale_input_line_graph_sort = loading_scale_input_line_graph.sort_values(by='cos2')
loading_scale_input_outlier_line_graph_sort = loading_scale_input_outlier_line_graph.sort_values(by='cos2')
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df_covar["cos2"] = (loading_scale_input_df_covar["PC1"] ** 2) + (
loading_scale_input_df_covar["PC2"] ** 2)
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_df_color_covar = pd.DataFrame(data=loading_scale_input_df_covar.iloc[:, 2], columns=['cos2'])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, zero_scale_input_df_color_covar,
line_group_scale_input_df_covar],
axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
loading_scale_input_line_graph_sort_covar = loading_scale_input_line_graph_covar.sort_values(by='cos2')
# COVARIANCE MATRIX WITH OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df_covar["cos2"] = (loading_scale_input_outlier_df_covar["PC1"] ** 2) + \
(loading_scale_input_outlier_df_covar["PC2"] ** 2)
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color_covar = pd.DataFrame(data=loading_scale_input_outlier_df_covar.iloc[:, 2],
columns=['cos2'])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
zero_scale_input_outlier_df_color_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
loading_scale_input_outlier_line_graph_sort_covar = loading_scale_input_outlier_line_graph_covar.sort_values(
by='cos2')
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph_sort
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
variance = Var_scale_input_outlier
data = loading_scale_input_outlier_line_graph_sort
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_sort_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
variance = Var_scale_input_outlier_covar
data = loading_scale_input_outlier_line_graph_sort_covar
N = len(data['cos2'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter_color = 0
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'], name=i, line=dict(color=colorscale[counter_color]),
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, color=[data["cos2"].min(), data["cos2"].max()],
colorscale=colorscale, opacity=0,
colorbar=dict(title=dict(text="Cos2",
side='right'), ypad=0)
), )
lists[counter] = trace1
counter_color = counter_color + 1
counter = counter + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(Output('contrib-plot', 'figure'),
[
Input('outlier-value-contrib', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-contrib", "value"),
Input('csv-data', 'data')
])
def update_contrib_plot(outlier, input, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df["PC1_cos2"] = loading_scale_df["PC1"] ** 2
loading_scale_df["PC2_cos2"] = loading_scale_df["PC2"] ** 2
loading_scale_df["PC1_contrib"] = \
(loading_scale_df["PC1_cos2"] * 100) / (loading_scale_df["PC1_cos2"].sum(axis=0))
loading_scale_df["PC2_contrib"] = \
(loading_scale_df["PC2_cos2"] * 100) / (loading_scale_df["PC2_cos2"].sum(axis=0))
loading_scale_df["contrib"] = loading_scale_df["PC1_contrib"] + loading_scale_df["PC2_contrib"]
# keep the total contribution (used for the colorscale) together with PC1 and PC2 in a separate dataframe
loading_scale_dataf = pd.concat([loading_scale_df.iloc[:, 0:2], loading_scale_df.iloc[:, 6]], axis=1)
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_dataf, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_dataf.iloc[:, 2], columns=['contrib'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["PC1_cos2"] = loading_outlier_scale_df["PC1"] ** 2
loading_outlier_scale_df["PC2_cos2"] = loading_outlier_scale_df["PC2"] ** 2
loading_outlier_scale_df["PC1_contrib"] = \
(loading_outlier_scale_df["PC1_cos2"] * 100) / (loading_outlier_scale_df["PC1_cos2"].sum(axis=0))
loading_outlier_scale_df["PC2_contrib"] = \
(loading_outlier_scale_df["PC2_cos2"] * 100) / (loading_outlier_scale_df["PC2_cos2"].sum(axis=0))
loading_outlier_scale_df["contrib"] = loading_outlier_scale_df["PC1_contrib"] + loading_outlier_scale_df[
"PC2_contrib"]
# keep the total contribution (used for the colorscale) together with PC1 and PC2 in a separate dataframe
loading_outlier_scale_dataf = pd.concat(
[loading_outlier_scale_df.iloc[:, 0:2], loading_outlier_scale_df.iloc[:, 6]], axis=1)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_dataf, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_df_color = pd.DataFrame(data=loading_outlier_scale_dataf.iloc[:, 2], columns=['contrib'])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, zero_outlier_scale_df_color, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
loading_scale_line_graph_sort = loading_scale_line_graph.sort_values(by='contrib')
loading_outlier_scale_line_graph_sort = loading_outlier_scale_line_graph.sort_values(by='contrib')
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df_covar["PC1_cos2"] = loading_scale_df_covar["PC1"] ** 2
loading_scale_df_covar["PC2_cos2"] = loading_scale_df_covar["PC2"] ** 2
loading_scale_df_covar["PC1_contrib"] = \
(loading_scale_df_covar["PC1_cos2"] * 100) / (loading_scale_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_df_covar["PC2_contrib"] = \
(loading_scale_df_covar["PC2_cos2"] * 100) / (loading_scale_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_df_covar["contrib"] = loading_scale_df_covar["PC1_contrib"] + loading_scale_df_covar[
"PC2_contrib"]
loading_scale_dataf_covar = pd.concat([loading_scale_df_covar.iloc[:, 0:2], loading_scale_df_covar.iloc[:, 6]],
axis=1)
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_dataf_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_df_color_covar = pd.DataFrame(data=loading_scale_dataf_covar.iloc[:, 2], columns=['contrib'])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, zero_scale_df_color_covar, line_group_scale_df_covar],
axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
loading_scale_line_graph_sort_covar = loading_scale_line_graph_covar.sort_values(by='contrib')
# COVARIANCE MATRIX OUTLIERS REMOVED
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df_covar["PC1_cos2"] = loading_outlier_scale_df_covar["PC1"] ** 2
loading_outlier_scale_df_covar["PC2_cos2"] = loading_outlier_scale_df_covar["PC2"] ** 2
loading_outlier_scale_df_covar["PC1_contrib"] = \
(loading_outlier_scale_df_covar["PC1_cos2"] * 100) / (
loading_outlier_scale_df_covar["PC1_cos2"].sum(axis=0))
loading_outlier_scale_df_covar["PC2_contrib"] = \
(loading_outlier_scale_df_covar["PC2_cos2"] * 100) / (
loading_outlier_scale_df_covar["PC2_cos2"].sum(axis=0))
loading_outlier_scale_df_covar["contrib"] = loading_outlier_scale_df_covar["PC1_contrib"] + \
loading_outlier_scale_df_covar[
"PC2_contrib"]
# keep the total contribution (used for the colorscale) together with PC1 and PC2 in a separate dataframe
loading_outlier_scale_dataf_covar = pd.concat(
[loading_outlier_scale_df_covar.iloc[:, 0:2], loading_outlier_scale_df_covar.iloc[:, 6]], axis=1)
line_group_df_covar = | pd.DataFrame(data=features_outlier, columns=['line_group']) | pandas.DataFrame |
import pandas as pd
import numpy as np
from time import perf_counter
from utils import View, Rule
class DataReader:
def __init__(self, raw_data_path='data/unigram_freq.csv', data_path='data/data.csv'):
# https://www.kaggle.com/rtatman/english-word-frequency
# The dataset is already sorted by frequency
self.raw_data_path = raw_data_path
self.data_path = data_path
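# Illustrative usage with the default paths:
#     DataReader().extract_5gram_words()   # writes the five-letter words and their counts to data/data.csv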
def extract_5gram_words(self):
"""
Extract the five-letter words (called 5-gram words here) from the raw data and save them with their counts to data_path
:return:
"""
raw_data = pd.read_csv(self.raw_data_path)
_5gram_word = []
_5gram_count = []
start = perf_counter()
length = raw_data.__len__()
View.title_bar('Extracting 5-Gram-Words...')
for index, row in raw_data.iterrows():
if type(row['word']) == str and len(row['word']) == 5:
_5gram_count.append(row['count'])
_5gram_word.append(row['word'])
View.progress_bar(index, length, start)
df = pd.DataFrame({'word': _5gram_word, 'count': _5gram_count})
df.to_csv(self.data_path, index=False)
return
def get_sigmoid_frequency(self):
"""
Calculate the word frequency after sigmoid smoothing and save the result
:return:
"""
data = | pd.read_csv(self.data_path) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
This module contains the ReadSets class that is in charge
of reading the sets files, reshaping them to be used in
the build class, creating and reading the parameter files and
checking the errors in the definition of the sets and parameters
"""
import itertools as it
from openpyxl import load_workbook
import pandas as pd
from hypatia.error_log.Checks import (
check_nan,
check_index,
check_index_data,
check_table_name,
check_mapping_values,
check_mapping_ctgry,
check_sheet_name,
check_tech_category,
check_carrier_type,
check_years_mode_consistency,
)
from hypatia.error_log.Exceptions import WrongInputMode
import numpy as np
from hypatia.utility.constants import (
global_set_ids,
regional_set_ids,
technology_categories,
carrier_types,
)
from hypatia.utility.constants import take_trade_ids, take_ids, take_global_ids
MODES = ["Planning", "Operation"]
class ReadSets:
""" Class that reads the sets of the model, creates the parameter files with
default values and reads the filled parameter files
Attributes
------------
mode:
The optimization mode, either "Planning" or "Operation"
path:
The path of the set files given by the user
glob_mapping : dict
A dictionary of the global set tables given by the user in the global.xlsx file
mapping : dict
A dictionary of the regional set tables given by the user in the regional
set files
connection_sheet_ids: dict
A nested dictionary that defines the sheet names of the parameter file of
the inter-regional links with their default values, indices and columns
global_sheet_ids : dict
A nested dictionary that defines the sheet names of the global parameter file
with their default values, indices and columns
regional_sheets_ids : dict
A nested dictionary that defines the sheet names of the regional parameter files
with their default values, indices and columns
trade_data : dict
A nested dictionary for storing the inter-regional link data
global_data : dict
A nested dictionary for storing the global data
data : dict
A nested dictionary for storing the regional data
"""
def __init__(self, path, mode="Planning"):
self.mode = mode
self.path = path
self._init_by_xlsx()
def _init_by_xlsx(self,):
"""
Reads and organizes the global and regional sets
"""
glob_mapping = {}
wb_glob = load_workbook(r"{}/global.xlsx".format(self.path))
sets_glob = wb_glob["Sets"]
set_glob_category = {key: value for key, value in sets_glob.tables.items()}
for entry, data_boundary in sets_glob.tables.items():
data_glob = sets_glob[data_boundary]
content = [[cell.value for cell in ent] for ent in data_glob]
header = content[0]
rest = content[1:]
df = pd.DataFrame(rest, columns=header)
glob_mapping[entry] = df
self.glob_mapping = glob_mapping
check_years_mode_consistency(
mode=self.mode, main_years=list(self.glob_mapping["Years"]["Year"])
)
for key, value in self.glob_mapping.items():
check_table_name(
file_name="global",
allowed_names=list(global_set_ids.keys()),
table_name=key,
)
check_index(value.columns, key, "global", pd.Index(global_set_ids[key]))
check_nan(key, value, "global")
if key == "Technologies":
check_tech_category(value, technology_categories, "global")
if key == "Carriers":
check_carrier_type(value, carrier_types, "global")
self.regions = list(self.glob_mapping["Regions"]["Region"])
self.main_years = list(self.glob_mapping["Years"]["Year"])
if "Timesteps" in self.glob_mapping.keys():
self.time_steps = list(self.glob_mapping["Timesteps"]["Timeslice"])
self.timeslice_fraction = self.glob_mapping["Timesteps"][
"Timeslice_fraction"
].values
else:
self.time_steps = ["Annual"]
self.timeslice_fraction = np.ones((1, 1))
# possible connections among the regions
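# each unordered pair of regions yields one line label "regionA-regionB" (names sorted
# alphabetically), so every potential inter-regional link is listed exactly once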
if len(self.regions) > 1:
lines_obj = it.permutations(self.regions, r=2)
self.lines_list = []
for item in lines_obj:
if item[0] < item[1]:
self.lines_list.append("{}-{}".format(item[0], item[1]))
mapping = {}
for reg in self.regions:
wb = load_workbook(r"{}/{}.xlsx".format(self.path, reg))
sets = wb["Sets"]
self._setbase_reg = [
"Technologies",
"Carriers",
"Carrier_input",
"Carrier_output",
]
set_category = {key: value for key, value in sets.tables.items()}
reg_mapping = {}
for entry, data_boundary in sets.tables.items():
data = sets[data_boundary]
content = [[cell.value for cell in ent] for ent in data]
header = content[0]
rest = content[1:]
df = pd.DataFrame(rest, columns=header)
reg_mapping[entry] = df
mapping[reg] = reg_mapping
for key, value in mapping[reg].items():
check_table_name(
file_name=reg,
allowed_names=list(regional_set_ids.keys()),
table_name=key,
)
check_index(value.columns, key, reg, pd.Index(regional_set_ids[key]))
check_nan(key, value, reg)
if key == "Technologies":
check_tech_category(value, technology_categories, reg)
if key == "Carriers":
check_carrier_type(value, carrier_types, reg)
if key == "Carrier_input" or key == "Carrier_output":
check_mapping_values(
value,
key,
mapping[reg]["Technologies"],
"Technologies",
"Technology",
"Technology",
reg,
)
check_mapping_values(
mapping[reg]["Carrier_input"],
"Carrier_input",
mapping[reg]["Carriers"],
"Carriers",
"Carrier_in",
"Carrier",
reg,
)
check_mapping_values(
mapping[reg]["Carrier_output"],
"Carrier_output",
mapping[reg]["Carriers"],
"Carriers",
"Carrier_out",
"Carrier",
reg,
)
check_mapping_ctgry(
mapping[reg]["Carrier_input"],
"Carrier_input",
mapping[reg]["Technologies"],
"Supply",
reg,
)
check_mapping_ctgry(
mapping[reg]["Carrier_output"],
"Carrier_output",
mapping[reg]["Technologies"],
"Demand",
reg,
)
self.mapping = mapping
Technologies = {}
for reg in self.regions:
regional_tech = {}
for key in list(self.mapping[reg]["Technologies"]["Tech_category"]):
regional_tech[key] = list(
self.mapping[reg]["Technologies"].loc[
self.mapping[reg]["Technologies"]["Tech_category"] == key
]["Technology"]
)
Technologies[reg] = regional_tech
self.Technologies = Technologies
self._create_input_data()
def _create_input_data(self):
"""
Defines the sheets, indices and columns of the parameter files
"""
if len(self.regions) > 1:
# Create the columns of inter-regional links as a multi-index of the
# pairs of regions and the transmitted carriers
indexer = pd.MultiIndex.from_product(
[self.lines_list, self.glob_mapping["Carriers_glob"]["Carrier"]],
names=["Line", "Transmitted Carrier"],
)
self.connection_sheet_ids = {
"F_OM": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"V_OM": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Residual_capacity": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Capacity_factor_line": {
"value": 1,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Line_efficiency": {
"value": 1,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"AnnualProd_perunit_capacity": {
"value": 1,
"index": pd.Index(
["AnnualProd_Per_UnitCapacity"], name="Performance Parameter"
),
"columns": indexer,
},
}
self.global_sheet_ids = {
"Max_production_global": {
"value": 1e30,
"index": pd.Index(self.main_years, name="Years"),
"columns": self.glob_mapping["Technologies_glob"].loc[
(
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Demand"
)
& (
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Storage"
)
]["Technology"],
},
"Min_production_global": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": self.glob_mapping["Technologies_glob"].loc[
(
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Demand"
)
& (
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Storage"
)
]["Technology"],
},
"Glob_emission_cap_annual": {
"value": 1e30,
"index": pd.Index(self.main_years, name="Years"),
"columns": ["Global Emission Cap"],
},
}
if self.mode == "Planning":
self.connection_sheet_ids.update(
{
"INV": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Decom_cost": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Min_totalcap": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Max_totalcap": {
"value": 1e10,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Min_newcap": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Max_newcap": {
"value": 1e10,
"index": | pd.Index(self.main_years, name="Years") | pandas.Index |
import numpy as np # We recommend to use numpy arrays
import gc
import pandas as pd
import time
from multiprocessing import Pool
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from gensim.models.word2vec import Word2Vec  # needed by embedding_helper below
from sklearn.model_selection import KFold
from sklearn.decomposition import TruncatedSVD
def timmer(func):
def wrapper(*args, **kwargs):
start_time = time.time()
r = func(*args, **kwargs)
stop_time = time.time()
print("[Success] Info: function: {}() done".format(func.__name__))
print("the func run time is %.2fs" % (stop_time - start_time))
return r
return wrapper
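# Illustrative usage (the decorated function is hypothetical):
#     @timmer
#     def build_features(df):
#         ...
# calling build_features(df) then also prints the elapsed wall-clock time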
def left_merge(data1, data2, on):
if type(on) != list:
on = [on]
if (set(on) & set(data2.columns)) != set(on):
data2_temp = data2.reset_index()
else:
data2_temp = data2.copy()
columns = [f for f in data2.columns if f not in on]
result = data1.merge(data2_temp,on=on,how='left')
result = result[columns]
return result
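# left_merge returns only data2's non-key columns, left-joined and row-aligned with data1,
# which makes it convenient for assigning generated features back onto the main frame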
def universe_mp_generator(gen_func, feat_list):
"""
run a feature-generator function over a multiprocessing pool
"""
pool = Pool(4)
result = [pool.apply_async(gen_func, feats) for feats in feat_list]
pool.close()
pool.join()
return [aresult.get() for aresult in result]
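# feat_list is a list of argument tuples; each tuple is unpacked into gen_func inside a
# 4-process pool and the per-task results are returned in submission order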
def concat(L):
"""
concatenate a list of generated feature frames column-wise onto one dataframe
"""
result = None
for l in L:
if l is None:
continue
if result is None:
result = l
else:
try:
result[l.columns.tolist()] = l
except Exception as err:
print(err)
print(l.head())
return result
#################### UTILS for FE#############################
def count_helper(df, i):
"""
tools for multy thread count generator
"""
df['count_' + i] = df.groupby(i)[i].transform('count')
return df[['count_' + i]].fillna(-99999).astype(np.int32)
def hash_helper(df, i):
"""
tools for multy thread hash generator
"""
df['hash_' + i] = df[i].apply(hash)
return df[['hash_' + i]]
def bi_count_helper(df, i, j):
"""
tools for multy thread bi_count
"""
df['bicount_{}_{}'.format(i,j)] = df.groupby([i,j])[i].transform('count')
return df[['bicount_{}_{}'.format(i,j)]].fillna(-99999).astype(np.int32)
def cross_count_helper(df, i):
"""
tools for multy thread bi_count
"""
name = "count_"+ '_'.join(i)
df[name] = df.groupby(i)[i[0]].transform('count')
return df[[name]].fillna(-99999).astype(np.int32)
def fast_join(x):
r = ''
for i in x:
r += str(i) + ' '
return r
def agg_helper(main_df, df, num_col, col, table_name):
agg_dict = {}
agg_dict['AGG_min_{}_{}_{}'.format(table_name, num_col, col)] = 'min'
agg_dict['AGG_max_{}_{}_{}'.format(table_name, num_col, col)] = 'max'
agg_dict['AGG_mean_{}_{}_{}'.format(table_name, num_col, col)] = 'mean'
agg_dict['AGG_median_{}_{}_{}'.format(table_name, num_col, col)] = 'median'
#agg_dict['AGG_skew_{}_{}_{}'.format(table_name, num_col, col)] = 'skew'
agg_dict['AGG_var_{}_{}_{}'.format(table_name, num_col, col)] = 'var'
agg_result = df.groupby(col)[num_col].agg(agg_dict)
merged_result = left_merge(main_df[[col]], agg_result, on=[col])
# merged_result = main_df[[col]].merge(agg_result, on=[col], how='left')
return merged_result[[i for i in merged_result.columns if i != col]]
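# agg_helper builds group-by statistics (min/max/mean/median/var) of num_col for each value of col
# and aligns them back to the rows of main_df as new AGG_* feature columns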
def hist_helper(main_df, df, cat_col, key, table_name):
nfrac = min(10000, len(df)) / len(df)
#print(nfrac)
df = df.sample(frac = nfrac)
df[cat_col] = df[cat_col].cat.add_categories("NAN").fillna("NAN")
#seq_all = df.groupby([key])[cat_col].apply(lambda x:' '.join([str(i) for i in list(x)]))
seq_all = df.groupby([key])[cat_col].apply(lambda x: list(x))
#print(seq_all)
seq_all_uid = seq_all.index
cv = CountVectorizer(max_features = 4, analyzer= lambda xs:xs)#token_pattern='(?u)\\b\\w+\\b')
#vectorizer.get_feature_names()
seq_all_count = cv.fit_transform(seq_all.values)
print(cv.get_feature_names())
seq_all_lad = seq_all_count
seq_all_lad = pd.DataFrame(seq_all_lad.todense())
seq_all_lad.columns = ["TOP_HIST_FETURE_{}_{}_{}_{}".format(table_name, key, cat_col ,i) for i in seq_all_lad.columns]
# print(seq_all_lad.head(5))
seq_all_lad[key] = list(seq_all_uid)
#seq_all_lad[",".join(base_feat)] = list(seq_all_uid)
result = seq_all_lad
result = left_merge(main_df, result, on = [key])
return result
def diff_num_helper(df, time_col, col, num_col):
df['ke_cnt_' + col] = df.groupby(col)[time_col].rank(ascending=False,method = 'first')
df2 = df[[col, 'ke_cnt_' + col, num_col]].copy()
df2['ke_cnt_' + col] = df2['ke_cnt_' + col] - 1
df3 = pd.merge(df, df2, on=[col, 'ke_cnt_' + col], how='left')
    df['DIFF_{}_{}'.format(col, num_col)] = df3[num_col + '_x'] - df3[num_col + '_y']
del df2,df3
gc.collect()
return df[['DIFF_{}_{}'.format(col, num_col)]]
def lag_id_helper(df, time_col, col):
df['ke_cnt_' + col] = df.groupby(col)[time_col].rank(ascending=False,method = 'first')
df2 = df[[col, 'ke_cnt_' + col, time_col]].copy()
df2['ke_cnt_' + col] = df2['ke_cnt_' + col] - 1
df3 = pd.merge(df, df2, on=[col, 'ke_cnt_' + col], how='left')
df['LAG_{}_{}'.format(col, time_col)] = (df3[time_col +'_x'] - df3[time_col + '_y'])
df['LAG_{}_{}'.format(col, time_col)] = df['LAG_{}_{}'.format(col, time_col)] .values.astype(np.int64) // 10 ** 9
del df2,df3
gc.collect()
return df[['LAG_{}_{}'.format(col, time_col)]]
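# Illustrative usage sketch for lag_id_helper (column names are assumptions; it
# expects 'event_time' to be a datetime column so the lag can be cast to seconds):
def _example_lag_feature(df):
    return lag_id_helper(df, 'event_time', 'user_id')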
def base_embedding(x, model, size):
vec = np.zeros(size)
    x = [item for item in x if item in model.wv]
for item in x:
vec += model.wv[str(item)]
if len(x) == 0:
return vec
else:
return vec / len(x)
def embedding_helper(df, col):
    # Local import so the rest of the module works without gensim installed
    # (the module-level import above is commented out).
    from gensim.models.word2vec import Word2Vec
    input_ = df[col].fillna('NA').apply(lambda x: str(x).split(' '))
model = Word2Vec(input_, size=12, min_count=2, iter=5, window=5, workers=4)
data_vec = []
for row in input_:
data_vec.append(base_embedding(row, model, size=12))
svdT = TruncatedSVD(n_components=6)
data_vec = svdT.fit_transform(data_vec)
column_names = []
for i in range(6):
column_names.append('embedding_{}_{}'.format(col, i))
data_vec = pd.DataFrame(data_vec, columns=column_names)
df = | pd.concat([df, data_vec], axis=1) | pandas.concat |
# coding: utf-8
import logging
import multiprocessing
import os
import pickle
import sys
import time
from math import sqrt
import enchant
import numpy as np
import codecs
import simplejson
import pandas as pd
from glob import glob
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize
from sklearn.metrics import f1_score, accuracy_score, recall_score, \
precision_score
logging.basicConfig(filename='generate_lexicons_and_results.log')
log = logging.getLogger()
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
class GenerateLexicons(object):
""" Class for automatic generation of the sentimenal lexicons and
evaluating them based on provided dataset with star/scores or sentiment
values.
"""
def __init__(self, sentiment_generation_method='frequentiment',
levels=[1.43, 1.54, 0.19], lexicons_output=None, results_output='',
output_name='', rerun=False, n_tries=10, n_domains=10,
thresh=0.01, csv_path='/datasets/amazon-data/csv/',
train_test_path=None, start_name='', end_name='',
review_score_column='review/score',
review_text_column='review/text'):
"""
Initialization
Parameters
----------
sentiment_generation_method : str, from values ['frequentiment', 'potts']
Method of frequentiment generation.
        levels : list of floats
Threshold's level for ngrams
rerun : bool
            If True the experiment will be rerun; otherwise, if possible,
            previously computed values will be loaded.
n_tries : int
TODO
n_domains : int
Number of domain used in the experiment
        thresh : float, percentage point in range [0, 1]
            Threshold for word frequency in the dataset; only words with a frequency
            higher than this threshold will be used in the experiment.
csv_path : string path
Path to the directory with amazon csv files with reviews.
train_test_path : string path
Path to the directory with Cross-Validation splits
Returns
-------
"""
self.output_name = output_name
self.lexicons_output = lexicons_output
self.results_output = results_output
self.sentiment_generation_method = sentiment_generation_method
# TODO LSA and PMI
self.tries = n_tries
self.n_domains = n_domains # if None then all
self.levels = levels # the best threshold for uni, bi and trigram's
# frequentiment
self.thresh = thresh
self.rerun = rerun
# file's path etc.
self.csv_path = csv_path
if train_test_path is not None:
self.train_test_path = os.path.join(train_test_path)
else:
self.train_test_path = train_test_path
self.start_name = start_name
self.end_name = end_name
self.review_text_column = review_text_column
self.review_score_column = review_score_column
self.lexicons_output = os.path.join(self.lexicons_output,
'{}-lexicons.pkl'.format(
output_name))
if os.path.isfile(self.lexicons_output) and not self.rerun:
self.lexicon_exists = True
else:
self.lexicon_exists = False
self.results_output = os.path.join(self.results_output, 'lexicons',
'{}-results.pkl'.format(
output_name))
if os.path.isfile(self.results_output) and not self.rerun:
self.results_exists = True
else:
self.results_exists = False
self.final_lexicons = {}
# just initialize
self.train_test_subsets = {} # review's dataset
self.reviews = {}
self.results = {}
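    # Illustrative construction sketch (the paths and fold-file naming below are
    # assumptions, not taken from the original experiment configuration):
    #   gl = GenerateLexicons(sentiment_generation_method='frequentiment',
    #                         lexicons_output='results', results_output='results',
    #                         output_name='amazon', csv_path='/datasets/amazon-data/csv/',
    #                         train_test_path='/datasets/amazon-data/folds/',
    #                         start_name='train-test-', end_name='.pkl')
    #   gl.get_reviews_and_train_test_subsets()
    #   gl.generate_lexicons()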
    # TODO: rewrite this using glob
def get_reviews_and_train_test_subsets(self):
"""
Function for loading amazon review's data and train/test
Cross-Validation splits into internal fields. Paths to these files
should be specified in initialization of the object.
"""
if self.n_domains is not None:
domains = os.listdir(self.train_test_path)[:self.n_domains]
else:
domains = os.listdir(self.train_test_path)
for fn in domains:
if fn.startswith(self.start_name):
item = fn.replace(self.start_name, '').replace(self.end_name,
'')
log.debug('Set item: {}'.format(item))
                with open(os.path.join(self.train_test_path, fn), 'rb') as fp:
self.train_test_subsets[item] = pickle.load(fp)
# break
for set_name in self.train_test_subsets:
log.debug('Load reviews domain {}'.format(set_name))
self.reviews[set_name] = pd.read_csv(
self.csv_path + set_name + '.txt.gz.csv',
sep=';')
def get_reviews(self, filter_str=None, file_type='csv', nrows=None,
sep=';'):
if file_type is None:
datasets = glob(os.path.join(self.csv_path, '*'))
else:
datasets = glob(
os.path.join(self.csv_path, '*{}*'.format(filter_str)))
for dataset in datasets:
try:
log.debug('Start loading for {}'.format(dataset))
dataset_name = os.path.basename(dataset).split('.')[0]
log.debug('Load reviews domain {}'.format(dataset_name))
if file_type in ['txt', 'csv']:
log.debug('Load txt/csv file: {}'.format(dataset))
self.reviews[dataset_name] = pd.read_csv(dataset, sep=sep,
nrows=nrows)
elif file_type in ['json']:
log.debug('Load JSON file: {}'.format(dataset))
d = {}
with codecs.open(dataset, 'r') as f:
for idx, line in enumerate(f):
if idx < nrows or nrows is None:
d[idx] = simplejson.loads(line)
self.reviews[dataset_name] = pd.DataFrame.from_dict(d,
orient='index')
else:
raise Exception('Unknown file type')
except IOError:
raise IOError('Problem with file: {}'.format(dataset))
def handle_ngram(self, ngram, grade, words_occured, sentiment_dict,
count_dict):
if ngram in words_occured:
return
# if sentiment_dict.has_key(ngram):
if ngram in sentiment_dict:
sentiment_dict[ngram][grade - 1] += 1
else:
sentiment_dict[ngram] = [0, 0, 0, 0, 0]
sentiment_dict[ngram][grade - 1] = 1
# if count_dict.has_key(ngram):
if ngram in count_dict:
count_dict[ngram] += 1
else:
count_dict[ngram] = 1
words_occured.append(ngram)
return
def make_word_dict_unique_per_review(self, row, sentiment_dict, count_dict,
d1, d2, stop):
grade = int(float(row[self.review_score_column]))
sentences = map(lambda x:
filter(lambda y: y not in ['!', '?', '.'],
word_tokenize(x.lower())),
sent_tokenize(row[self.review_text_column])
)
words_occured = [[], [], []]
for sentence in sentences:
for word in sentence:
if d1.check(word) and d2.check(word) and len(word) > 4:
self.handle_ngram(word, grade, words_occured[0],
sentiment_dict[0],
count_dict[0])
for bigram in zip(sentence, sentence[1:]):
if all(map(lambda x: d1.check(x) and d2.check(x), bigram)):
self.handle_ngram(" ".join(bigram), grade, words_occured[1],
sentiment_dict[1], count_dict[1])
for trigram in zip(sentence, sentence[1:], sentence[2:]):
if all(map(lambda x: d1.check(x) and d2.check(x), trigram)):
self.handle_ngram(" ".join(trigram), grade,
words_occured[2],
sentiment_dict[2], count_dict[2])
def get_count_by_rating(self, df):
grouped_reviews = df.groupby(self.review_score_column)
counted = grouped_reviews.count()
cardinalities = [0, 0, 0, 0, 0]
for gr in counted.index:
gr_index = int(float(gr)) - 1
cardinalities[gr_index] = counted[self.review_text_column][gr]
return (cardinalities)
@staticmethod
def frequentiment(w, train_dict, grade_weights, cardinalities):
"""
Counting sentiment lexicons based on frequetiment measure proposed by us
Parameters
----------
w :
Returns
-------
"""
return sum(
[(sum(cardinalities) / cardinalities[gr]) * grade_weights[gr] *
train_dict[w][gr] / sum(train_dict[w]) for gr in range(0, 5)])
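    # In formula form, the score computed above is (a sketch of the code's arithmetic):
    #   frequentiment(w) = sum_{g=1..5} (N / N_g) * w_g * c(w, g) / sum_g c(w, g)
    # where N_g is the number of reviews with grade g, N = sum_g N_g,
    # w_g = (g - 3) / 2 is the grade weight, and c(w, g) is the number of
    # reviews of grade g containing the ngram w.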
@staticmethod
def potts(w, train_dict, grade_weights):
"""
        Generate the sentiment score based on Christopher Potts' method:
http://sentiment.christopherpotts.net/lexicons.html#counts
"""
return sum(
[grade_weights[gr] * train_dict[w][gr] / sum(train_dict[w]) for gr
in range(0, 5)])
def create_single_sentiment(self, train_dict, cardinalities):
sentiments = {}
grade_weights = [(((x + 1) - 3) / 2.0) for x in range(0, 5)]
# we will get stuff like 2.3*Exp[-16] which is just a numpy
# numerical artifact, this should be zero, we have no data to
# provide a sentiment of happening once in 16 millions because
# we only have a sum(cardinalities) number of reviews
# zero_thr = 1.0 / float(sum(cardinalities))
if self.sentiment_generation_method.lower() in ['frequentiment']:
log.info('Frequentiment sentiment generation method is chosen')
for w in train_dict:
sentiments[w] = self.frequentiment(w, train_dict, grade_weights,
cardinalities)
elif self.sentiment_generation_method.lower() in ['potts']:
log.info('Potts\' sentiment generation method is chosen')
for w in train_dict:
sentiments[w] = self.potts(w, train_dict, grade_weights)
else:
raise NameError(
'Wrong sentiment lexicon generation method was specified: {}!'.format(
self.sentiment_generation_method))
return (sentiments) # tuple should be returned
def prepare_unique_sentiment_dict(self, df, t=0.01):
sentiment_dict = [{}, {}, {}]
count_dict = [{}, {}, {}]
us = enchant.Dict("en_US")
en = enchant.Dict("en_GB")
s = stopwords.words('english')
l = len(df)
df.apply(
lambda x: self.make_word_dict_unique_per_review(x, sentiment_dict,
count_dict, us, en,
s), axis=1)
for i in range(len(count_dict)):
for w in count_dict[i]:
if count_dict[i][w] < t * l or len(w) < 4 or (
i == 0 and w in s):
del sentiment_dict[i][w]
return sentiment_dict, count_dict
def get_frequentidict(self, data_dict, cardinalities):
sentiment_dict = [None for _ in range(len(data_dict))]
for i in range(len(data_dict)):
sentiments = self.create_single_sentiment(data_dict[i],
cardinalities)
if len(sentiments) == 0:
sentiment_dict[i] = None
continue
sentiment_dict[i] = pd.DataFrame.from_dict(sentiments,
orient='index')
sentiment_dict[i].columns = ['sentiment']
            sentiment_dict[i].sort_values('sentiment', inplace=True)
return sentiment_dict
@staticmethod
def cosine_distance(u, v):
"""
Returns the cosine of the angle between vectors v and u. This is equal
to u.v / |u||v|.
"""
return np.dot(u, v) / (sqrt(np.dot(u, u)) * sqrt(np.dot(v, v)))
def cosentiment(self, w, tfidf, voc):
positive = ['good', 'nice', 'excellent', 'positive', 'fortunate',
'correct', 'superior']
negative = ['bad', 'nasty', 'poor', 'negative', 'unfortunate', 'wrong',
'inferior']
pos = sum(
[self.cosine_distance(tfidf.T[voc[w]].A[0], tfidf.T[voc[p]].A[0])
for p in positive if p in voc])
neg = sum(
[self.cosine_distance(tfidf.T[voc[w]].A[0], tfidf.T[voc[p]].A[0])
for p in negative if p in voc])
return pos - neg
# TODO LSA multiproc
# def generate_LSA(self):
# for set_name in sets:
# final_lexicons[set_name] = [None for i in range(tries)]
#
# for set_name in sets:
# for cross_validation in range(tries):
# print("{}-{}".format(set_name, str(cross_validation)))
# start = time.time()
# df = reviews[set_name].iloc[sets[set_name][cross_validation][0]]
# ziewak = TfidfVectorizer()
# dane1 = ziewak.fit_transform(df['review/text'])
#
# svd = TruncatedSVD(150)
# data1 = svd.inverse_transform(svd.fit_transform(dane1))
#
# voc = ziewak.vocabulary_
# # fqdt = create_single_sentiment(dane1, voc)
#
# fqdt = create_single_sentiment(np.matrix(data1), voc)
#
# end = time.time()
# final_lexicons[set_name][cross_validation] = [fqdt, end - start]
# print(end-start)
#
# with open(self.csv_path + "/lexicons/lsalexicons-%s.pkl" % fn, "w") as fp:
# pickle.dump(self.final_lexicons, fp)
def generate_lexicons(self):
"""
Generate lexicons based on specified in initialization conditions with
multiprocessing.
"""
if not self.lexicon_exists:
log.info('New lexicons will be generated in {}'.format(
self.lexicons_output))
for set_name in self.reviews:
log.debug('CV folds: {}'.format(self.tries))
# log.debug([None for i in range(self.tries)])
self.final_lexicons[set_name] = [None for _ in
range(self.tries)]
log.info('Distributed code starts here')
result_queue = multiprocessing.Queue()
jobs = []
for set_name in self.reviews:
log.info('Add process for {}'.format(set_name))
p = multiprocessing.Process(target=self.generate_lexicon,
args=(set_name, result_queue))
p.start()
jobs.append(p)
# must be before join, otherwise it could create deadlock
[self.final_lexicons.update(result_queue.get()) for j in jobs]
# wait for all processes to end
log.info('Waiting for all processes')
[j.join() for j in jobs]
log.info('All processes joined')
# log.info(self.final_lexicons)
# retrieve outputs from each Process
log.info('End of parallel code!')
with open(self.lexicons_output, "w") as fp:
pickle.dump(self.final_lexicons, fp)
log.info('Lexicon save in {}'.format(self.lexicons_output))
# return self.final_lexicons
else:
log.info(
                'Lexicon has already been generated; it will be loaded from {}'.format(
self.lexicons_output))
self.final_lexicons = | pd.read_pickle(self.lexicons_output) | pandas.read_pickle |
# -------------------------------------------------- ML 02/10/2019 ----------------------------------------------------#
#
# This is the class for poisson process
#
# -------------------------------------------------------------------------------------------------------------------- #
import numpy as np
import pandas as pd
import math
from handles.data_hand import get_slotted_data
from sklearn.linear_model import LinearRegression
from scipy.stats import kstest
import statsmodels.api as sm
import statsmodels.formula.api as smf
from modeling.stat.models import fit_neg_binom
from scipy.stats import expon,gamma,nbinom
import random
random.seed( 30 )
class poisson_process:
def __init__(self,events,x,slotmin=60,sesonality=24.00,x_meta=None,combine=None,variablity_lambda=True):
        # x holds the numeric features that lambda depends on.
        # x_meta holds the categorical features that lambda depends on.
        # Seasonality is the period at which the time series wraps around, e.g. 24 hours.
        # x can contain arbitrary factor levels, with "_" separating the categories; however,
        # each category should be encoded by a numeric indicator.
self.x_names = np.array( x.columns )
self.ts = np.array(events)
self.x = np.array(x)
self.x_meta=x_meta
self.slotmin = slotmin
self.sesonality = float( sesonality )
self.processed_data = self.get_combined_ts_data(combine=combine)
self.def_scale_multiplier()
self._variablity_lambda = variablity_lambda
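    # Illustrative construction sketch (the column names below are assumptions about
    # the expected input schema, not taken from the original data):
    #   pp = poisson_process(events=df['arrival_time'],
    #                        x=df[['factor_levels', 'start_slot']],
    #                        slotmin=60, sesonality=24.0)
    # where 'factor_levels' is a string column of "_"-separated numeric categories
    # (split internally into f0, f1, ...) and 'start_slot' is the time-slot column.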
def combine_timeslots(self,x,combine):
p = x.copy()
p[np.in1d(x, combine)] = combine[0]
return p
def poles_fun(self,d):
return pd.DataFrame(d).apply(lambda x: 1/(x**3))
def def_scale_multiplier(self):
        # this is based on empirical data
average_mat = pd.DataFrame({'2014':[0.237053898,0.23033784,0.22646637,0.224855127,0.22145071,0.22017719,0.219680942],
'2015':[0.190591233,0.185363899,0.183113651,0.180825924,0.179276851,0.179478113,0.17919847]}).T
average_mat.columns = [1000,1100,1200,1300,1400,1500,1600]
average_mat=average_mat.reset_index()
average_mat=average_mat.melt(id_vars=["index"],var_name="Poles",value_name="Value")
cols = ['year','poles','scale']
average_mat.columns = cols
average_mat[cols] = average_mat[cols].apply(pd.to_numeric, errors='coerce')
average_mat['poles']=self.poles_fun(average_mat['poles'])
regressor = LinearRegression()
regressor.fit(average_mat[['year','poles']], average_mat['scale'])
self.scale_multiplier_predictor = regressor
self.reset_scale_multiplier()
def reset_scale_multiplier(self):
self._scale_multiplier = 1
def avg_scale_pred(self,year,poles):
return self.scale_multiplier_predictor.predict(np.array([year,
np.array(self.poles_fun([poles]))]).reshape(1, -1))
def get_processed_data(self):
diff_col_name = 'Aarrival_diff'
delta_t = np.diff(self.ts, n=1).reshape(-1, 1)
fin_d = pd.DataFrame(np.concatenate((delta_t, self.x[:-1, :]), axis=1))
fin_d.columns = np.concatenate(
(np.array(diff_col_name).reshape(-1, 1), np.array(self.x_names).reshape(-1, 1)), axis=0).flatten()
fin_d[diff_col_name] = pd.to_numeric(fin_d[diff_col_name])
# split the values in the factor that was provided to us
split = fin_d[self.x_names[0]].str.split("_", -1)
n = []
for i in range(0, len(split[0])):
fin_d['f' + str(i)] = split.str.get(i)#.astype(float) # update this if code breaks
n.append('f' + str(i))
n.append(self.x_names[1])
self.all_names = n
fin_d = fin_d.sort_values(by=n)
return fin_d
def get_combined_ts_data(self,combine):
        # Combine time slots:
        # if the `combine` argument is given (an array of time slots to merge), those
        # slots are replaced with the first element of the combine array.
        # `Start_time_internal` holds the (possibly combined) time slots the data is modeled on.
self.processed_data = self.get_processed_data()
self.combine = combine
if combine is None:
self.combined_slots = False
combined_timeslots = self.processed_data[self.x_names[1]]
else:
self.combined_slots = True
combined_timeslots = self.combine_timeslots(self.processed_data[self.x_names[1]], combine=combine)
self.processed_data['Start_time_internal'] = combined_timeslots
return self.processed_data
def get_slotted_data(self,data, slot_secs):
return get_slotted_data(data=data,slot_secs=slot_secs)
# ------------------------------------------- FITTING --------------------------------------------------------------
def daywise_training_data(self,d,combine,fac1,fac2,f1,days,orignal_start_slot):
        # fac2 holds our internal (combined) time slots.
        # Note that we compute the average for the combined slots and then assign it to
        # every individual slot within that duration.
if self.combined_slots:
x = fac2[(fac1 == f1)]
day = days[(fac1 == f1)]
model_d = []
for day_i in np.unique(day):
model_d_temp = []
for t_i in np.unique(x):
try:
model_d_temp.append([[t_i, expon.fit(pd.to_numeric(d[(x == t_i) & (day == day_i)]))[1], day_i]])
except:
continue
model_d_temp = np.vstack(model_d_temp)
scale_val = model_d_temp[(model_d_temp[:, 0] == combine[0])].flatten()[1]
add = [[i, scale_val, day_i] for i in combine[1:]]
model_d_temp = np.concatenate((model_d_temp, add))
model_d.append(model_d_temp)
model_d = np.vstack(model_d)
else:
x = orignal_start_slot[(fac1 == f1)]
day = days[(fac1 == f1)]
model_d = []
for day_i in np.unique(day):
model_d_temp = []
for t_i in np.unique(x):
try:
model_d_temp.append([[t_i, expon.fit(pd.to_numeric(d[(x == t_i) & (day == day_i)]))[1], day_i]])
except:
continue
model_d_temp = np.vstack(model_d_temp)
model_d.append(model_d_temp)
model_d = np.vstack(model_d)
return model_d
def discreet_fit_model(self,data,x):
data_gamma = pd.DataFrame({'days':data, 'arrivalslot':x,'indicator':1})
data_gamma = data_gamma.groupby(['days','arrivalslot']).agg(['count']).reset_index()
data_gamma.columns = ['days', 'arrivalslot','count']
data_save = data_gamma['count']
x_save = data_gamma['arrivalslot']
ks_t_D = pd.DataFrame()
ks_t_pval = pd.DataFrame()
t_t_pval = pd.DataFrame()
exp_loc = pd.DataFrame()
exp_scale = pd.DataFrame()
exp_shape = pd.DataFrame()
time_slot = pd.DataFrame()
pos_l = pd.DataFrame()
neg_bio_r = pd.DataFrame()
neg_bio_p = pd.DataFrame()
for f2 in np.unique(data_gamma['arrivalslot']):
d = pd.to_numeric( data_gamma[data_gamma['arrivalslot'] == f2]['count'] )
            # poisson
lam = np.mean(d)
# gamma
alpha,loc, beta = gamma.fit(d,loc=0)
# ks test
D , kspval = kstest(d,'gamma', args=(alpha,loc,beta))
# ttest - one sided
# sample2 = gamma.rvs(a = alpha, loc=loc, scale=beta, size=d.shape[0])
val , pval = 0,0 #ttest_ind(d,sample2)
# neg_binom
r,p = fit_neg_binom(vec=np.array(d).flatten(),init=0.0000001)
# if we have combined data then add same model to all combined timeslots
if self.combined_slots and f2 == self.combine[0]:
for var in self.combine:
pos_l = pos_l.append(pd.DataFrame([lam]))
exp_loc = exp_loc.append(pd.DataFrame([loc]))
exp_shape = exp_shape.append(pd.DataFrame([alpha]))
exp_scale = exp_scale.append(pd.DataFrame([beta]))
neg_bio_r = neg_bio_r.append(pd.DataFrame([r]))
neg_bio_p = neg_bio_p.append( | pd.DataFrame([p]) | pandas.DataFrame |
"""This module contains auxiliary functions for the creation of tables in the main notebook."""
import json
import scipy
import numpy as np
from numpy import nan
import pandas as pd
import pandas.io.formats.style
import seaborn as sns
import statsmodels as sm
import statsmodels.formula.api as smf
import statsmodels.api as sm_api
import matplotlib as plt
import matplotlib.pyplot as pltpy
from IPython.display import HTML
from stargazer.stargazer import Stargazer
import math
import linearmodels as lm
from linearmodels import PanelOLS
from auxiliary.auxiliary_plots import *
from auxiliary.auxiliary_prepare_data import *
def did_est(variable,df):
'''
    arguments: variable of interest, dataset
    return: 2x2 difference-in-differences table
'''
# NJ Before and after
NJ_before = round(df.loc[(df['state']==1) & (df['time']!=1),variable].mean(),2)
NJ_after = round(df.loc[(df['state']==1) & (df['time']==1),variable].mean(),2)
# PA Before and after
PA_before = round(df.loc[(df['state']!=1) & (df['time']!=1),variable].mean(),2)
PA_after = round(df.loc[(df['state']!=1) & (df['time']==1),variable].mean(),2)
did= pd.DataFrame(data=np.array([[NJ_after, NJ_before,NJ_after - NJ_before],
[PA_after, PA_before, PA_after - PA_before]]), index=['NJ','PA'],columns=['after','before','$\Delta$'])
return did
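# Illustrative call (a sketch; 'fte' is a hypothetical outcome column standing in
# for whichever outcome the dataset actually provides, e.g. full-time-equivalent employment):
def _example_did(df):
    return did_est('fte', df)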
def table1(df):
'''
replication Table 1
argument:dataset
return: table1 from the paper
'''
state_unique= df["state"].unique()
status_unique= df["status2"].unique()
table1 = pd.DataFrame()
for i in state_unique:
for j in status_unique:
table1.at[j,i]= sum((df.state == i) & (df.status2 == j))
table1["All"]=table1[0]+table1[1]
table1=table1.append(table1.sum(numeric_only=True), ignore_index=True)
table1 = table1.rename({0: 'PA',1: 'NJ'}, axis=1)
    table1 = table1.rename({0: 'interviewed stores:',1: 'closed stores:',2:'temp closed - highway',
3:'under renovation:',4:'refusals:',5:'temp closed - fire',6:'total stores in sample:'}, axis=0)
return table1
#table2 - panel 1
def distr_store_type(data_NJ,data_PA):
'''
replication Table 2 - panel 1
    argument: subset for New Jersey, subset for Pennsylvania
    return: distribution of store types (Table 2, panel 1) from the paper
'''
variables=['bk','kfc','roys','wendys','co_owned']
NJ= | pd.DataFrame() | pandas.DataFrame |
"""Backtesting Controller Module"""
__docformat__ = "numpy"
import argparse
import os
from typing import List
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
from prompt_toolkit.completion import NestedCompleter
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.helper_funcs import (
check_positive,
get_flair,
parse_known_args_and_warn,
try_except,
)
from gamestonk_terminal.menu import session
from gamestonk_terminal.stocks.stocks_helper import load
# The code below works around an issue with the fnn module (used by the bt module),
# which forces the matplotlib backend to 'agg' and therefore prevents plotting.
# Save current matplotlib backend
default_backend = mpl.get_backend()
# pylint: disable=wrong-import-position
from gamestonk_terminal.stocks.backtesting import bt_view # noqa: E402
# Restore the matplotlib backend that was previously in use
mpl.use(default_backend)
class BacktestingController:
"""Backtesting Class"""
CHOICES = ["?", "cls", "help", "q", "quit", "load"]
CHOICES_COMMANDS = ["ema", "ema_cross", "rsi"]
CHOICES += CHOICES_COMMANDS
def __init__(self, ticker: str, stock: pd.DataFrame):
self.ticker = ticker
self.stock = stock
self.bt_parser = argparse.ArgumentParser(add_help=False, prog="bt")
self.bt_parser.add_argument(
"cmd",
choices=self.CHOICES,
)
def print_help(self):
"""Print help"""
help_text = f"""
Backtesting:
cls clear screen
?/help show this menu again
    q              quit this menu and return to the main menu
quit quit to abandon program
load load new ticker to analyze
Current Ticker: {self.ticker.upper()}
ema buy when price exceeds EMA(l)
ema_cross buy when EMA(short) > EMA(long)
rsi buy when RSI < low and sell when RSI > high
"""
print(help_text)
def switch(self, an_input: str):
"""Process and dispatch input
Returns
-------
True, False or None
False - quit the menu
True - quit the program
None - continue in the menu
"""
# Empty command
if not an_input:
print("")
return None
(known_args, other_args) = self.bt_parser.parse_known_args(an_input.split())
# Help menu again
if known_args.cmd == "?":
self.print_help()
return None
# Clear screen
if known_args.cmd == "cls":
os.system("cls||clear")
return None
return getattr(
self, "call_" + known_args.cmd, lambda: "Command not recognized!"
)(other_args)
def call_help(self, _):
"""Process Help command"""
self.print_help()
def call_q(self, _):
"""Process Q command - quit the menu"""
return False
def call_quit(self, _):
"""Process Quit command - quit the program"""
return True
def call_load(self, other_args: List[str]):
"""Process load command"""
self.ticker, _, _, self.stock = load(
other_args, self.ticker, "", "1440", | pd.DataFrame() | pandas.DataFrame |
import os
import sys
import argparse
import pandas as pd
from utils import simplify_string_for_hdf5
parser = argparse.ArgumentParser(description='S-PrediXcan results processor.')
parser.add_argument('--spredixcan-hdf5-folder', required=True, type=str)
parser.add_argument('--spredixcan-hdf5-file-template', required=False, type=str, default='spredixcan-{tissue}-{column}.h5')
parser.add_argument('--tissue-id', required=True, type=int)
parser.add_argument('--phenotype-id', required=True, type=int)
parser.add_argument('--phenotypes-info-file', required=True, type=str)
parser.add_argument('--tissues-info-file', required=True, type=str)
parser.add_argument('--genes-info-file', required=True, type=str)
parser.add_argument('--no-header', required=False, action='store_true')
args = parser.parse_args()
# pheno info
pheno_info = | pd.read_csv(args.phenotypes_info_file, sep='\t', index_col='pheno_id') | pandas.read_csv |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
import os
import yaml
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
# from .utils import Boba_Utils as u
# from ._03_Modeling import Boba_Modeling as m
class Boba_Sys_Diagnostics():
def __init__(self):
pass
def run_sys_scoring(self, model, target,prod):
if prod == True:
pass
elif (self.position_group == 'hitters' and target in ['BABIP','BB%','K%']):
pass
elif (self.position_group == 'SP' and target in ['OBP','SLG','ShO_per_GS','CG_per_GS']):
pass
elif (self.position_group == 'RP' and target in ['OBP','SLG','HLD_per_G']):
pass
else:
master_df = pd.read_csv('data/processed/'+self.position_group+'/master_df.csv',index_col=0)
path = 'data/scoring/evaluation_'+self.position_group+'_'+str(self.year-1)+'.csv'
if os.path.exists(path):
print('does exist')
evaluation_df = pd.read_csv(path,index_col=0)
else:
print('does not exist')
data_group = 'hitters' if self.position_group == 'hitters' else 'pitchers'
zips_df = pd.read_csv('data/raw/'+data_group+'/projection_systems/zips/'+str(self.year-1)+'.csv')
atc_df = pd.read_csv('data/raw/'+data_group+'/projection_systems/atc/'+str(self.year-1)+'.csv')
bat_df = pd.read_csv('data/raw/'+data_group+'/projection_systems/thebat/'+str(self.year-1)+'.csv')
stmr_df = pd.read_csv('data/raw/'+data_group+'/projection_systems/steamer/'+str(self.year-1)+'.csv')
zips_df = zips_df.rename(columns={"K/9": "K_per_9", "BB/9": "BB_per_9"})
atc_df = atc_df.rename(columns={"K/9": "K_per_9", "BB/9": "BB_per_9"})
bat_df = bat_df.rename(columns={"K/9": "K_per_9", "BB/9": "BB_per_9"})
stmr_df = stmr_df.rename(columns={"K/9": "K_per_9", "BB/9": "BB_per_9"})
evaluation_df = master_df[master_df['Season']==(self.year-1)]
evaluation_df['playerID'] = evaluation_df['playerID'].astype('str')
if self.position_group == 'hitters':
evaluation_df = evaluation_df[self.information_cols+[self.pt_metric]+self.model_targets+self.counting_stats]
zips_df = zips_df[['playerid']+[self.pt_metric]+[x for x in zips_df.columns if x in self.model_targets]+[x for x in zips_df.columns if x in self.counting_stats]]
atc_df = atc_df[['playerid']+[self.pt_metric]+[x for x in atc_df.columns if x in self.model_targets]+[x for x in atc_df.columns if x in self.counting_stats]]
bat_df = bat_df[['playerid']+[self.pt_metric]+[x for x in bat_df.columns if x in self.model_targets]+[x for x in bat_df.columns if x in self.counting_stats]]
stmr_df = stmr_df[['playerid']+[self.pt_metric]+[x for x in stmr_df.columns if x in self.model_targets]+[x for x in stmr_df.columns if x in self.counting_stats]]
else:
evaluation_df = evaluation_df[self.information_cols+[self.pt_metric]+[self.per_metric]+self.model_targets+self.counting_stats]
zips_df = zips_df[['playerid']+[self.pt_metric]+[self.per_metric]+[x for x in zips_df.columns if x in self.model_targets]+[x for x in zips_df.columns if x in self.counting_stats]]
atc_df = atc_df[['playerid']+[self.pt_metric]+[self.per_metric]+[x for x in atc_df.columns if x in self.model_targets]+[x for x in atc_df.columns if x in self.counting_stats]]
bat_df = bat_df[['playerid']+[self.pt_metric]+[self.per_metric]+[x for x in bat_df.columns if x in self.model_targets]+[x for x in bat_df.columns if x in self.counting_stats]]
stmr_df = stmr_df[['playerid']+[self.pt_metric]+[self.per_metric]+[x for x in stmr_df.columns if x in self.model_targets]+[x for x in stmr_df.columns if x in self.counting_stats]]
evaluation_df = pd.merge(evaluation_df,zips_df,how='left',left_on='playerID', right_on='playerid',suffixes=('','_zips')).drop('playerid',axis=1)
evaluation_df = pd.merge(evaluation_df,atc_df,how='left',left_on='playerID', right_on='playerid',suffixes=('','_atc')).drop('playerid',axis=1)
evaluation_df = pd.merge(evaluation_df,bat_df,how='left',left_on='playerID', right_on='playerid',suffixes=('','_bat')).drop('playerid',axis=1)
evaluation_df = pd.merge(evaluation_df,stmr_df,how='left',left_on='playerID', right_on='playerid',suffixes=('','_stmr')).drop('playerid',axis=1)
evaluation_df.to_csv(path)
temp_df = master_df[master_df['Season']==(self.year-1)]
temp_df['Season'] = (self.year-2)
temp_df = self.isolate_relevant_columns(modeling_df = temp_df,target = target)
temp_df = temp_df.drop([target],axis=1)
pipeline = pickle.load(open('data/modeling/'+self.position_group+'/'+target+'/preprocessing_pipeline_eval.sav', 'rb'))
temp_df_2 = pipeline.transform(temp_df)
with open(r'data/modeling/'+self.position_group+'/'+target+'/model_features_eval.yaml') as file:
yaml_data = yaml.load(file, Loader=yaml.FullLoader)
model_features = yaml_data[target]
temp_df = pd.DataFrame(temp_df_2, columns = model_features,index=temp_df.index)
temp_df[target+'_Boba'] = model.predict(temp_df)
temp_df = temp_df[[target+'_Boba']]
evaluation_df = evaluation_df.drop([target+'_Boba'],axis=1,errors='ignore')
new_df = | pd.merge(evaluation_df,temp_df,left_index=True,right_index=True) | pandas.merge |
import pandas as pd
from pandas import DataFrame
import sys
#--------
# Imports medi dataset with icd9 and rxcui descriptions to .csv file
# PARAMETERS:
# medi = medi spreadsheet
# icd9_desc = contains icd9 codes and their descriptions
# rxcui_desc = contains rxcui codes and their descriptions
def add_info_to_medi(medi, icd9_desc, rxcui_desc):
# adding in icd9 descriptions
df_icd9_desc = pd.read_table(icd9_desc, sep=' ', header=None, usecols=[0, 1])
df_icd9_desc.columns = ['ICD9', 'ICD9_DESC']
# adding in rxcui descriptions into the medi spreadsheet
df_rxcui_desc = pd.read_csv(rxcui_desc, encoding='latin-1').drop_duplicates().groupby('RXCUI_IN')['STR'].apply('; '.join)
rxcui_desc = pd.DataFrame({'RXCUI_IN': df_rxcui_desc.index, 'STR': df_rxcui_desc.values})
df_medi = pd.read_csv(medi)
df_medi_desc = pd.merge(df_medi, rxcui_desc, how='left', on='RXCUI_IN')
df_rxcui_icd9 = pd.merge(df_medi_desc, df_icd9_desc, how='left', on='ICD9')
df_rxcui_icd9 = df_rxcui_icd9[['RXCUI_IN', 'STR', 'DRUG_DESC', 'ICD9', 'ICD9_DESC', 'INDICATION_DESCRIPTION', 'MENTIONEDBYRESOURCES',
'HIGHPRECISIONSUBSET', 'POSSIBLE_LABEL_USE']]
df_rxcui_icd9.to_csv('medi_with_icd9_rxcui.csv', index=False)
#--------
# Writes the medi_rxcui_icd9 dataset, joined with ICD9-phecode mappings, to a .csv file
# Maps drug (rxcui codes) with clinical phenotype (phecode) through icd9 codes
# PARAMETERS:
# medi_rxcui_icd9 = medi spreadsheet (created from add_info_to_medi function above) with rxcui + icd9 descriptions
# phecode_icd9_mapping = maps phecodes to icd9 codes
def drug_phenotype(phecode_icd9_mapping, medi_rxcui_icd9):
df_rxcui_icd9 = pd.read_csv(medi_rxcui_icd9)
df_phecode_icd9 = pd.read_csv(phecode_icd9_mapping, usecols=['ICD9', 'PheCode'])
result = pd.merge(df_rxcui_icd9, df_phecode_icd9, how='left', on='ICD9').drop_duplicates().sort_values('RXCUI_IN')
result.to_csv('drug_phenotype.csv', index=False)
#print (result)
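# Illustrative call sequence (a sketch; the input file names are placeholders --
# only 'medi_with_icd9_rxcui.csv' is real, since it is written by add_info_to_medi above):
def _example_medi_pipeline():
    add_info_to_medi('MEDI.csv', 'icd9_descriptions.txt', 'rxcui_descriptions.csv')
    drug_phenotype('phecode_icd9_map.csv', 'medi_with_icd9_rxcui.csv')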
#--------
# Writes the medi_rxcui_icd9 dataset, joined with drug-targeted gene mappings, to a .csv file
# Maps drugs (rxcui codes) with corresponding targeted genes (HuGOIDs) through unii codes and DrugBank drug IDs
# PARAMETERS:
# unii_rxcui = contains mapping of unii codes to rxcui codes
# unii_drug = contains mapping of unii codes to HuGOIDs (DrugBank), needs to be .txt file
# medi_rxcui_icd9 = medi spreadsheet (created from add_info_to_medi function above) with rxcui + icd9 descriptions
# drug_gene = for each gene, contains list of drugs that target said gene
def drug_gene(unii_rxcui, unii_drug, drug_gene, medi_rxcui_icd9):
df_unii_rxcui = pd.read_csv(unii_rxcui)
df_unii_drug = pd.read_table(unii_drug, header=0, sep=':', usecols=['unii', 'drug_id'])
df_rxcui_icd9 = pd.read_csv(medi_rxcui_icd9)
# drugbank id and rxcui mapping
data1 = pd.merge(df_unii_drug, df_unii_rxcui, how='left',
on='unii').drop('unii', axis=1).drop_duplicates()
# splits drugs for each gene in individual cell
data2 = pd.read_csv(drug_gene, usecols=['Drug IDs', 'Gene Name'])
df_drugbank_gene = DataFrame(data2['Drug IDs'].str.split('; ').tolist(),
index=data2['Gene Name']).stack().reset_index()[[0, 'Gene Name']] # var1 variable is currently labeled 0
df_drugbank_gene.columns = ['drug_id', 'Gene Name']
df_drugbank_gene = df_drugbank_gene.dropna(how='any', axis=0)
# for each drug combines all targeted genes into one cell
data3 = df_drugbank_gene.drop_duplicates().groupby('drug_id')['Gene Name'].apply('; '.join)
data4 = pd.DataFrame({'drug_id': data3.index, 'Gene Name': data3.values})
drug_rxcui = pd.merge(data1, data4, how='left', on='drug_id').drop_duplicates()
result = | pd.merge(df_rxcui_icd9, drug_rxcui, how='left', on='RXCUI_IN') | pandas.merge |
# coding: utf-8
import json
import pandas as pd
import numpy as np
import glob
import ast
from modlamp.descriptors import *
import re
import cfg
import os
def not_in_range(seq):
if seq is None or len(seq) < 1 or len(seq) > 80:
return True
return False
def bad_terminus(peptide):
if peptide.nTerminus[0] is not None or peptide.cTerminus[0] is not None:
return True
return False
def is_valid(peptide):
try:
seq = peptide.seq[0]
if not seq.isupper():
return False
if bad_terminus(peptide):
return False
if not_in_range(seq):
return False
if seq.find("X") != -1:
return False
return True
except:
return False
def get_valid_sequences():
peptides = pd.DataFrame()
all_file_names = []
for j_file in glob.glob(os.path.join(cfg.DATA_ROOT, "dbaasp/*.json")):
filename = j_file[j_file.rfind("/") + 1:]
with open(j_file, encoding='utf-8') as train_file:
try:
dict_tmp = json.load(train_file)
dict_tmp["seq"] = dict_tmp.pop("sequence")
dict_train = {}
dict_train["peptideCard"] = dict_tmp
except:
print(f'jsonLoad error!:{filename}')
continue
if dict_train["peptideCard"].get("unusualAminoAcids") != []:
continue
peptide = pd.DataFrame.from_dict(dict_train, orient='index')
if is_valid(peptide):
peptides = pd.concat([peptides, peptide])
all_file_names.append(filename)
peptides["filename"] = all_file_names
peptides.to_csv("./data/valid_sequences.csv")
return peptides
def add_activity_list(peptides):
activity_list_all = []
for targets in peptides.targetActivities: # one seq has a list of targets
try:
activity_list = []
for target in targets:
if target['unit']['name'] == 'µM': # µg/ml
try:
con = target['concentration']
activity_list.append(target['concentration'])
except:
continue
activity_list_all.append(activity_list)
except:
activity_list_all.append([])
continue
peptides["activity_list"] = activity_list_all
return peptides
def add_toxic_list(peptides):
toxic_list_all = []
for targets in peptides.hemoliticCytotoxicActivities: # one seq has a list of targets
try:
toxic_list = []
for target in targets:
if target['unit']['name'] == 'µM': # µg/ml
try:
toxic_list.append(target['concentration'])
except:
continue
toxic_list_all.append(toxic_list)
except:
toxic_list_all.append([])
continue
peptides["toxic_list"] = toxic_list_all
return peptides
def add_molecular_weights(peptides):
seqs = [doc for doc in peptides["seq"]]
mws = []
for seq in seqs:
try:
desc = GlobalDescriptor(seq.strip())
desc.calculate_MW(amide=True)
mw = desc.descriptor[0][0]
mws.append(mw)
except:
mws.append(None)
peptides["molecular_weight"] = mws
return peptides
def convert_units(peptides):
converted_activity_all = []
converted_toxic_all = []
for activity_list, toxic_list, molecular_weight in zip(peptides.activity_list,
peptides.toxic_list,
peptides.molecular_weight):
converted_activity_list = []
converted_toxic_list = []
for item in activity_list:
item = item.replace(">", "") # '>10' => 10
item = item.replace("<", "") # '<1.25' => 1.25
item = item.replace("=", "") # '=2' => 2
if item == "NA":
continue
if item.find("±") != -1:
item = item[:item.find("±")] # 10.7±4.6 => 10.7
if item.find("-") != -1:
item = item[:item.find("-")] # 12.5-25.0 => 12.5
item = item.strip()
try:
converted_activity_list.append(float(item) * molecular_weight / 1000)
except:
pass
for item in toxic_list:
item = item.replace(">", "") # '>10' => 10
item = item.replace("<", "") # '<1.25' => 1.25
item = item.replace("=", "") # '=2' => 2
if item == "NA":
continue
if item.find("±") != -1:
item = item[:item.find("±")] # 10.7±4.6 => 10.7
if item.find("-") != -1:
item = item[:item.find("-")] # 12.5-25.0 => 12.5
item = item.strip()
try:
converted_toxic_list.append(float(item) * molecular_weight / 1000)
except:
pass
converted_activity_all.append(converted_activity_list)
converted_toxic_all.append(converted_toxic_list)
peptides["converted_activity"] = converted_activity_all
peptides["converted_toxic"] = converted_toxic_all
print('--> Writing valid sequences with molecular weights converted to valid_sequences_with_mw_converted.csv')
peptides.to_csv("./data/valid_sequences_with_mw_converted.csv")
return peptides
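# Worked example of the conversion above (illustrative numbers): an activity of
# 10 uM for a peptide with molecular weight 2500 g/mol becomes
# 10 * 2500 / 1000 = 25 ug/ml, which is the scale used by the thresholds below.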
# Starting process
print('Dataset Creation process begins ... ')
# AMP data
print('**** Creating AMP datasets ****')
# Get Valid Sequences
peptide_all = get_valid_sequences()
print ('1. Getting all valid peptide sequences from DBAASP, number of seqs extracted = ', len(peptide_all))
print('--> Sequences stored in valid_sequences.csv')
# Add molecular weights
print('2. Converting Molecular weights')
peptide_all_with_mw = add_molecular_weights(peptide_all)
# Extract list of anti-microbial activities and list of toxicities
peptide_all_with_activity = add_activity_list(peptide_all)
peptide_all_with_activity_toxicity = add_toxic_list(peptide_all_with_activity)
# Add the converted units to activity list and toxicity list
peptide_all_converted = convert_units(peptide_all_with_activity_toxicity)
# Statistics
def get_stats():
peptides = pd.DataFrame()
all_file_names = []
total = 0
unusual_amino_acids = 0
for j_file in glob.glob(os.path.join(cfg.DATA_ROOT, "dbaasp/*.json")):
total += 1
filename = j_file[j_file.rfind("/") + 1:]
with open(j_file, encoding='utf-8') as train_file:
try:
dict_tmp = json.load(train_file)
dict_tmp["seq"] = dict_tmp.pop("sequence")
dict_train = {}
dict_train["peptideCard"] = dict_tmp
except:
print(f'jsonLoad error!:{filename}')
continue
if dict_train["peptideCard"].get("unusualAminoAcids") != []:
unusual_amino_acids += 1
continue
peptide = pd.DataFrame.from_dict(dict_train, orient='index')
peptides = pd.concat([peptides, peptide])
all_file_names.append(filename)
peptides["filename"] = all_file_names
print ("--> For DBAASP:")
print ("Total number of sequences:", total)
print ("Total number of unusual AminoAcids:", unusual_amino_acids)
return peptides
print('3. Some Statistics of collected valid sequences')
peptide_all = get_stats()
not_valid_count = len([seq for seq in peptide_all.seq if not_in_range(seq)])
print ("--> Number of not in range sequences:", not_valid_count)
print ("--> Number of valid sequences:", len(peptide_all_converted))
has_activity = [item for item in peptide_all_converted.activity_list if item != []]
print ("--> Number of valid sequences with antimicrobial activity:", len(has_activity))
has_toxicity = [item for item in peptide_all_converted.toxic_list if item != []]
print ("--> Number of valid sequences with toxicity:", len(has_toxicity))
################################################################
df = pd.read_csv("./data/valid_sequences_with_mw_converted.csv")
print (len(df))
# df.head() # default df: is dbaasp
def add_min_max_mean(df_in):
min_col = [min(ast.literal_eval(li_str)) if li_str != '[]' else '' for li_str in list(df_in.converted_activity)]
max_col = [max(ast.literal_eval(li_str)) if li_str != '[]' else '' for li_str in list(df_in.converted_activity)]
mean_col = [np.mean(ast.literal_eval(li_str)) if li_str != '[]' else '' for li_str in list(df_in.converted_activity)]
df_in["min_activity"] = min_col
df_in["max_activity"] = max_col
df_in["avg_activity"] = mean_col
return df_in
def all_activity_more_than_30(x_str):
x = ast.literal_eval(x_str)
for i in range(len(x)):
if x[i] < 30:
return False # all of them
# just for negative (pos: any item < 10, neg: all of them > 30)
return True
def all_activity_more_than_str(x_str, num):
x = ast.literal_eval(x_str)
if len(x) == 0:
return False
for i in range(len(x)):
if x[i] < num:
return False
return True
def all_activity_more_than(df, num):
return df[df['converted_activity'].apply(lambda x: all_activity_more_than_str(x, num))]
def all_toxic_more_than(df, num):
return df[df['converted_toxic'].apply(lambda x: all_activity_more_than_str(x, num))]
def all_activity_less_than_str(x_str, num):
x = ast.literal_eval(x_str)
if len(x) == 0:
return False
for i in range(len(x)):
if x[i] > num:
return False
return True
def all_toxic_less_than(df, num):
return df[df['converted_toxic'].apply(lambda x: all_activity_less_than_str(x, num))]
def has_activity_less_than_10(x_str):
x = ast.literal_eval(x_str)
for i in range(len(x)):
if x[i] < 10:
return True
return False
def has_activity_less_than_str(x_str, num):
x = ast.literal_eval(x_str)
for i in range(len(x)):
if x[i] < num:
return True
return False
def has_activity_less_than(df, num):
return df[df['converted_activity'].apply(lambda x: has_activity_less_than_str(x, num))]
def get_seq_len_less_than(df, seq_length):
df_short = df[df['seq'].apply(lambda x: len(x) <= seq_length)]
return df_short
def remove_df(df1, df2):
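    # Appending df2 twice guarantees every df2 row is duplicated, so
    # drop_duplicates(keep=False) drops all df2 rows as well as any df1 rows that
    # also appear in df2 -- i.e. this returns df1 with the rows of df2 removed.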
return pd.concat([df1, df2, df2]).drop_duplicates(keep=False)
# add min, max, mean to all dbaasp
df = add_min_max_mean(df)
df_dbaasp = df[["seq", "activity_list", "converted_activity",
"min_activity", "max_activity", "avg_activity"]]
df_dbaasp.to_csv("./data/all_valid_dbaasp.csv")
# 3) Overlapping sequences between DBAASP and Satpdb with AMP activity <10 ug/ml
print('4. Finding overlapping sequences between DBAASP and Satpdb with AMP activity <10 ug/ml ...')
def get_satpdb(train_file):
for line in train_file.readlines():
if "Peptide ID" in line:
record = {}
line = re.sub(u"\\<.*?\\>", "", line)
peptideId = line.split('Peptide ID')[1].split('Sequence')[0]
record['Peptide.ID'] = peptideId
record['Sequence'] = line.split('Sequence')[1].split('C-terminal modification')[0]
record['C.terminal.modification'] = line.split('C-terminal modification')[1].split('N-terminal modification')[0]
record['N.terminal.modification'] = line.split('N-terminal modification')[1].split('Peptide Type')[0]
record['Peptide.Type'] = line.split('Peptide Type')[1].split('Type of Modification')[0]
record['Type.of.Modification'] = line.split('Type of Modification')[1].split('Source (Databases)')[0]
record['Source..Databases.'] = line.split('Source (Databases)')[1].split('Link to Source')[0]
record['Link.to.Source'] = line.split('Link to Source')[1].split('Major Functions')[0]
record['Major.Functions'] = line.split('Major Functions')[1].split('Sub-functions')[0]
record['Sub.functions'] = line.split('Sub-functions')[1].split('Additional Info')[0]
record['Additional.Info'] = line.split('Additional Info')[1].split('Helix (%)')[0]
record['Helix'] = line.split('Helix (%)')[1].split('Strand (%)')[0]
record['Strand'] = line.split('Strand (%)')[1].split('Coil (%)')[0]
record['Coil'] = line.split('Coil (%)')[1].split('Turn (%)')[0]
record['Turn'] = line.split('Turn (%)')[1].split('DSSP states')[0]
record['DSSP.states'] = line.split('DSSP states')[1].split('Tertiary Structure')[0]
return peptideId, record
def get_satpdbs():
dict_train = {}
for j_file in glob.glob(os.path.join(cfg.DATA_ROOT, "satpdb/source/*.html")):
with open(j_file, encoding='utf-8') as train_file:
try:
name, record = get_satpdb(train_file)
dict_train[name] = record
except:
print(f'error loading html:{j_file}')
peptides = pd.DataFrame.from_dict(dict_train, orient='index')
peptides.to_csv(os.path.join(cfg.DATA_ROOT,"satpdb/satpdb.csv"))
return peptides
df_satpdb = get_satpdbs()
#df_satpdb = pd.read_csv("./data/satpdb/satpdb.csv")
df_satpdb = df_satpdb.rename(index=str, columns={"Sequence": "seq",
"C.terminal.modification": "cterminal",
"N.terminal.modification": "nterminal",
"Peptide.Type": "Peptide_Type",
"Type.of.Modification": "modi"})
valid_df_satpdb = df_satpdb[(df_satpdb.cterminal == "Free") &
(df_satpdb.nterminal == "Free") &
(df_satpdb.Peptide_Type == "Linear") &
(df_satpdb.modi == "None")]
print ("--> Number of valid satpdb = ", len(valid_df_satpdb))
df_overlap = pd.merge(df, valid_df_satpdb, on='seq', how='inner')
print ("--> Number of overlap sequences = ", len(df_overlap))
min_col = [min(ast.literal_eval(li_str)) if li_str != '[]' else '' for li_str in list(df_overlap.converted_activity)]
max_col = [max(ast.literal_eval(li_str)) if li_str != '[]' else '' for li_str in list(df_overlap.converted_activity)]
mean_col = [np.mean(ast.literal_eval(li_str)) if li_str != '[]' else '' for li_str in list(df_overlap.converted_activity)]
df_overlap["min_activity"] = min_col
df_overlap["max_activity"] = max_col
df_overlap["avg_activity"] = mean_col
df_overlap_all = df_overlap[["seq", "activity_list", "converted_activity",
"min_activity", "max_activity", "avg_activity"]]
print('5. Writing the overlap sequences to all_overlap.csv')
df_overlap_all.to_csv("./data/all_overlap.csv")
# length for all <=50
#
# overlap_neg: satpdb all activity greater than 100 : negative
# ** satpdb_pos: satpdb (the same as uniprot1) - overlap_neg
# dbaasp < 25 -> pos anything
# ** amp_pos = dbassp < 25 + satpdb_pos
# select sequences dbaasp, satpdb, and overlap(dbaasp, satpdb) of len <=50
print('6. Selecting sequences dbaasp, satpdb, and overlap(dbaasp, satpdb) of len <=50')
df = get_seq_len_less_than(df, 50)
df_overlap = get_seq_len_less_than(df_overlap, 50)
valid_df_satpdb = get_seq_len_less_than(valid_df_satpdb, 50)
print('7. Selecting negative and positive sequences for AMP activity')
overlap_neg = all_activity_more_than(df_overlap, 100)
print ("--> Number of negative seq in satpdb", len(overlap_neg))
print ("--> Number of unique seq in satpdb", len(valid_df_satpdb["seq"].drop_duplicates()))
satpdb_pos = remove_df(valid_df_satpdb["seq"].drop_duplicates(), overlap_neg["seq"])
satpdb_pos1 = pd.DataFrame({'seq': satpdb_pos.values}) # amp_pos[["seq"]]
satpdb_pos1["source"] = ["satpdb_pos"] * len(satpdb_pos1)
satpdb_pos1 = satpdb_pos1[["seq", "source"]]
print ("--> Number of positive seq in satpdb", len(satpdb_pos))
satpdb_pos1.seq = satpdb_pos1.seq.apply(lambda x: "".join(x.split())) # remove the space from the seq
satpdb_pos1 = satpdb_pos1.drop_duplicates('seq')
print('--> Writing to satpdb_pos.csv')
satpdb_pos1.to_csv("./data/satpdb_pos.csv", index=False, header=False)
def get_ampep(path):
ampeps = {}
ampeps['seq'] = []
for line in open(path).readlines():
if not line.startswith('>'):
ampeps['seq'].append(line.strip())
return pd.DataFrame.from_dict(ampeps)
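# Note: this parser assumes each FASTA record keeps its sequence on a single line
# (headers start with '>'); multi-line sequences would be split into separate entries.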
# combine all positive sequences
print('8. Combining all positive sequences for AMP activity')
# col_Names = ["seq", "label"]
# print('--> Parsing ampep sequences')
# ampep_pos = pd.read_csv("./data_processing/data/ampep/pos_ampep_l1-80.csv", names=col_Names)
# ampep_pos = ampep_pos.drop(columns=['label'])
# ampep_pos.seq = ampep_pos.seq.apply(lambda x: "".join(x.split())) # remove the space from the seq
ampep_pos = get_ampep(os.path.join(cfg.DATA_ROOT, "ampep/train_AMP_3268.fasta"))
ampep_pos = get_seq_len_less_than(ampep_pos, 50)
ampep_pos["source"] = ["ampep_pos"]*len(ampep_pos)
ampep_pos = ampep_pos[["seq", "source"]]
print('--> Writing to ampep_pos.csv')
print ("--> Number of ampep_pos", len(ampep_pos))
ampep_pos.to_csv("./data/ampep_pos.csv", index=False, header=False)
print('--> Writing dbaasp sequences')
print ("--> Number of all seqs dbaasp", len(df))
dbaasp_pos = has_activity_less_than(df, 25)["seq"]
dbaasp_pos1 = pd.DataFrame({'seq': dbaasp_pos.values})
dbaasp_pos1["source"] = ["dbaasp_pos"] * len(dbaasp_pos1)
dbaasp_pos1 = dbaasp_pos1[["seq", "source"]]
print ("--> Number of dbaasp_less_than_25:", len(dbaasp_pos), "number of satpdb_pos:", len(satpdb_pos))
amp_pos = pd.concat([dbaasp_pos1, satpdb_pos1, ampep_pos]).drop_duplicates('seq')
print ("--> Number of amp_pos", len(amp_pos))
amp_pos.columns = ['seq', 'source']
amp_pos['source2'] = amp_pos['source']
amp_pos['source'] = amp_pos['source'].map({'dbaasp_pos': 'amp_pos', 'ampep_pos': 'amp_pos', 'satpdb_pos': 'amp_pos'})
amp_pos = amp_pos[amp_pos['seq'].str.contains('^[A-Z]+')]
amp_pos = amp_pos[~amp_pos.seq.str.contains("B")]
amp_pos = amp_pos[~amp_pos.seq.str.contains("J")]
amp_pos = amp_pos[~amp_pos.seq.str.contains("O")]
amp_pos = amp_pos[~amp_pos.seq.str.contains("U")]
amp_pos = amp_pos[~amp_pos.seq.str.contains("X")]
amp_pos = amp_pos[~amp_pos.seq.str.contains("Z")]
amp_pos = amp_pos[~amp_pos.seq.str.contains('[a-z]')]
amp_pos = amp_pos[~amp_pos.seq.str.contains("-")]
amp_pos = amp_pos[~amp_pos.seq.str.contains(r'[0-9]')]
#amp_pos.seq = amp_pos.seq.apply(lambda x: " ".join(x)) # remove the space from the seq
print('--> Writing amp_pos.csv combined from dbaasp, ampep, satpdb positive sequences')
amp_pos.to_csv("./data/amp_pos.csv", index=False, header=False)
dbaasp_more_than_100 = pd.DataFrame()
dbaasp_more_than_100["seq"] = all_activity_more_than(df, 100)["seq"]
#print ("dbaasp_more_than_100", len(dbaasp_more_than_100))
#print(all_activity_more_than(df, 100).head())
# ampep negative and uniprot sequences
print('9. Collecting uniprot sequences as unknown label')
col_Names = ["seq"]
uniprot_unk1 = pd.read_csv(os.path.join(cfg.DATA_ROOT,"uniprot/uniprot_reviewed_yes_l1-80.txt"), names=col_Names)
col_Names = ["seq"]
uniprot_unk2 = pd.read_csv(os.path.join(cfg.DATA_ROOT,"uniprot/uniprot_reviewed_no_l1-80.txt"), names=col_Names)
uniprot_unk = pd.concat([uniprot_unk1, uniprot_unk2]).drop_duplicates()
uniprot_unk = get_seq_len_less_than(uniprot_unk, 50)
print ("--> uniprot_unk", len(uniprot_unk))
uniprot_unk["source"] = ["uniprot"] * len(uniprot_unk)
uniprot_unk["source2"] = uniprot_unk["source"]
uniprot_unk['source'] = uniprot_unk['source'].map({'uniprot': 'unk'})
print('--> Writing uniprot_unk.csv ')
uniprot_unk.to_csv("./data/uniprot_unk.csv", index=False, header=False)
print('10. Collecting negative sequences for AMP activity ...')
# col_Names = ["seq", "label"]
# ampep_neg = pd.read_csv("./data/ampep/neg_ampep_l1-80.csv", names=col_Names)
# ampep_neg.seq = ampep_neg.seq.apply(lambda x: "".join(x.split())) # remove the space from the seq
# #ampep_neg.columns = ['']
# ampep_neg = ampep_neg.drop(columns=['label'])
ampep_neg = get_ampep(os.path.join(cfg.DATA_ROOT, "ampep/train_nonAMP_9777.fasta"))
ampep_neg = get_seq_len_less_than(ampep_neg, 50)
#print ("----------")
print ("--> Parsing ampep negative sequences, number of ampep_neg = ", len(ampep_neg))
# dbaasp_neg = dbaasp > 100 -> neg (how many we lose)
# Combined_NEG: 10*(dbaasp > 100) + UNIPROT_0
# Combined_POS = Satpdb_pos + ampep_pos + dbaasp_pos
dbaasp_more_than_100["source"] = ["dbaasp_neg"] * len(dbaasp_more_than_100)
# remove duplicates between ampep negative and dbaasp negative
ampep_neg["source"] = ["ampep_neg"] * len(ampep_neg)
ampep_neg = ampep_neg[["seq", "source"]]
print ("--> dbaasp_more_than_100:", len(dbaasp_more_than_100), "ampep_neg:", len(ampep_neg))
# combined_neg = remove_df(pd.concat([dbaasp_more_than_100, uniprot_neg]).drop_duplicates, amp_pos1)
combined_neg = pd.concat([dbaasp_more_than_100, ampep_neg]).drop_duplicates('seq')
# satpdb_pos = remove_df(valid_df_satpdb["seq"].drop_duplicates(), overlap_neg["seq"])
print ("--> combined_neg number = ", len(combined_neg))
combined_neg.to_csv("./data/dbaasp_more_than100_combined_ampep_neg.csv", index=False, header=False) # not multiplied the samples.
common = amp_pos.merge(combined_neg, on=['seq'])
# print(common.head())
combined_neg1 = pd.concat([combined_neg, common]).drop_duplicates('seq')
# print(combined_neg1.head())
combined_neg1['source2'] = combined_neg1['source']
combined_neg1['source'] = combined_neg1['source'].map({'dbaasp_neg': 'amp_negc', 'ampep_neg': 'amp_negnc'})
combined_neg1 = combined_neg1.drop(columns=['source_x', 'source_y'])
# print(combined_neg1.head())
combined_neg1 = combined_neg1[combined_neg1['seq'].str.contains('^[A-Z]+')]
combined_neg1 = combined_neg1[~combined_neg1.seq.str.contains("B")]
combined_neg1 = combined_neg1[~combined_neg1.seq.str.contains("J")]
combined_neg1 = combined_neg1[~combined_neg1.seq.str.contains("O")]
combined_neg1 = combined_neg1[~combined_neg1.seq.str.contains("U")]
combined_neg1 = combined_neg1[~combined_neg1.seq.str.contains("X")]
combined_neg1 = combined_neg1[~combined_neg1.seq.str.contains("Z")]
combine_neg1 = combined_neg1[~combined_neg1.seq.str.contains("-")]
combine_neg1 = combined_neg1[~combined_neg1.seq.str.contains('[a-z]')]
#combined_neg1=combined_neg1[~combined_neg1.seq.str.contains("*")]
combined_neg1 = combined_neg1[~combined_neg1.seq.str.contains(r'[0-9]')]
print('--> Writing combined negative sequences collected from DBAASP and AMPEP to amp_neg.csv')
combined_neg1.to_csv("./data/amp_neg.csv", index=False, header=False) # not multiplied the samples.
# Toxicity data
print('**** Creating Toxicity datasets ****')
# don't need toxinpred_pos as satpdb takes care of it
# toxinpred is already len <=35.
col_Names = ["seq"]
print('1. Collecting Toxicity negative samples')
toxinpred_neg1 = pd.read_csv(os.path.join(cfg.DATA_ROOT,"toxicity/nontoxic_trembl_toxinnpred.txt"), names=col_Names)
print ("--> toxinpred_neg1 number = ", len(toxinpred_neg1))
toxinpred_neg1["source2"] = ["toxinpred_neg_tr"] * len(toxinpred_neg1)
toxinpred_neg1 = toxinpred_neg1[["seq", "source2"]]
toxinpred_neg2 = pd.read_csv(os.path.join(cfg.DATA_ROOT,"toxicity/nontoxic_swissprot_toxinnpred.txt"), names=col_Names)
print ("--> toxinpred_neg2 number = ", len(toxinpred_neg2))
toxinpred_neg2["source2"] = ["toxinpred_neg_sp"] * len(toxinpred_neg2)
toxinpred_neg2 = toxinpred_neg2[["seq", "source2"]]
toxinpred_neg = pd.concat([toxinpred_neg1, toxinpred_neg2]).drop_duplicates('seq')
print('--> toxinpred_neg number = ', len(toxinpred_neg))
# valid_df_satpdb
# keep sequences flagged as toxic in either Major.Functions or Sub.functions
toxic = valid_df_satpdb[valid_df_satpdb['Major.Functions'].str.contains("toxic") | valid_df_satpdb['Sub.functions'].str.contains("toxic")]
print ('--> Valid toxicity sequences from Satpdb = ', len(toxic))
# for toxicity:
# dbassp
# all of them > 250 -> dbaap_neg
# all of them < 200-> dbaap_pos
#
# combined_toxic_pos = satpdb_pos + dbaap_pos
#
# combined_toxic_neg = 10*(dbaap_neg) + UNiprot0
# df from dbaasp, toxic from satpdb
print('2. Collecting Toxicity positive samples')
df_overlap_tox = pd.merge(df, toxic, on='seq', how='inner')[["seq", "toxic_list", "converted_toxic"]]
combined_toxic_pos = all_toxic_less_than(df_overlap_tox, 200)
dbaasp_toxic_pos = all_toxic_less_than(df, 200)
dbaasp_toxic_pos["source2"] = ["dbaasp"] * len(dbaasp_toxic_pos)
dbaasp_toxic_pos = dbaasp_toxic_pos[["seq", "source2"]]
toxic["source2"] = ["satpdb"]*len(toxic)
toxic = toxic[["seq", "source2"]]
combined_toxic_pos = pd.concat([dbaasp_toxic_pos, toxic]).drop_duplicates('seq')
combined_toxic_pos['source'] = 'tox_pos'
#combined_toxic_pos = combined_toxic_pos[["seq", "source", "tox"]]
combined_toxic_pos = combined_toxic_pos[["seq", "source", "source2"]]
combined_toxic_pos = combined_toxic_pos[combined_toxic_pos['seq'].str.contains('^[A-Z]+')]
combined_toxic_pos = combined_toxic_pos[~combined_toxic_pos.seq.str.contains("B")]
combined_toxic_pos = combined_toxic_pos[~combined_toxic_pos.seq.str.contains("J")]
combined_toxic_pos = combined_toxic_pos[~combined_toxic_pos.seq.str.contains("O")]
combined_toxic_pos = combined_toxic_pos[~combined_toxic_pos.seq.str.contains("U")]
combined_toxic_pos = combined_toxic_pos[~combined_toxic_pos.seq.str.contains("X")]
combined_toxic_pos = combined_toxic_pos[~combined_toxic_pos.seq.str.contains("Z")]
combined_toxic_pos = combined_toxic_pos[~combined_toxic_pos.seq.str.contains('[a-z]')]
combined_toxic_pos = combined_toxic_pos[~combined_toxic_pos.seq.str.contains("-")]
#combined_toxic_pos=combined_toxic_pos[~combined_toxic_pos.seq.str.contains("*")]
combined_toxic_pos = combined_toxic_pos[~combined_toxic_pos.seq.str.contains(r'[0-9]')]
combined_toxic_pos.to_csv("./data/toxic_pos.csv", index=False, header=False)
print ('--> combined_toxic_pos number = ', len(combined_toxic_pos))
dbaasp_neg = all_toxic_more_than(df, 250)
dbaasp_neg["source2"] = ["dbaasp"] * len(dbaasp_neg)
dbaasp_neg['source'] = 'tox_negc'
dbaasp_neg = dbaasp_neg[["seq", "source", "source2"]]
dbaasp_neg.head()
toxinpred_neg['source'] = 'tox_negnc'
toxinpred_neg = toxinpred_neg[["seq", "source", "source2"]]
combined_toxic_neg = pd.concat([dbaasp_neg, toxinpred_neg]).drop_duplicates('seq')
combined_toxic_neg = combined_toxic_neg[["seq", "source", "source2"]]
combined_toxic_neg.to_csv("./data/toxic_neg_nofilter.csv", index=False, header=False)
print ('--> combined_toxic_neg number = ', len(combined_toxic_neg))
commont = combined_toxic_pos.merge(combined_toxic_neg, on=['seq'])
combined_negt1 = | pd.concat([combined_toxic_neg, commont]) | pandas.concat |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2020, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The ``py2neo.database.work`` package contains classes pertaining to the
execution of Cypher queries and transactions.
"""
from __future__ import absolute_import, print_function, unicode_literals
from collections import deque, OrderedDict
from functools import reduce
from io import StringIO
from operator import xor as xor_operator
from warnings import warn
from py2neo.collections import iter_items
from py2neo.client import Connection
from py2neo.compat import Mapping, numeric_types, ustr, xstr
from py2neo.cypher import cypher_repr, cypher_str
class Transaction(object):
""" Logical context for one or more graph operations.
Transaction objects are typically constructed by the
:meth:`.Graph.auto` and :meth:`.Graph.begin` methods. User
applications should not generally need to create these objects
directly.
Each transaction has a lifetime which ends by a call to either
:meth:`.commit` or :meth:`.rollback`. In the case of an error, the
server can also prematurely end transactions. The
:meth:`.finished` method can be used to determine whether or not
any of these cases have occurred.
The :meth:`.run` and :meth:`.evaluate` methods are used to execute
Cypher queries within the transactional context. The remaining
methods operate on :class:`.Subgraph` objects, including
derivatives such as :class:`.Node` and :class:`.Relationship`.
"""
_finished = False
def __init__(self, graph, autocommit=False,
# readonly=False, after=None, metadata=None, timeout=None
):
self._graph = graph
self._autocommit = autocommit
self._entities = deque()
self._connector = self.graph.service.connector
if autocommit:
self._transaction = None
else:
self._transaction = self._connector.begin(self._graph.name,
# readonly, after, metadata, timeout
)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.commit()
else:
self.rollback()
def _assert_unfinished(self, message):
if self._finished:
raise TypeError(message)
@property
def graph(self):
return self._graph
@property
def entities(self):
return self._entities
def finished(self):
""" Indicates whether or not this transaction has been completed
or is still open.
"""
return self._finished
def run(self, cypher, parameters=None, **kwparameters):
""" Send a Cypher query to the server for execution and return
a :py:class:`.Cursor` for navigating its result.
:param cypher: Cypher query
:param parameters: dictionary of parameters
:returns: :py:class:`.Cursor` object
"""
self._assert_unfinished("Cannot run query in finished transaction")
try:
entities = self._entities.popleft()
except IndexError:
entities = {}
try:
hydrant = Connection.default_hydrant(self._connector.profile, self.graph)
parameters = dict(parameters or {}, **kwparameters)
if self._transaction:
result = self._connector.run_in_tx(self._transaction, cypher, parameters, hydrant)
else:
result = self._connector.auto_run(self.graph.name, cypher, parameters, hydrant)
return Cursor(result, hydrant, entities)
finally:
if not self._transaction:
self.finish()
def finish(self):
self._assert_unfinished("Transaction already finished")
self._finished = True
def commit(self):
""" Commit the transaction.
"""
self._assert_unfinished("Cannot commit finished transaction")
try:
return self._connector.commit(self._transaction)
finally:
self._finished = True
def rollback(self):
""" Roll back the current transaction, undoing all actions previously taken.
"""
self._assert_unfinished("Cannot rollback finished transaction")
try:
return self._connector.rollback(self._transaction)
finally:
self._finished = True
def evaluate(self, cypher, parameters=None, **kwparameters):
""" Execute a single Cypher statement and return the value from
the first column of the first record.
:param cypher: Cypher statement
:param parameters: dictionary of parameters
:returns: single return value or :const:`None`
"""
return self.run(cypher, parameters, **kwparameters).evaluate(0)
def create(self, subgraph):
""" Create remote nodes and relationships that correspond to those in a
local subgraph. Any entities in *subgraph* that are already bound to
remote entities will remain unchanged, those which are not will become
bound to their newly-created counterparts.
For example::
>>> from py2neo import Graph, Node, Relationship
>>> g = Graph()
>>> tx = g.begin()
>>> a = Node("Person", name="Alice")
>>> tx.create(a)
>>> b = Node("Person", name="Bob")
>>> ab = Relationship(a, "KNOWS", b)
>>> tx.create(ab)
>>> tx.commit()
>>> g.exists(ab)
True
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
creatable object
"""
try:
create = subgraph.__db_create__
except AttributeError:
raise TypeError("No method defined to create object %r" % subgraph)
else:
create(self)
def delete(self, subgraph):
""" Delete the remote nodes and relationships that correspond to
those in a local subgraph. To delete only the relationships, use
the :meth:`.separate` method.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph`
"""
try:
delete = subgraph.__db_delete__
except AttributeError:
raise TypeError("No method defined to delete object %r" % subgraph)
else:
delete(self)
def exists(self, subgraph):
""" Determine whether one or more entities all exist within the
graph. Note that if any nodes or relationships in *subgraph* are not
bound to remote counterparts, this method will return ``False``.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph`
:returns: ``True`` if all entities exist remotely, ``False`` otherwise
"""
try:
exists = subgraph.__db_exists__
except AttributeError:
raise TypeError("No method defined to check existence of object %r" % subgraph)
else:
return exists(self)
def merge(self, subgraph, primary_label=None, primary_key=None):
""" Create or update the nodes and relationships of a local
subgraph in the remote database. Note that the functionality of
this operation is not strictly identical to the Cypher MERGE
clause, although there is some overlap.
Each node and relationship in the local subgraph is merged
independently, with nodes merged first and relationships merged
second.
For each node, the merge is carried out by comparing that node with
a potential remote equivalent on the basis of a single label and
property value. If no remote match is found, a new node is created;
if a match is found, the labels and properties of the remote node
are updated. The label and property used for comparison are determined
by the `primary_label` and `primary_key` arguments but may be
overridden for individual nodes by the of `__primarylabel__` and
`__primarykey__` attributes on the node itself.
For each relationship, the merge is carried out by comparing that
relationship with a potential remote equivalent on the basis of matching
start and end nodes plus relationship type. If no remote match is found,
a new relationship is created; if a match is found, the properties of
the remote relationship are updated.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph` object
:param primary_label: label on which to match any existing nodes
:param primary_key: property key(s) on which to match any existing
nodes
"""
try:
merge = subgraph.__db_merge__
except AttributeError:
raise TypeError("No method defined to merge object %r" % subgraph)
else:
merge(self, primary_label, primary_key)
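# Illustrative usage (a sketch, not an excerpt from the official docs): merging
# a node keyed on a single label/property pair might look like:
#
#     tx = graph.begin()
#     alice = Node("Person", name="Alice")
#     tx.merge(alice, primary_label="Person", primary_key="name")
#     tx.commit()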
def pull(self, subgraph):
""" Update local entities from their remote counterparts.
For any nodes and relationships that exist in both the local
:class:`.Subgraph` and the remote :class:`.Graph`, pull properties
and node labels into the local copies. This operation does not
create or delete any entities.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph`
"""
try:
pull = subgraph.__db_pull__
except AttributeError:
raise TypeError("No method defined to pull object %r" % subgraph)
else:
return pull(self)
def push(self, subgraph):
""" Update remote entities from their local counterparts.
For any nodes and relationships that exist in both the local
:class:`.Subgraph` and the remote :class:`.Graph`, push properties
and node labels into the remote copies. This operation does not
create or delete any entities.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph`
"""
try:
push = subgraph.__db_push__
except AttributeError:
raise TypeError("No method defined to push object %r" % subgraph)
else:
return push(self)
def separate(self, subgraph):
""" Delete the remote relationships that correspond to those in a local
subgraph. This leaves any nodes untouched.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph`
"""
try:
separate = subgraph.__db_separate__
except AttributeError:
raise TypeError("No method defined to separate object %r" % subgraph)
else:
separate(self)
class Cursor(object):
""" A `Cursor` is a navigator for a stream of records.
A cursor can be thought of as a window onto an underlying data
stream. All cursors in py2neo are "forward-only", meaning that
navigation starts before the first record and may proceed only in a
forward direction.
It is not generally necessary for application code to instantiate a
cursor directly as one will be returned by any Cypher execution method.
However, cursor creation requires only a :class:`.DataSource` object
which contains the logic for how to access the source data that the
cursor navigates.
Many simple cursor use cases require only the :meth:`.forward` method
and the :attr:`.current` attribute. To navigate through all available
records, a `while` loop can be used::
while cursor.forward():
print(cursor.current["name"])
If only the first record is of interest, a similar `if` structure will
do the job::
if cursor.forward():
print(cursor.current["name"])
To combine `forward` and `current` into a single step, use the built-in
:py:func:`next` function::
print(next(cursor)["name"])
Cursors are also iterable, so can be used in a loop::
for record in cursor:
print(record["name"])
For queries that are expected to return only a single value within a
single record, use the :meth:`.evaluate` method. This will return the
first value from the next record or :py:const:`None` if neither the
field nor the record are present::
print(cursor.evaluate())
"""
def __init__(self, result, hydrant=None, entities=None):
self._result = result
self._hydrant = hydrant
self._entities = entities
self._current = None
self._closed = False
def __del__(self):
try:
self.close()
except OSError:
pass
def __repr__(self):
return repr(self.preview(3))
def __next__(self):
if self.forward():
return self._current
else:
raise StopIteration()
# Exists only for Python 2 iteration compatibility
next = __next__
def __iter__(self):
while self.forward():
yield self._current
def __getitem__(self, key):
return self._current[key]
@property
def current(self):
""" Returns the current record or :py:const:`None` if no record
has yet been selected.
"""
return self._current
def close(self):
""" Close this cursor and free up all associated resources.
"""
if not self._closed:
self._result.buffer() # force consumption of remaining data
self._closed = True
def keys(self):
""" Return the field names for the records in the stream.
"""
return self._result.fields()
def summary(self):
""" Return the result summary.
"""
self._result.buffer()
metadata = self._result.summary()
return CypherSummary(**metadata)
def plan(self):
""" Return the plan returned with this result, if any.
"""
self._result.buffer()
metadata = self._result.summary()
if "plan" in metadata:
return CypherPlan(**metadata["plan"])
elif "profile" in metadata:
return CypherPlan(**metadata["profile"])
else:
return None
def stats(self):
""" Return the query statistics.
This contains details of the activity undertaken by the database
kernel for the query, such as the number of entities created or
deleted. Specifically, this returns a :class:`.CypherStats` object.
>>> from py2neo import Graph
>>> g = Graph()
>>> g.run("CREATE (a:Person) SET a.name = 'Alice'").stats()
constraints_added: 0
constraints_removed: 0
contained_updates: True
indexes_added: 0
indexes_removed: 0
labels_added: 1
labels_removed: 0
nodes_created: 1
nodes_deleted: 0
properties_set: 1
relationships_created: 0
relationships_deleted: 0
"""
self._result.buffer()
metadata = self._result.summary()
return CypherStats(**metadata.get("stats", {}))
def forward(self, amount=1):
""" Attempt to move the cursor one position forward (or by
another amount if explicitly specified). The cursor will move
position by up to, but never more than, the amount specified.
If not enough scope for movement remains, only that remainder
will be consumed. The total amount moved is returned.
:param amount: the amount to move the cursor
:returns: the amount that the cursor was able to move
"""
if amount == 0:
return 0
if amount < 0:
raise ValueError("Cursor can only move forwards")
amount = int(amount)
moved = 0
v = self._result.protocol_version
while moved != amount:
values = self._result.fetch()
if values is None:
break
else:
keys = self._result.fields() # TODO: don't do this for every record
if self._hydrant:
values = self._hydrant.hydrate(keys, values, entities=self._entities, version=v)
self._current = Record(zip(keys, values))
moved += 1
return moved
def preview(self, limit=1):
""" Construct a :class:`.Table` containing a preview of
upcoming records, including no more than the given `limit`.
:param limit: maximum number of records to include in the
preview
:returns: :class:`.Table` containing the previewed records
"""
if limit < 0:
raise ValueError("Illegal preview size")
v = self._result.protocol_version
records = []
keys = self._result.fields()
for values in self._result.peek_records(int(limit)):
if self._hydrant:
values = self._hydrant.hydrate(keys, values, entities=self._entities, version=v)
records.append(values)
return Table(records, keys)
def evaluate(self, field=0):
""" Return the value of the first field from the next record
(or the value of another field if explicitly specified).
This method attempts to move the cursor one step forward and,
if successful, selects and returns an individual value from
the new current record. By default, this value will be taken
from the first value in that record but this can be overridden
with the `field` argument, which can represent either a
positional index or a textual key.
If the cursor cannot be moved forward or if the record contains
no values, :py:const:`None` will be returned instead.
This method is particularly useful when it is known that a
Cypher query returns only a single value.
:param field: field to select value from (optional)
:returns: value of the field or :py:const:`None`
Example:
>>> from py2neo import Graph
>>> g = Graph()
>>> g.run("MATCH (a) WHERE a.email=$x RETURN a.name", x="<EMAIL>").evaluate()
'<NAME>'
"""
if self.forward():
try:
return self[field]
except IndexError:
return None
else:
return None
def data(self, *keys):
""" Consume and extract the entire result as a list of
dictionaries.
::
>>> from py2neo import Graph
>>> graph = Graph()
>>> graph.run("MATCH (a:Person) RETURN a.name, a.born LIMIT 4").data()
[{'a.born': 1964, 'a.name': '<NAME>'},
{'a.born': 1967, 'a.name': '<NAME>'},
{'a.born': 1961, 'a.name': '<NAME>'},
{'a.born': 1960, 'a.name': '<NAME>'}]
:param keys: indexes or keys of the items to include; if none
are provided, all values will be included
:returns: list of dictionary of values, keyed by field name
:raises IndexError: if an out-of-bounds index is specified
"""
return [record.data(*keys) for record in self]
def to_table(self):
""" Consume and extract the entire result as a :class:`.Table`
object.
:return: the full query result
"""
return Table(self)
def to_subgraph(self):
""" Consume and extract the entire result as a :class:`.Subgraph`
containing the union of all the graph structures within.
:return: :class:`.Subgraph` object
"""
s = None
for record in self:
s_ = record.to_subgraph()
if s_ is not None:
if s is None:
s = s_
else:
s |= s_
return s
def to_ndarray(self, dtype=None, order='K'):
""" Consume and extract the entire result as a
`numpy.ndarray <https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html>`_.
.. note::
This method requires `numpy` to be installed.
:param dtype:
:param order:
:warns: If `numpy` is not installed
:returns: `ndarray
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html>`__ object.
"""
try:
# noinspection PyPackageRequirements
from numpy import array
except ImportError:
warn("Numpy is not installed.")
raise
else:
return array(list(map(list, self)), dtype=dtype, order=order)
def to_series(self, field=0, index=None, dtype=None):
""" Consume and extract one field of the entire result as a
`pandas.Series <http://pandas.pydata.org/pandas-docs/stable/dsintro.html#series>`_.
.. note::
This method requires `pandas` to be installed.
:param field:
:param index:
:param dtype:
:warns: If `pandas` is not installed
:returns: `Series
<http://pandas.pydata.org/pandas-docs/stable/dsintro.html#series>`__ object.
"""
try:
# noinspection PyPackageRequirements
from pandas import Series
except ImportError:
warn("Pandas is not installed.")
raise
else:
return | Series([record[field] for record in self], index=index, dtype=dtype) | pandas.Series |
# Copyright 2021 AI Singapore. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import os
import pandas as pd
from rarity.data_loader import CSVDataLoader, DataframeLoader
# add this to conftest.py under the tests folder
from selenium.webdriver.chrome.options import Options
def pytest_setup_options():
options = Options()
# added mainly for integration test in gitlab-ci to resolve
# (unknown error: DevToolsActivePort file doesn't exist)
# (The process started from chrome location /usr/bin/google-chrome is no longer running,
# so ChromeDriver is assuming that Chrome has crashed.)
# solution reference => https://github.com/plotly/dash/issues/1420
options.add_argument('--no-sandbox')
return options
@pytest.fixture
def csv_loader_single_modal_reg():
SAMPLE_DATA_DIR = './tests/sample_data/regression/'
FEATURES_FILE = os.path.join(SAMPLE_DATA_DIR, 'reg_features.csv')
Y_TRUE_FILE = os.path.join(SAMPLE_DATA_DIR, 'reg_yTrue.csv')
Y_PRED_FILE_1 = os.path.join(SAMPLE_DATA_DIR, 'reg_yPreds_modelA.csv')
MODEL_NAMES = ['model_A']
ANALYSIS_TYPE = 'Regression'
data_loader = CSVDataLoader(FEATURES_FILE,
Y_TRUE_FILE,
yPred_file_ls=[Y_PRED_FILE_1],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def csv_loader_single_modal_cls():
SAMPLE_DATA_DIR = './tests/sample_data/classification/binary/'
FEATURES_FILE = os.path.join(SAMPLE_DATA_DIR, 'binary_features.csv')
Y_TRUE_FILE = os.path.join(SAMPLE_DATA_DIR, 'binary_yTrue.csv')
Y_PRED_FILE_1 = os.path.join(SAMPLE_DATA_DIR, 'binary_yPreds_modelA.csv')
MODEL_NAMES = ['model_A']
ANALYSIS_TYPE = 'Binary-Classification'
data_loader = CSVDataLoader(FEATURES_FILE,
Y_TRUE_FILE,
yPred_file_ls=[Y_PRED_FILE_1],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def csv_loader_bimodal_reg():
SAMPLE_DATA_DIR = './tests/sample_data/regression/'
FEATURES_FILE = os.path.join(SAMPLE_DATA_DIR, 'reg_features.csv')
Y_TRUE_FILE = os.path.join(SAMPLE_DATA_DIR, 'reg_yTrue.csv')
Y_PRED_FILE_1 = os.path.join(SAMPLE_DATA_DIR, 'reg_yPreds_modelA.csv')
Y_PRED_FILE_2 = os.path.join(SAMPLE_DATA_DIR, 'reg_yPreds_modelB.csv')
MODEL_NAMES = ['model_A', 'model_B']
ANALYSIS_TYPE = 'Regression'
data_loader = CSVDataLoader(FEATURES_FILE,
Y_TRUE_FILE,
yPred_file_ls=[Y_PRED_FILE_1, Y_PRED_FILE_2],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def csv_loader_bimodal_cls():
SAMPLE_DATA_DIR = './tests/sample_data/classification/binary/'
FEATURES_FILE = os.path.join(SAMPLE_DATA_DIR, 'binary_features.csv')
Y_TRUE_FILE = os.path.join(SAMPLE_DATA_DIR, 'binary_yTrue.csv')
Y_PRED_FILE_1 = os.path.join(SAMPLE_DATA_DIR, 'binary_yPreds_modelA.csv')
Y_PRED_FILE_2 = os.path.join(SAMPLE_DATA_DIR, 'binary_yPreds_modelB.csv')
MODEL_NAMES = ['model_A', 'model_B']
ANALYSIS_TYPE = 'Binary-Classification'
data_loader = CSVDataLoader(FEATURES_FILE,
Y_TRUE_FILE,
yPred_file_ls=[Y_PRED_FILE_1, Y_PRED_FILE_2],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def csv_loader_bimodal_cls_multi():
SAMPLE_DATA_DIR = './tests/sample_data/classification/multiclass/'
FEATURES_FILE = os.path.join(SAMPLE_DATA_DIR, 'multiclass_features.csv')
Y_TRUE_FILE = os.path.join(SAMPLE_DATA_DIR, 'multiclass_yTrue.csv')
Y_PRED_FILE_1 = os.path.join(SAMPLE_DATA_DIR, 'multiclass_yPreds_modelA.csv')
Y_PRED_FILE_2 = os.path.join(SAMPLE_DATA_DIR, 'multiclass_yPreds_modelB.csv')
MODEL_NAMES = ['model_A', 'model_B']
ANALYSIS_TYPE = 'Multiclass-Classification'
data_loader = CSVDataLoader(FEATURES_FILE,
Y_TRUE_FILE,
yPred_file_ls=[Y_PRED_FILE_1, Y_PRED_FILE_2],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def dataframe_loader_single_modal_reg():
DF_FEATURES = | pd.DataFrame([[0.1, 2.5, 3.6], [0.5, 2.2, 6.6]], columns=['x1', 'x2', 'x3']) | pandas.DataFrame |
################################################################################
# Module: schedule.py
# Description: Functions for handling conversion of EnergyPlus schedule objects
# License: MIT, see full license in LICENSE.txt
# Web: https://github.com/samuelduchesne/archetypal
################################################################################
import functools
import io
import logging as lg
from datetime import datetime, timedelta
import archetypal
import numpy as np
import pandas as pd
from archetypal import log
class Schedule(object):
"""An object designed to handle any EnergyPlys schedule object"""
def __init__(self, sch_name, idf=None, start_day_of_the_week=0,
strict=False, base_year=2018, schType=None, **kwargs):
"""
Args:
idf (IDF): IDF object
sch_name (str): The schedule name in the idf file
start_day_of_the_week (int): 0-based day of week (Monday=0)
strict (bool): if True, an error is raised when a schedule uses
Field-Sets such as Holidays and CustomDay that are absent
from the IDF file. If False, any missing qualifiers are
ignored.
base_year (int): The base year of the schedule. Defaults to 2018
since the first day of that year is a Monday.
"""
super(Schedule, self).__init__(**kwargs)
self.strict = strict
self.idf = idf
self.schName = sch_name
self.startDayOfTheWeek = self.get_sdow(start_day_of_the_week)
self.year = base_year
self.startDate = self.start_date()
self.count = 0
self.startHOY = 1
self.endHOY = 24
self.unit = "unknown"
self.index_ = None
self.values = None
self.schType = schType
_type = kwargs.get('Type', None)
if _type is None:
self.schTypeLimitsName = self.get_schedule_type_limits_name(
sch_type=self.schType)
else:
self.schTypeLimitsName = _type
@classmethod
def constant_schedule(cls, hourly_value=1, Name='AlwaysOn', **kwargs):
idftxt = "VERSION, 8.9;" # Not an emplty string. has just the
# version number
# we can make a file handle of a string
fhandle = io.StringIO(idftxt)
# initialize the IDF object with the file handle
idf_scratch = archetypal.IDF(fhandle)
idf_scratch.add_object(ep_object='Schedule:Constant'.upper(),
**dict(Name=Name,
Schedule_Type_Limits_Name='',
Hourly_Value=hourly_value),
save=False)
sched = Schedule(sch_name=Name, idf=idf_scratch, **kwargs)
return sched
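# Illustrative usage (a sketch; the value and name below are made up):
#
#     half_on = Schedule.constant_schedule(hourly_value=0.5, Name='HalfOn')
#     half_on.all_values.mean()   # expected to be 0.5 across all 8760 hours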
@property
def all_values(self):
"""returns the values array"""
if self.values is None:
self.values = self.get_schedule_values(sch_name=self.schName,
sch_type=self.schType)
return self.values
else:
return self.values
@property
def max(self):
return max(self.all_values)
@property
def min(self):
return min(self.all_values)
@property
def mean(self):
return np.mean(self.all_values)
@property
def series(self):
"""Returns the schedule values as a pd.Series object with a
DateTimeIndex"""
index = pd.date_range(start=self.startDate, periods=len(
self.all_values), freq='1H')
return pd.Series(self.all_values, index=index)
def get_schedule_type_limits_name(self, sch_name=None, sch_type=None):
"""Return the Schedule Type Limits name associated to a schedule
name"""
if sch_name is None:
sch_name = self.schName
if sch_type is None:
schedule_values = self.idf.get_schedule_data_by_name(sch_name,
sch_type=sch_type)
try:
schedule_limit_name = schedule_values.Schedule_Type_Limits_Name
except:
return 'unknown'
else:
return schedule_limit_name
def get_schedule_type_limits_data(self, sch_name=None):
"""Returns Schedule Type Limits data from schedule name"""
if sch_name is None:
sch_name = self.schName
schedule_values = self.idf.get_schedule_data_by_name(sch_name)
try:
schedule_limit_name = schedule_values.Schedule_Type_Limits_Name
except:
# this schedule is probably a 'Schedule:Week:Daily' which does
# not have a Schedule_Type_Limits_Name field
return '', '', '', ''
else:
lower_limit, upper_limit, numeric_type, unit_type = \
self.idf.get_schedule_type_limits_data_by_name(
schedule_limit_name)
self.unit = unit_type
if self.unit == "unknown":
self.unit = numeric_type
return lower_limit, upper_limit, numeric_type, unit_type
def get_schedule_type(self, sch_name=None):
"""Return the schedule type"""
if sch_name is None:
sch_name = self.schName
schedule_values = self.idf.get_schedule_data_by_name(sch_name)
sch_type = schedule_values.fieldvalues[0]
return sch_type
def start_date(self):
"""The start date of the schedule. Satisfies `startDayOfTheWeek`"""
import calendar
c = calendar.Calendar(firstweekday=self.startDayOfTheWeek)
start_date = c.monthdatescalendar(self.year, 1)[0][0]
return datetime(start_date.year, start_date.month, start_date.day)
def plot(self, slice=None, **kwargs):
hourlyvalues = self.all_values
index = pd.date_range(self.startDate, periods=len(
hourlyvalues),
freq='1H')
series = pd.Series(hourlyvalues, index=index, dtype=float)
if slice is None:
slice = pd.IndexSlice[:]
elif len(slice) > 1:
slice = pd.IndexSlice[slice[0]:slice[1]]
ax = series.loc[slice].plot(**kwargs, label=self.schName)
return ax
def get_interval_day_ep_schedule_values(self, sch_name=None):
"""'Schedule:Day:Interval"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('Schedule:Day:Interval'.upper(), sch_name)
lower_limit, upper_limit, numeric_type, unit_type = \
self.get_schedule_type_limits_data(sch_name)
number_of_day_sch = int((len(values.fieldvalues) - 3) / 2)
hourly_values = np.zeros(24)  # float array so fractional schedule values are not truncated
start_hour = 0
for i in range(number_of_day_sch):
value = float(values['Value_Until_Time_{}'.format(i + 1)])
until_time = [int(s.strip()) for s in
values['Time_{}'.format(i + 1)].split(":") if
s.strip().isdigit()]
end_hour = int(until_time[0] + until_time[1] / 60)
for hour in range(start_hour, end_hour):
hourly_values[hour] = value
start_hour = end_hour
if numeric_type.strip().lower() == "discrete":
hourly_values = hourly_values.astype(int)
return hourly_values
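# Worked example (a sketch, not captured output): for a 'Schedule:Day:Interval'
# object with Time_1 = 07:00 / Value_Until_Time_1 = 0 and Time_2 = 24:00 /
# Value_Until_Time_2 = 1, hours 0-6 receive 0 and hours 7-23 receive 1.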
def get_hourly_day_ep_schedule_values(self, sch_name=None):
"""'Schedule:Day:Hourly'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('Schedule:Day:Hourly'.upper(), sch_name)
fieldvalues_ = np.array(values.fieldvalues[3:])
return fieldvalues_
def get_compact_weekly_ep_schedule_values(self, sch_name=None,
start_date=None, index=None):
"""'schedule:week:compact'"""
if start_date is None:
start_date = self.startDate
if index is None:
idx = pd.date_range(start=start_date, periods=168, freq='1H')
slicer_ = pd.Series([False] * (len(idx)), index=idx)
else:
slicer_ = pd.Series([False] * (len(index)), index=index)
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:week:compact'.upper(), sch_name)
weekly_schedules = pd.Series([0] * len(slicer_), index=slicer_.index)
# update last day of schedule
if self.count == 0:
self.schType = values.key
self.endHOY = 168
num_of_daily_schedules = int(len(values.fieldvalues[2:]) / 2)
for i in range(num_of_daily_schedules):
day_type = values['DayType_List_{}'.format(i + 1)].lower()
how = self.field_set(day_type, slicer_)
if not weekly_schedules.loc[how].empty:
# Loop through days and replace with day:schedule values
days = []
for name, day in weekly_schedules.loc[how].groupby(pd.Grouper(
freq='D')):
if not day.empty:
ref = values.get_referenced_object(
"ScheduleDay_Name_{}".format(i + 1))
day.loc[:] = self.get_schedule_values(
sch_name=ref.Name, sch_type=ref.key)
days.append(day)
new = pd.concat(days)
slicer_.update(
pd.Series([True] * len(new.index), index=new.index))
slicer_ = slicer_.apply(lambda x: x == True)
weekly_schedules.update(new)
else:
return weekly_schedules.values
return weekly_schedules.values
def get_daily_weekly_ep_schedule_values(self, sch_name=None):
"""'schedule:week:daily'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:week:daily'.upper(), sch_name)
# 7 list for 7 days of the week
hourly_values = []
for day in ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
'Friday', 'Saturday', 'Sunday']:
ref = values.get_referenced_object(
'{}_ScheduleDay_Name'.format(day))
h = self.get_schedule_values(sch_name=ref.Name, sch_type=ref.key)
hourly_values.append(h)
hourly_values = np.array(hourly_values)
# shift days earlier by self.startDayOfTheWeek
hourly_values = np.roll(hourly_values, -self.startDayOfTheWeek, axis=0)
return hourly_values.ravel()
def get_list_day_ep_schedule_values(self, sch_name=None):
"""'schedule:day:list'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:day:list'.upper(), sch_name)
lower_limit, upper_limit, numeric_type, unit_type = \
self.get_schedule_type_limits_data(sch_name)
import pandas as pd
freq = int(values['Minutes_per_Item']) # Frequency of the values
num_values = values.fieldvalues[5:] # List of values
method = values['Interpolate_to_Timestep'] # How to resample
# fill a list of available values and pad with zeros (this is safer
# but should not occur)
all_values = np.zeros(int(24 * 60 / freq))  # float array so values are not truncated
for i in range(len(all_values)):
try:
all_values[i] = num_values[i]
except:
all_values[i] = 0
# create a fake index to help us with the resampling
index = pd.date_range(start=self.startDate,
periods=(24 * 60) / freq,
freq='{}T'.format(freq))
series = pd.Series(all_values, index=index)
# resample series to hourly values and apply resampler function
series = series.resample('1H').apply(_how(method))
return series.values
def get_constant_ep_schedule_values(self, sch_name=None):
"""'schedule:constant'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:constant'.upper(), sch_name)
lower_limit, upper_limit, numeric_type, unit_type = \
self.get_schedule_type_limits_data(sch_name)
value = float(values['Hourly_Value'])
hourly_values = np.full(8760, value)  # float array avoids truncating the constant value
if numeric_type.strip().lower() == 'discrete':
hourly_values = hourly_values.astype(int)
return hourly_values
def get_file_ep_schedule_values(self, sch_name=None):
"""'schedule:file'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:file'.upper(), sch_name)
lower_limit, upper_limit, numeric_type, unit_type = \
self.get_schedule_type_limits_data(sch_name)
filename = values['File_Name']
column = values['Column_Number']
rows = values['Rows_to_Skip_at_Top']
hours = values['Number_of_Hours_of_Data']
sep = values['Column_Separator']
interp = values['Interpolate_to_Timestep']
import pandas as pd
import os
idfdir = os.path.dirname(self.idf.idfname)
file = os.path.join(idfdir, filename)
delimeter = _separator(sep)
skip_rows = int(rows) - 1 # We want to keep the column
col = [int(column) - 1] # zero-based
values = pd.read_csv(file, delimiter=delimeter, skiprows=skip_rows,
usecols=col)
return values.iloc[:, 0].values
def get_compact_ep_schedule_values(self, sch_name=None):
"""'schedule:compact'"""
if sch_name is None:
sch_name = self.schName
values = self.idf.getobject('schedule:compact'.upper(), sch_name)
lower_limit, upper_limit, numeric_type, unit_type = \
self.get_schedule_type_limits_data(sch_name)
field_sets = ['through', 'for', 'interpolate', 'until', 'value']
fields = values.fieldvalues[3:]
index = pd.date_range(start=self.startDate, periods=8760, freq='H')
zeros = np.zeros(len(index))
slicer_ = pd.Series([False] * len(index), index=index)
series = pd.Series(zeros, index=index)
from_day = self.startDate
ep_from_day = datetime(self.year, 1, 1)
from_time = '00:00'
how_interpolate = None
for field in fields:
if any([spe in field.lower() for spe in field_sets]):
f_set, hour, minute, value = self.field_interpreter(field)
if f_set.lower() == 'through':
# main condition. All sub-conditions must obey a
# `Through` condition
# First, initialize the slice (all False for now)
through_conditions = self.invalidate_condition(series)
# reset from_time
from_time = '00:00'
# Prepare ep_to_day variable
ep_to_day = self.date_field_interpretation(value) + \
timedelta(days=1)
# Calculate Timedelta in days
days = (ep_to_day - ep_from_day).days
# Add timedelta to start_date
to_day = from_day + timedelta(days=days) + timedelta(
hours=-1)
# slice the conditions with the range and apply True
through_conditions.loc[from_day:to_day] = True
from_day = to_day + timedelta(hours=1)
ep_from_day = ep_to_day
elif f_set.lower() == 'for':
# slice specific days
# reset from_time
from_time = '00:00'
for_condition = self.invalidate_condition(series)
values = value.split()
if len(values) > 1:
# if multiple `For`. eg.: For: Weekends Holidays,
# Combine both conditions
for value in values:
if value.lower() == 'allotherdays':
# Apply condition to slice
how = self.field_set(value, slicer_)
# Reset though condition
through_conditions = how
for_condition = how
else:
how = self.field_set(value, slicer_)
for_condition.loc[how] = True
elif value.lower() == 'allotherdays':
# Apply condition to slice
how = self.field_set(value, slicer_)
# Reset though condition
through_conditions = how
for_condition = how
else:
# Apply condition to slice
how = self.field_set(value)
for_condition.loc[how] = True
# Combine the for_condition with all_conditions
all_conditions = through_conditions & for_condition
# update in memory slice
# self.sliced_day_.loc[all_conditions] = True
elif 'interpolate' in f_set.lower():
# we need to upsample the series to 8760 * 60 values
new_idx = pd.date_range(start=self.startDate,
periods=525600, closed='left',
freq='T')
series = series.resample('T').pad()
series = series.reindex(new_idx)
series.fillna(method='pad', inplace=True)
through_conditions = through_conditions.resample('T').pad()
through_conditions = through_conditions.reindex(new_idx)
through_conditions.fillna(method='pad', inplace=True)
for_condition = for_condition.resample('T').pad()
for_condition = for_condition.reindex(new_idx)
for_condition.fillna(method='pad', inplace=True)
how_interpolate = value.lower()
elif f_set.lower() == 'until':
until_condition = self.invalidate_condition(series)
if series.index.freq.name == 'T':
# until_time = str(int(hour) - 1) + ':' + minute
until_time = timedelta(hours=int(hour),
minutes=int(minute)) - timedelta(
minutes=1)
else:
until_time = str(int(hour) - 1) + ':' + minute
until_condition.loc[until_condition.between_time(from_time,
str(
until_time)).index] = True
all_conditions = for_condition & through_conditions & \
until_condition
from_time = str(int(hour)) + ':' + minute
elif f_set.lower() == 'value':
# If the term `Value: ` field is used, we will catch it
# here.
# update in memory slice
slicer_.loc[all_conditions] = True
series[all_conditions] = value
else:
# Do something here before looping to the next Field
pass
else:
# If the term `Value: ` is not used, the value is simply
# passed in the Field
value = float(field)
series[all_conditions] = value
# update in memory slice
slicer_.loc[all_conditions] = True
if how_interpolate:
return series.resample('H').mean().values
else:
return series.values
def field_interpreter(self, field):
"""dealing with a Field-Set (Through, For, Interpolate,
# Until, Value) and return the parsed string"""
if 'through' in field.lower():
# deal with through
if ':' in field.lower():
# parse colon
f_set, statement = field.split(':')
hour = None
minute = None
value = statement.strip()
else:
msg = 'The schedule "{sch}" contains a Field ' \
'that is not understood: "{field}"'.format(
sch=self.schName, field=field)
raise NotImplementedError(msg)
elif 'for' in field.lower():
if ':' in field.lower():
# parse colon
f_set, statement = field.split(':')
value = statement.strip()
hour = None
minute = None
else:
# parse without a colon
msg = 'The schedule "{sch}" contains a Field ' \
'that is not understood: "{field}"'.format(
sch=self.schName, field=field)
raise NotImplementedError(msg)
elif 'interpolate' in field.lower():
msg = 'The schedule "{sch}" contains sub-hourly values (' \
'Field-Set="{field}"). The average over the hour is ' \
'taken'.format(sch=self.schName, field=field)
log(msg, lg.WARNING)
f_set, value = field.split(':')
hour = None
minute = None
elif 'until' in field.lower():
if ':' in field.lower():
# parse colon
try:
f_set, hour, minute = field.split(':')
hour = hour.strip() # remove trailing spaces
minute = minute.strip() # remove trailing spaces
value = None
except:
f_set = 'until'
hour, minute = field.split(':')
hour = hour[-2:].strip()
minute = minute.strip()
value = None
else:
msg = 'The schedule "{sch}" contains a Field ' \
'that is not understood: "{field}"'.format(
sch=self.schName, field=field)
raise NotImplementedError(msg)
elif 'value' in field.lower():
if ':' in field.lower():
# parse colon
f_set, statement = field.split(':')
value = statement.strip()
hour = None
minute = None
else:
msg = 'The schedule "{sch}" contains a Field ' \
'that is not understood: "{field}"'.format(
sch=self.schName, field=field)
raise NotImplementedError(msg)
else:
# deal with the data value
f_set = field
hour = None
minute = None
value = field[len(field) + 1:].strip()
return f_set, hour, minute, value
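# Illustrative parse results (a sketch based on the branches above, not output
# captured from a run):
#
#     field_interpreter('Through: 12/31')  -> ('Through', None, None, '12/31')
#     field_interpreter('For: Weekdays')   -> ('For', None, None, 'Weekdays')
#     field_interpreter('Until: 07:00')    -> ('Until', '07', '00', None)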
@staticmethod
def invalidate_condition(series):
index = series.index
periods = len(series)
return | pd.Series([False] * periods, index=index) | pandas.Series |
# author: DSCI-522 Group-21
# date: 2021-11-26
"""Score the model with the test set and generate a confusion matrix
Usage: scoring.py --input=<input> --output=<output>
Options:
--input=<input> The directory where the data and model is
--output=<output> Directory specifying where to store output figure(s)/table(s)
"""
import pandas as pd
from docopt import docopt
import pickle
from sklearn.metrics import confusion_matrix, get_scorer
opt = docopt(__doc__)
def main(input, output):
# Get optimized model
model = pickle.load(open(f"{output}final_model.rds", "rb"))
# Get test data
test_df = (
pd.read_csv(f"{input}test.csv", low_memory=False)
.set_index("index")
.rename_axis(None)
)
X_test = test_df.drop(columns=["FATALITY"])
y_test = test_df["FATALITY"]
# Import training scores df
results_df = (
pd.read_csv(f"{output}final_training_scores.csv", index_col=0)
)
# Create list of desired scoring metrics
scoring_metrics = ["accuracy", "f1", "recall", "precision", "average_precision"]
# Score the test data
scores = {
scorer: [
round(get_scorer(scorer)(model, X_test, y_test), 3)
] for scorer in scoring_metrics
}
scores = | pd.DataFrame(scores, index=["test_scores"]) | pandas.DataFrame |
#!/usr/bin/env python3
import cv2
# from cv2 import aruco
from tqdm import trange
import numpy as np
import os, os.path
from glob import glob
from collections import defaultdict
import pandas as pd
## TODO: rewrite this whole file with aniposelib
from .common import \
get_calibration_board, get_board_type, \
find_calibration_folder, make_process_fun, \
get_cam_name, get_video_name, load_intrinsics, load_extrinsics
from .triangulate import triangulate_optim, triangulate_simple, \
reprojection_error, reprojection_error_und
from .calibrate_extrinsics import detect_aruco, estimate_pose, fill_points
def expand_matrix(mtx):
z = np.zeros((4,4))
z[0:3,0:3] = mtx[0:3,0:3]
z[3,3] = 1
return z
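# Illustrative note (a sketch): expand_matrix copies only the 3x3 rotation block
# of `mtx` into a 4x4 homogeneous matrix, e.g. expand_matrix(np.eye(3)) gives the
# 4x4 identity. Any translation column of a 3x4 input is dropped here.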
def process_trig_errors(config, fname_dict, cam_intrinsics, extrinsics, skip=20):
minlen = np.inf
caps = dict()
for cam_name, fname in fname_dict.items():
cap = cv2.VideoCapture(fname)
caps[cam_name] = cap
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
minlen = min(length, minlen)
cam_names = sorted(fname_dict.keys())
board = get_calibration_board(config)
cam_mats = []
cam_mats_dist = []
for cname in cam_names:
mat = np.array(extrinsics[cname])
left = np.array(cam_intrinsics[cname]['camera_mat'])
cam_mats.append(mat)
cam_mats_dist.append(left)
cam_mats = np.array(cam_mats)
cam_mats_dist = np.array(cam_mats_dist)
go = skip
all_points = []
framenums = []
all_rvecs = []
all_tvecs = []
for framenum in trange(minlen, desc='detecting', ncols=70):
row = []
rvecs = []
tvecs = []
for cam_name in cam_names:
intrinsics = cam_intrinsics[cam_name]
cap = caps[cam_name]
ret, frame = cap.read()
if framenum % skip != 0 and go <= 0:
continue
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# corners, ids = detect_aruco(gray, intrinsics)
detected, stuff = estimate_pose(gray, intrinsics, board)
if detected:
corners, ids, rvec, tvec = stuff
rvec = rvec.flatten()
tvec = tvec.flatten()
else:
corners = ids = None
rvec = np.zeros(3)*np.nan
tvec = np.zeros(3)*np.nan
points = fill_points(corners, ids, board)
points_flat = points.reshape(-1, 1, 2)
points_new = cv2.undistortPoints(
points_flat,
np.array(intrinsics['camera_mat']),
np.array(intrinsics['dist_coeff']))
row.append(points_new.reshape(points.shape))
rvecs.append(rvec)
tvecs.append(tvec)
if ~np.all(np.isnan(row)):
all_points.append(row)
all_tvecs.append(tvecs)
all_rvecs.append(rvecs)
framenums.append(framenum)
go = skip
go = max(0, go-1)
all_points_raw = np.array(all_points)
all_rvecs = np.array(all_rvecs)
all_tvecs = np.array(all_tvecs)
framenums = np.array(framenums)
shape = all_points_raw.shape
all_points_3d = np.zeros((shape[0], shape[2], 3))
all_points_3d.fill(np.nan)
num_cams = np.zeros((shape[0], shape[2]))
num_cams.fill(np.nan)
errors = np.zeros((shape[0], shape[2]))
errors.fill(np.nan)
for i in trange(all_points_raw.shape[0], desc='triangulating', ncols=70):
for j in range(all_points_raw.shape[2]):
pts = all_points_raw[i, :, j, :]
good = ~np.isnan(pts[:, 0])
if np.sum(good) >= 2:
# p3d = triangulate_optim(pts, cam_mats)
p3d = triangulate_simple(pts[good], cam_mats[good])
all_points_3d[i, j] = p3d[:3]
errors[i,j] = reprojection_error_und(p3d, pts[good], cam_mats[good], cam_mats_dist[good])
num_cams[i,j] = np.sum(good)
## all_tvecs
# framenum, camera num, axis
dout = | pd.DataFrame() | pandas.DataFrame |
# Copyright (c) 2016 The UUV Simulator Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import rospy
import numpy as np
import matplotlib.pyplot as plt
import os
from simulation_data import SimulationData, COLOR_RED, COLOR_GREEN, COLOR_BLUE
class FinsData(SimulationData):
LABEL = 'fins'
def __init__(self, bag):
super(FinsData, self).__init__()
for x in bag.get_type_and_topic_info():
for k in x:
if self._prefix is None:
if 'fins' in k:
rospy.loginfo('Fins topic prefix found <%s>' % k)
i = len('fins')
i_max = k.find('fins') + i
self._prefix = k[:i_max]
break
if self._prefix is not None:
break
try:
# Find all fins input topics
if self._prefix is not None:
for i in range(16):
for topic, msg, time in bag.read_messages('%s/%d/input' % (self._prefix, i)):
if i not in self._recorded_data:
self._recorded_data[i] = dict(input=dict(time=list(), values=list()))
t = msg.header.stamp.to_sec()
self._recorded_data[i]['input']['time'].append(t)
self._recorded_data[i]['input']['values'].append(float(msg.data))
if i in self._recorded_data:
self._logger.info('%s/%d/input=loaded' % (self._prefix, i))
except Exception as e:
self._logger.error('Error retrieving fin input data from rosbag, message=' + str(e))
try:
# Find all fins output topics
if self._prefix is not None:
for i in range(16):
for topic, msg, time in bag.read_messages('%s/%d/output' % (self._prefix, i)):
if 'output' not in self._recorded_data[i]:
self._recorded_data[i]['output'] = dict(time=list(), values=list())
t = msg.header.stamp.to_sec()
self._recorded_data[i]['output']['time'].append(t)
self._recorded_data[i]['output']['values'].append(float(msg.data))
if i in self._recorded_data:
self._logger.info('%s/%d/output=loaded' % (self._prefix, i))
except Exception as e:
self._logger.error('Error retrieving fin output data from rosbag, message=' + str(e))
try:
# Find all fin wrench topics
if self._prefix is not None:
for i in range(16):
for topic, msg, time in bag.read_messages('%s/%d/wrench_topic' % (self._prefix, i)):
if 'wrench' not in self._recorded_data[i]:
self._recorded_data[i]['wrench'] = dict(time=list(), force=list(), torque=list())
time = msg.header.stamp.to_sec()
self._recorded_data[i]['wrench']['time'].append(time)
self._recorded_data[i]['wrench']['force'].append(
[msg.wrench.force.x, msg.wrench.force.y, msg.wrench.force.z])
self._recorded_data[i]['wrench']['torque'].append(
[msg.wrench.torque.x, msg.wrench.torque.y, msg.wrench.torque.z])
if i in self._recorded_data:
self._logger.info('%s/%d/wrench_topic=loaded' % (self._prefix, i))
except Exception as e:
self._logger.error('Error retrieving fin wrench data from rosbag, message=' + str(e))
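# Layout of the collected data (a sketch inferred from the loops above), keyed by
# fin index i:
#     self._recorded_data[i] = {
#         'input':  {'time': [...], 'values': [...]},
#         'output': {'time': [...], 'values': [...]},
#         'wrench': {'time': [...], 'force': [[fx, fy, fz], ...],
#                    'torque': [[tx, ty, tz], ...]}}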
def get_as_dataframe(self, add_group_name=None):
try:
import pandas
data = dict()
for i in self._recorded_data:
data[self.LABEL + '_id'] = [i for _ in range(len(self._recorded_data[i]['output']['time']))]
if add_group_name is not None:
data['group'] = [add_group_name for _ in range(len(self._recorded_data[i]['output']['time']))]
data[self.LABEL + '_output_time'] = self._recorded_data[i]['output']['time']
data[self.LABEL + '_output_values'] = self._recorded_data[i]['output']['values']
df_output = pandas.DataFrame(data)
data = dict()
for i in self._recorded_data:
data[self.LABEL + '_id'] = [i for _ in range(len(self._recorded_data[i]['input']['time']))]
if add_group_name is not None:
data['group'] = [add_group_name for _ in range(len(self._recorded_data[i]['input']['time']))]
data[self.LABEL + '_input_time'] = self._recorded_data[i]['input']['time']
data[self.LABEL + '_input_values'] = self._recorded_data[i]['input']['values']
df_input = pandas.DataFrame(data)
data = dict()
for i in self._recorded_data:
data[self.LABEL + '_id'] = [i for _ in range(len(self._recorded_data[i]['wrench']['time']))]
if add_group_name is not None:
data['group'] = [add_group_name for _ in range(len(self._recorded_data[i]['wrench']['time']))]
data[self.LABEL + '_wrench_force_x'] = [x[0] for x in self._recorded_data[i]['wrench']['force']]
data[self.LABEL + '_wrench_force_y'] = [x[1] for x in self._recorded_data[i]['wrench']['force']]
data[self.LABEL + '_wrench_force_z'] = [x[2] for x in self._recorded_data[i]['wrench']['force']]
data[self.LABEL + '_wrench_torque_x'] = [x[0] for x in self._recorded_data[i]['wrench']['torque']]
data[self.LABEL + '_wrench_torque_y'] = [x[1] for x in self._recorded_data[i]['wrench']['torque']]
data[self.LABEL + '_wrench_torque_z'] = [x[2] for x in self._recorded_data[i]['wrench']['torque']]
if len(data) == 0:
return None
df_wrench = pandas.DataFrame(data)
return | pandas.concat([df_input, df_output, df_wrench], ignore_index=True) | pandas.concat |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# PACKAGES NEEDED TO RUN OP.
import netCDF4
import pandas as pd
import numpy as np
import datetime as dt
import json
import wmf.wmf as wmf
import hydroeval
import glob
import MySQLdb
#module to run the model
import hidrologia
from sklearn.linear_model import LinearRegression
import math
import os
#spatial
import cartopy.crs as crs
import geopandas as gpd
import pyproj
from pyproj import transform
from cartopy.feature import ShapelyFeature
import cartopy.crs as ccrs
from cartopy.io.shapereader import Reader
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import seaborn as sns
sns.set(style="whitegrid")
sns.set_context('notebook', font_scale=1.13)
#FORMAT
# font
import matplotlib
matplotlib.use('Agg')
import pylab as pl
#avoid warnings
import warnings
warnings.filterwarnings('ignore')
#---------------
#Base functions.
#---------------
def get_rutesList(rutas):
''' Opens the text file at the path `rutas` and returns a list with the lines of that file.
Base function.
#Arguments
rutas: string, path to the file.
'''
f = open(rutas,'r')
L = f.readlines()
f.close()
return L
def set_modelsettings(ConfigList):
ruta_modelset = get_ruta(ConfigList,'ruta_proj')+get_ruta(ConfigList,'ruta_modelset')
# model settings Json
with open(ruta_modelset, 'r') as f:
model_set = json.load(f)
# Model set
wmf.models.max_aquifer = wmf.models.max_gravita * 10
wmf.models.retorno = model_set['retorno']
wmf.models.show_storage = model_set['show_storage']
wmf.models.separate_fluxes = model_set['separate_fluxes']
wmf.models.dt = model_set['dt']
def round_time(date = dt.datetime.now(),round_mins=5):
'''
Rounds datetime object to nearest 'round_time' minutes.
If 'dif' is < 'round_time'/2 takes minute behind, else takesminute ahead.
Parameters
----------
date : date to round
round_mins : round to this nearest minutes interval
Returns
----------
datetime object rounded, datetime object
'''
dif = date.minute % round_mins
if dif <= round_mins/2:
return dt.datetime(date.year, date.month, date.day, date.hour, date.minute - (date.minute % round_mins))
else:
return dt.datetime(date.year, date.month, date.day, date.hour, date.minute - (date.minute % round_mins)) + dt.timedelta(minutes=round_mins)
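# Worked example (a sketch, not captured output): with round_mins=5,
# 12:07 gives dif = 2 <= 2.5 and rounds down to 12:05, while 12:08 gives
# dif = 3 > 2.5 and rounds up to 12:10.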
def get_credentials(ruta_credenciales):
credentials = json.load(open(ruta_credenciales))
#credentials for DB queries
mysqlServer = credentials['MySql_Siata']
for key in np.sort(list(credentials['MySql_Siata'].keys()))[::-1]: #1:hal, 2:sal
try:
connection = MySQLdb.connect(host=mysqlServer[key]['host'],
user=mysqlServer[key]['user'],
password=mysqlServer[key]['password'],
db=mysqlServer[key]['db'])
print('SERVER_CON: Successful connection to %s'%(key))
host=mysqlServer[key]['host']
user=mysqlServer[key]['user']
password=mysqlServer[key]['password']
db=mysqlServer[key]['db']
break #stop once it connects successfully (to SAL).
except:
print('SERVER_CON: No connection to %s'%(key))
pass
#credentials used to copy to var
user2copy2var = credentials['cred_2copy2var']['user']; host2copy2var = credentials['cred_2copy2var']['host']
return host,user,password,db,user2copy2var,host2copy2var
def coord2hillID(ruta_nc, df_coordxy):
#read the simubasin to associate stream reaches and get the basic topology
cu = wmf.SimuBasin(rute= ruta_nc)
cu.GetGeo_Cell_Basics()
cu.GetGeo_Parameters()
#get the coordinates of the whole simubasin and the distances between them
coordsX = wmf.cu.basin_coordxy(cu.structure,cu.ncells)[0]
coordsY = wmf.cu.basin_coordxy(cu.structure,cu.ncells)[1]
disty = np.unique(np.diff(np.unique(np.sort(coordsY))))
distx = np.unique(np.diff(np.unique(np.sort(coordsX))))
df_ids = pd.DataFrame(index = df_coordxy.index,columns=['id'])
#identify the id of the hillslope where the points fall
for index in df_coordxy.index:
df_ids.loc[index]=cu.hills_own[np.where((coordsY+disty[0]/2>df_coordxy.loc[index].values[1]) & (coordsY-disty[0]/2<df_coordxy.loc[index].values[1]) & (coordsX+distx[0]/2>df_coordxy.loc[index].values[0]) & (coordsX-distx[0]/2<df_coordxy.loc[index].values[0]))[0]].data
return df_ids
#-----------------------------------
#-----------------------------------
#Functions for reading the configfile
#-----------------------------------
#-----------------------------------
def get_ruta(RutesList, key):
''' Searches the list 'RutesList' for the line that starts with the given key; returns paths.
Base function.
#Arguments
RutesList: list returned by the function get_rutesList() in this script
key: string, key used to find which line in the list starts with it.
'''
if any(i.startswith('- **'+key+'**') for i in RutesList):
for i in RutesList:
if i.startswith('- **'+key+'**'):
return i.split(' ')[-1][:-1]
else:
return 'Aviso: no existe linea con el key especificado'
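# Illustrative configfile line (an assumed example of the expected format):
#
#     - **ruta_proj** /home/user/op_project/
#
# get_ruta(RutesList, 'ruta_proj') takes the last space-separated token of that
# line and drops its final character (the trailing newline when the line comes
# from readlines()), returning '/home/user/op_project/'.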
def get_line(RutesList, key):
''' Searches the list 'RutesList' for the line that starts with the given key; returns the line fields.
Base function.
#Arguments
RutesList: list returned by the function get_rutesList() in this script
key: string, key used to find the line in the list that starts with it.
'''
if any(i.startswith('- **'+key+'**') for i in RutesList):
for i in RutesList:
if i.startswith('- **'+key+'**'):
return i[:-1].split(' ')[2:]
else:
return 'Aviso: no existe linea con el key especificado'
def get_modelPlot(RutesList, PlotType = 'Qsim_map'):
''' #Returns the information of the Plot table in the configfile (entries for the requested plot type).
#Operational function.
#Arguments:
- RutesList= list, the result of reading the configfile with al.get_ruteslist.
- PlotType= string, type of plot. Default= 'Qsim_map'.
'''
for l in RutesList:
key = l.split('|')[1].rstrip().lstrip()
if key[3:] == PlotType:
EjecsList = [i.rstrip().lstrip() for i in l.split('|')[2].split(',')]
return EjecsList
return key
def get_modelPars(RutesList):
''' #Returns a dictionary with the information of the Calib table in the configfile.
#Operational function.
#Arguments:
- RutesList= list, the result of reading the configfile with al.get_ruteslist.
'''
DCalib = {}
for l in RutesList:
c = [float(i) for i in l.split('|')[3:-1]]
name = l.split('|')[2]
DCalib.update({name.rstrip().lstrip(): c})
return DCalib
def get_modelPaths(List):
''' #Returns a dictionary with the information of the Paths table in the configfile.
#Operational function.
#Arguments:
- List= list, the result of reading the configfile with al.get_ruteslist.
'''
DCalib = {}
for l in List:
c = [i for i in l.split('|')[3:-1]]
name = l.split('|')[2]
DCalib.update({name.rstrip().lstrip(): c[0]})
return DCalib
def get_modelStore(RutesList):
''' #Returns a dictionary with the information of the Store table in the configfile.
#Operational function.
#Arguments:
- RutesList= list, the result of reading the configfile with al.get_ruteslist.
'''
DStore = {}
for l in RutesList:
l = l.split('|')
DStore.update({l[1].rstrip().lstrip():
{'Nombre': l[2].rstrip().lstrip(),
'Actualizar': l[3].rstrip().lstrip(),
'Tiempo': float(l[4].rstrip().lstrip()),
'Condition': l[5].rstrip().lstrip(),
'Calib': l[6].rstrip().lstrip(),
'BackSto': l[7].rstrip().lstrip(),
'Slides': l[8].rstrip().lstrip()}})
return DStore
def get_modelStoreLastUpdate(RutesList):
''' #Returns a dictionary with the information of the Update table in the configfile.
#Operational function.
#Arguments:
- RutesList= list, the result of reading the configfile with al.get_ruteslist.
'''
DStoreUpdate = {}
for l in RutesList:
l = l.split('|')
DStoreUpdate.update({l[1].rstrip().lstrip():
{'Nombre': l[2].rstrip().lstrip(),
'LastUpdate': l[3].rstrip().lstrip()}})
return DStoreUpdate
def get_ConfigLines(RutesList, key, keyTable = None, PlotType = None):
''' #Returns a dictionary with the information of the tables in the configfile: Calib, Store, Update, Plot.
#Operational function.
#Arguments:
- RutesList= list, the result of reading the configfile with al.get_ruteslist.
- key= string, keyword of the table to read. It can be: -s, -t.
- keyTable= string, name of the table to parse. It can be: Pars, Paths, Store, Update, Plot.
- PlotType= string, type of plot, only used with keyTable='Plot'. Default= None.
'''
List = []
for i in RutesList:
if i.startswith('|'+key) or i.startswith('| '+key):
List.append(i)
if len(List)>0:
if keyTable == 'Pars':
return get_modelPars(List)
if keyTable == 'Paths':
return get_modelPaths(List)
if keyTable == 'Store':
return get_modelStore(List)
if keyTable == 'Update':
return get_modelStoreLastUpdate(List)
if keyTable == 'Plot':
return get_modelPlot(List, PlotType=PlotType)
return List
else:
return 'Aviso: no se encuentran lineas con el key de inicio especificado.'
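# Illustrative usage (the key strings '-c'/'-s' are assumptions; the real configfile keys may differ):
#   get_ConfigLines(RutesList, '-c', keyTable='Pars')   # -> dict {calibration_name: [parameter values]}
#   get_ConfigLines(RutesList, '-s', keyTable='Store')  # -> dict describing the Store table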
#-----------------------------------
#-----------------------------------
#Funciones generacion de radar
#-----------------------------------
#-----------------------------------
def file_format(start,end):
'''
Returns the file format customized for siata for elements containing
starting and ending point
Parameters
----------
start : initial date
end : final date
Returns
----------
file format with datetimes like %Y%m%d%H%M
Example
----------
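>>> file_format('2018-09-20 10:00', '2018-09-21 10:00')   # illustrative dates
'201809201000-201809211000'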
'''
start,end = pd.to_datetime(start),pd.to_datetime(end)
format = '%Y%m%d%H%M'
return '%s-%s'%(start.strftime(format),end.strftime(format))
def hdr_to_series(path):
'''
Reads hdr rain files and converts it into pandas Series
Parameters
----------
path : path to .hdr file
Returns
----------
pandas time Series with mean radar rain
'''
s = pd.read_csv(path,skiprows=5,usecols=[2,3]).set_index(' Fecha ')[' Lluvia']
s.index = pd.to_datetime(list(map(lambda x:x.strip()[:10]+' '+x.strip()[11:],s.index)))
return s
def hdr_to_df(path):
'''
Reads hdr rain files and converts it into pandas DataFrame
Parameters
----------
path : path to .hdr file
Returns
----------
pandas DataFrame with mean radar rain
'''
if path.endswith('.hdr') != True:
path = path+'.hdr'
df = pd.read_csv(path,skiprows=5).set_index(' Fecha ')
df.index = pd.to_datetime(list(map(lambda x:x.strip()[:10]+' '+x.strip()[11:],df.index)))
df = df.drop('IDfecha',axis=1)
df.columns = ['record','mean_rain']
return df
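# Illustrative usage (hypothetical path, given without extension):
#   df = hdr_to_df('path/to/rain_output')   # '.hdr' is appended automatically
#   # df has a datetime index and the columns ['record', 'mean_rain']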
def bin_to_df(path,ncells,start=None,end=None,**kwargs):
'''
Reads rain fields (.bin) and converts it into pandas DataFrame
Parameters
----------
path : path to .hdr and .bin file
start : initial date
end : final date
Returns
----------
pandas DataFrame with mean radar rain
Note
----------
path without extension, e.g. folder_path/file not folder_path/file.bin;
if start and end are None, the program processes all the data
'''
start,end = pd.to_datetime(start),pd.to_datetime(end)
df = hdr_to_df(path)
records = df['record'].values
rain_field = []
for count,record in enumerate(records):
if record != 1:
rain_field.append(wmf.models.read_int_basin('%s.bin'%path,record,ncells)[0]/1000.0)
count = count+1
# format = (count*100.0/len(records),count,len(records))
else:
rain_field.append(np.zeros(ncells))
return pd.DataFrame(np.matrix(rain_field),index=df.index)
def get_radar_rain(start,end,Dt,cuenca,codigos,rutaNC,accum=False,path_tif=None,all_radextent=False,
mask=None,meanrain_ALL=True,path_masks_csv=None,complete_naninaccum=False,save_bin=False,
save_class = False,path_res=None,umbral=0.005,
verbose=True, zero_fill = None):
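'''
Reads radar .nc scans from rutaNC for the assigned period and aggregates them to time step Dt.
Depending on the flags it returns mean rainfall per basin mask in 'codigos', accumulated fields,
the whole radar extent, and/or writes .bin/.hdr (and optional convective/stratiform) files.
See get_radar_rain_OP below for a detailed description of the shared parameters.
'''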
start,end = pd.to_datetime(start),pd.to_datetime(end)
#hora UTC
startUTC,endUTC = start + pd.Timedelta('5 hours'), end + pd.Timedelta('5 hours')
fechaI,fechaF,hora_1,hora_2 = startUTC.strftime('%Y-%m-%d'), endUTC.strftime('%Y-%m-%d'),startUTC.strftime('%H:%M'),endUTC.strftime('%H:%M')
#Obtiene las fechas por dias para listar archivos por dia
datesDias = pd.date_range(fechaI, fechaF,freq='D')
a = pd.Series(np.zeros(len(datesDias)),index=datesDias)
a = a.resample('A').sum()
Anos = [i.strftime('%Y') for i in a.index.to_pydatetime()]
datesDias = [d.strftime('%Y%m%d') for d in datesDias.to_pydatetime()]
#lista los .nc existentes de ese dia: rutas y fechas del nombre del archivo
ListDatesinNC = []
ListRutas = []
for d in datesDias:
try:
L = glob.glob(rutaNC + d + '*.nc')
ListRutas.extend(L)
ListDatesinNC.extend([i.split('/')[-1].split('_')[0] for i in L])
except:
print ('Sin archivos para la fecha %s'%d)
# Organiza las listas de rutas y la de las fechas a las que corresponde cada ruta.
ListRutas.sort()
ListDatesinNC.sort()#con estas fechas se asignaran los barridos a cada timestep.
#index con las fechas especificas de los .nc existentes de radar
datesinNC = [dt.datetime.strptime(d,'%Y%m%d%H%M') for d in ListDatesinNC]
datesinNC = pd.to_datetime(datesinNC)
#Obtiene el index con la resolucion deseada, en que se quiere buscar datos existentes de radar,
textdt = '%d' % Dt
#Agrega hora a la fecha inicial
if hora_1 != None:
inicio = fechaI+' '+hora_1
else:
inicio = fechaI
#agrega hora a la fecha final
if hora_2 != None:
final = fechaF+' '+hora_2
else:
final = fechaF
datesDt = pd.date_range(inicio,final,freq = textdt+'s')
#Gets the scan positions for each date according to dt; if there is no scan in that time step,
#the immediately previous scan is accumulated.
#Builds a list with the scan positions for each timestep and appends them to PosDates.
#If the limit for filling gaps with the previous scan is 10 min, it is only filled when dt=300s
#autofill limit: 10 min, i.e. a scan is repeated only once.
PosDates = []
pos1 = []
pos_completed = []
lim_completed = 3 #ultimos 3 barridos - 15min
for ind,d1,d2 in zip(np.arange(datesDt[:-1].size),datesDt[:-1],datesDt[1:]):
pos2 = np.where((datesinNC<d2) & (datesinNC>=d1))[0].tolist()
# if there are no scans in the starting dt it is filled with zero - empty list
#and it is not within the first 3 steps: 15 min.
# if filling is allowed
# and if in the last lim_completed steps there were no more than lim_completed-1 positions with pos_completed=2 (lim_completed-1 so it only runs up to lim_completed),
#so the gap can be filled with pos_completed=2 only once.
if len(pos2) == 0 and ind not in np.arange(lim_completed) and complete_naninaccum == True and Dt == 300. and np.where(np.array(pos_completed[ind-lim_completed:])==2)[0].size <= lim_completed-1 : #+1 porque coge los ultimos n-1 posiciones.
pos2 = pos1
pos_completed.append(2)
elif len(pos2) == 0:
pos2=[]
pos_completed.append(0)
else:
pos_completed.append(1)
#if filling is enabled and there are scans in this dt, keep this pos in case the next step's positions need to be filled
if complete_naninaccum == True and len(pos2) != 0 and Dt == 300. and np.where(np.array(pos_completed[ind-lim_completed:])==2)[0].size <= lim_completed-1 :
pos1 = pos2
else:
pos1 = []
PosDates.append(pos2)
# if set, dates and PosDates entries with zero scans are appended at the end.
if zero_fill is not None:
#datesDt is redefined after PosDates has been assigned
final = (pd.to_datetime(final) + pd.Timedelta('%ss'%Dt*zero_fill)).strftime('%Y-%m-%d %H:%M')
datesDt = pd.date_range(inicio,final,freq = textdt+'s')
# future steps with zero scans are appended to PosDates, and end is updated.
end = end + pd.Timedelta('%ss'%Dt*zero_fill) #pasos de tiempo:steps, independiente del Dt
for steps in np.arange(zero_fill): PosDates.append([])
# paso a hora local
datesDt = datesDt - dt.timedelta(hours=5)
datesDt = datesDt.to_pydatetime()
#Index de salida en hora local
rng= pd.date_range(start.strftime('%Y-%m-%d %H:%M'),end.strftime('%Y-%m-%d %H:%M'), freq= textdt+'s')
df = pd.DataFrame(index = rng,columns=codigos)
#mascara con shp a parte de wmf
if mask is not None:
#se abre un barrido para sacar la mascara
g = netCDF4.Dataset(ListRutas[PosDates[0][0]])
field = g.variables['Rain'][:].T # only the grid shape/extent is used below; 'pos' is not defined yet at this point
RadProp = [g.ncols, g.nrows, g.xll, g.yll, g.dx, g.dx]
g.close()
longs=np.array([RadProp[2]+0.5*RadProp[4]+i*RadProp[4] for i in range(RadProp[0])])
lats=np.array([RadProp[3]+0.5*RadProp[5]+i*RadProp[5] for i in range(RadProp[1])])
x,y = np.meshgrid(longs,lats)
#mask as a shp
if type(mask) == str:
#boundaries
shp = gpd.read_file(mask)
poly = shp.geometry.unary_union
shp_mask = np.zeros([len(lats),len(longs)])
for i in range(len(lats)):
for j in range(len(longs)):
if (poly.contains(Point(longs[j],lats[i])))==True:
shp_mask[i,j] = 1# Rain_mask es la mascara
l = x[shp_mask==1].min()
r = x[shp_mask==1].max()
d = y[shp_mask==1].min()
a = y[shp_mask==1].max()
#mask as a list with coordinates within the radar extent
elif type(mask) == list:
l = mask[0] ; r = mask[1] ; d = mask[2] ; a = mask[3]
x,y = x.T,y.T #aun tengo dudas con el recorte, si en nc queda en la misma pos que los lats,longs.
#boundaries position
x_wh,y_wh = np.where((x>l)&(x<r)&(y>d)&(y<a))
#se redefine sfield con size que corresponde
field = field[np.unique(x_wh)[0]:np.unique(x_wh)[-1],np.unique(y_wh)[0]:np.unique(y_wh)[-1]]
if save_bin and len(codigos)==1 and path_res is not None:
#open nc file
f = netCDF4.Dataset(path_res,'w', format='NETCDF4') #'w' stands for write
tempgrp = f.createGroup('rad_data') # as folder for saving files
lon = longs[np.unique(x_wh)[0]:np.unique(x_wh)[-1]]
lat = lats[np.unique(y_wh)[0]:np.unique(y_wh)[-1]]
#set name and leght of dimensions
tempgrp.createDimension('lon', len(lon))
tempgrp.createDimension('lat', len(lat))
tempgrp.createDimension('time', None)
#building variables
longitude = tempgrp.createVariable('longitude', 'f4', 'lon')
latitude = tempgrp.createVariable('latitude', 'f4', 'lat')
rain = tempgrp.createVariable('rain', 'f4', (('time', 'lat', 'lon')))
time = tempgrp.createVariable('time', 'i4', 'time')
#adding globalattributes
f.description = "Radar rainfall dataset containing one group"
f.history = "Created " + dt.datetime.now().strftime("%d/%m/%y")
#Add local attributes to variable instances
longitude.units = 'degrees east - wgs4'
latitude.units = 'degrees north - wgs4'
time.units = 'minutes since 2010-01-01 00:00'
rain.units = 'mm/h'
#passing data into variables
# use proper indexing when passing values into the variables - just like you would a numpy array.
longitude[:] = lon #The "[:]" at the end of the variable instance is necessary
latitude[:] = lat
else:
# acumular dentro de la cuenca.
cu = wmf.SimuBasin(rute= cuenca)
if save_class:
cuConv = wmf.SimuBasin(rute= cuenca)
cuStra = wmf.SimuBasin(rute= cuenca)
#accumulated in basin
if accum:
if mask is not None:
rvec_accum = np.zeros(field.shape)
dfaccum = pd.DataFrame(index = rng) #este producto no da con mask.
else:
rvec_accum = np.zeros(cu.ncells)
# rvec = np.zeros(cu.ncells)
dfaccum = pd.DataFrame(np.zeros((cu.ncells,rng.size)).T,index = rng)
else:
pass
#all extent
if all_radextent:
radmatrix = np.zeros((1728, 1728))
#ITERATE OVER THE SCANS OF THE PERIOD AND DERIVE THE PRODUCTS
# print ListRutas
for ind,dates,pos in zip(np.arange(len(datesDt[1:])),datesDt[1:],PosDates):
#escoge como definir el size de rvec
if mask is not None:
rvec = np.zeros(shape = field.shape)
else:
rvec = np.zeros(cu.ncells)
if save_class:
rConv = np.zeros(cu.ncells, dtype = int)
rStra = np.zeros(cu.ncells, dtype = int)
try:
#se lee y agrega lluvia de los nc en el intervalo.
for c,p in enumerate(pos):
#lista archivo leido
if verbose:
print (ListRutas[p])
#Lee la imagen de radar para esa fecha
g = netCDF4.Dataset(ListRutas[p])
rainfield = g.variables['Rain'][:].T/(((len(pos)*3600)/Dt)*1000.0)
RadProp = [g.ncols, g.nrows, g.xll, g.yll, g.dx, g.dx]
#if all extent
if all_radextent:
radmatrix += rainfield
#if mask
if mask is not None and type(mask) == str:
rvec += (rainfield*shp_mask)[np.unique(x_wh)[0]:np.unique(x_wh)[-1],np.unique(y_wh)[0]:np.unique(y_wh)[-1]]
elif mask is not None and type(mask) == list:
rvec += rainfield[np.unique(x_wh)[0]:np.unique(x_wh)[-1],np.unique(y_wh)[0]:np.unique(y_wh)[-1]]
# on WMF.
else:
#Agrega la lluvia en el intervalo
rvec += cu.Transform_Map2Basin(rainfield,RadProp)
if save_class:
ConvStra = cu.Transform_Map2Basin(g.variables['Conv_Strat'][:].T, RadProp)
# 1-stra, 2-conv
rConv = np.copy(ConvStra)
rConv[rConv == 1] = 0; rConv[rConv == 2] = 1
rStra = np.copy(ConvStra)
rStra[rStra == 2] = 0
rvec[(rConv == 0) & (rStra == 0)] = 0
rConv[rvec == 0] = 0
rStra[rvec == 0] = 0
#Cierra el netCDF
g.close()
#muletilla
path = 'bla'
except:
print ('error - no field found ')
path = ''
if accum:
if mask is not None:
rvec += np.zeros(shape = field.shape)
rvec = np.zeros(shape = field.shape)
else:
rvec_accum += np.zeros(cu.ncells)
rvec = np.zeros(cu.ncells)
else:
if mask is not None:
rvec = np.zeros(shape = field.shape)
else:
rvec = np.zeros(cu.ncells)
if save_class:
rConv = np.zeros(cu.ncells)
rStra = np.zeros(cu.ncells)
if all_radextent:
radmatrix += np.zeros((1728, 1728))
#acumula dentro del for que recorre las fechas
if accum:
rvec_accum += rvec
if mask is None: #esto para mask no sirve
dfaccum.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]= rvec
else:
pass
# si se quiere sacar promedios de lluvia de radar en varias cuencas definidas en 'codigos'
#subbasins defined for WMF
if meanrain_ALL and mask is None:
mean = []
df_posmasks = pd.read_csv(path_masks_csv,index_col=0)
for codigo in codigos:
if path == '': # si no hay nc en ese paso de tiempo.
mean.append(np.nan)
else:
mean.append(np.sum(rvec*df_posmasks['%s'%codigo])/float(df_posmasks['%s'%codigo][df_posmasks['%s'%codigo]==1].size))
# se actualiza la media de todas las mascaras en el df.
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
else:
pass
#guarda binario y df, si guardar binaria paso a paso no me interesa rvecaccum
if mask is None and save_bin == True and len(codigos)==1 and path_res is not None:
mean = []
#guarda en binario
dentro = cu.rain_radar2basin_from_array(vec = rvec,
ruta_out = path_res,
fecha = dates,
dt = Dt,
umbral = umbral)
#si guarda nc de ese timestep guarda clasificados
if dentro == 0:
hagalo = True
else:
hagalo = False
#mira si guarda o no los clasificados
if save_class:
#Escribe el binario convectivo
aa = cuConv.rain_radar2basin_from_array(vec = rConv,
ruta_out = path_res+'_conv',
fecha = dates,
dt = Dt,
doit = hagalo)
#Escribe el binario estratiforme
aa = cuStra.rain_radar2basin_from_array(vec = rStra,
ruta_out = path_res+'_stra',
fecha = dates,
dt = Dt,
doit = hagalo)
#guarda en df meanrainfall.
try:
mean.append(rvec.mean())
except:
mean.append(np.nan)
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
elif mask is None and save_bin == True and len(codigos)==1 and path_res is None: #si es una cuenca pero no se quiere guardar binarios.
mean = []
#guarda en df meanrainfall.
try:
mean.append(rvec.mean())
except:
mean.append(np.nan)
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
#guardar .nc con info de recorte de radar: mask.
if mask is not None and save_bin and len(codigos)==1 and path_res is not None:
mean = []
#https://pyhogs.github.io/intro_netcdf4.html
rain[ind,:,:] = rvec.T
time[ind] = int((dates - pd.to_datetime('2010-01-01 00:00')).total_seconds()/60) #min desde 2010
if ind == np.arange(len(datesDt[1:]))[-1]:
f.close()
print ('.nc saved')
#guarda en df meanrainfall.
if path == '': # si no hay nc en ese paso de tiempo.
mean.append(np.nan)
else:
mean.append(np.sum(rvec)/float(shp_mask[shp_mask==1].size))
#save
df.loc[dates.strftime('%Y-%m-%d %H:%M:%S')]=mean
if mask is None and save_bin == True and len(codigos)==1 and path_res is not None:
#Cierrra el binario y escribe encabezado
cu.rain_radar2basin_from_array(status = 'close',ruta_out = path_res)
print ('.bin & .hdr saved')
if save_class:
cuConv.rain_radar2basin_from_array(status = 'close',ruta_out = path_res+'_conv')
cuStra.rain_radar2basin_from_array(status = 'close',ruta_out = path_res+'_stra')
print ('.bin & .hdr escenarios saved')
else:
print ('.bin & .hdr NOT saved')
pass
#elige los retornos.
if accum == True and path_tif is not None:
cu.Transform_Basin2Map(rvec_accum,path_tif)
return df,rvec_accum,dfaccum
elif accum == True and mask is not None:
return df,rvec_accum
elif accum == True and mask is None:
return df,rvec_accum,dfaccum
elif all_radextent:
return df,radmatrix
else:
return df
def get_radar_rain_OP(start,end,Dt,cuenca,codigos,rutaNC,accum=False,path_tif=None,all_radextent=False,
meanrain_ALL=True,complete_naninaccum=False, evs_hist=False,save_bin=False,save_class = False,
path_res=None,umbral=0.005,include_escenarios = None,
verbose=True):
'''
Reads .nc files from rutaNC:101Radar_Class within the assigned period and frequency.
For now it only works with one scan per timestep, operational at 5 min.
0. It divides by 1000.0 and converts from mm/5min to mm/h.
1. Get mean radar rainfall in basins assigned in 'codigos' for finding masks, if the mask exists.
2. Write binary files if set.
- Cannot do both 1 and 2.
- To save binary files (2) set: meanrain_ALL=False, save_bin=True, path_res= path where to write results,
len('codigos')=1, nc_path aims to the one with dxp and simubasin props set.
Parameters
----------
start: string, date&time format %Y-%m-%d %H:%M, local time.
end: string, date&time format %Y-%m-%d %H:%M, local time.
Dt: float, timedelta in seconds. For this function it should be lower than 3600s (1h).
cuenca: string, simubasin .nc path with dxp and format from WMF. It should be 260 path if whole catchment analysis is needed, or any other .nc path for saving the binary file.
codigos: list, with codes of stage stations. Needed for finding the mask associated to a basin.
rutaNC: string, path with .nc files from radar meteorology group. Default in amazonas: 101Radar_Class
Optional Parameters
----------
accum: boolean, default False. True for getting the accumulated matrix between start and end.
Change returns: df,rvec (accumulated)
path_tif: string, path of tif to write accumulated basin map. Default None.
all_radextent:boolean, default False. True for getting the accumulated matrix between start and end in the
whole radar extent. Change returns: df,radmatrix.
meanrain_ALL: boolean, default True. True for getting the mean radar rainfall within several basins whose masks are defined in 'codigos'.
save_bin: boolean, default False. True for saving .bin and .hdr files with rainfall and if len('codigos')=1.
save_class: boolean,default False. True for saving .bin and .hdr for convective and stratiform classification. Applies if len('codigos')=1 and save_bin = True.
path_res: string with path where to write results if save_bin=True, default None.
umbral: float. Minimum umbral for writing rainfall, default = 0.005.
Returns
----------
- df with mean rainfall of assigned codes in 'codigos'.
- df,rvec if accum = True.
- df,radmatrix if all_radextent = True.
- save .bin and .hdr if save_bin = True, len('codigos')=1 and path_res=path.
'''
#### FECHAS Y ASIGNACIONES DE NC####
start,end = | pd.to_datetime(start) | pandas.to_datetime |
"""
Creates a new Database
"""
import re
from app import APPLOG
import pandas as pd
from core.settings import (
DB_MOD,
DB_DIR,
ARTLIST_DIR,
SQL_T_ARTLIST,
SQL_T_BOM,
SQL_CONN,
)
from sqlalchemy import create_engine
def createBomDB(bom_df: pd.DataFrame, items_df: pd.DataFrame) -> tuple:
"""Save DB to directory data, either SQLITE or CSV format."""
if bom_df.empty or items_df.empty:
return pd.DataFrame(), pd.DataFrame()
unwanted_columns = [
# "Father Name",
"Father No of pairs",
"Father Qty",
"Child Name",
"Item No._x",
"Item No._y",
]
try:
bom_df["brand"] = bom_df.apply(
lambda x: getBrandName(x.get("Process Order"), x.get("Father Name")), axis=1
)
bom_df = bom_df.merge(
items_df[
["Item No.", "FOREIGN NAME", "INVENTORY UOM", "Last Purchase Price"]
],
how="left",
left_on="Child",
right_on="Item No.",
)
bom_df = bom_df.merge(
items_df[["Item No.", "Item MRP", "Product Type"]],
how="left",
left_on="Father",
right_on="Item No.",
)
bom_df.drop(unwanted_columns, axis=1, inplace=True, errors="ignore")
bom_df.columns = [changeColumnName(name) for name in bom_df.columns.values]
bom_df["childtype"] = bom_df.apply(
lambda x: getMaterialType(x.father, x.child), axis=1
)
except Exception as e:
APPLOG.append(f">> ERROR: {e}")
return pd.DataFrame(), pd.DataFrame()
# MC, SC
bom_df["application"] = bom_df.apply(
lambda x: getApplication(x.father, x.processorder), axis=1
)
bom_df["childqty"] = | pd.to_numeric(bom_df["childqty"], errors="coerce") | pandas.to_numeric |
r"""Submodule frequentist_statistics.py includes the following functions: <br>
- **normal_check():** compare the distribution of numeric variables to a normal distribution using the
Kolmogrov-Smirnov test <br>
- **correlation_analysis():** Run correlations for numerical features and return output in different formats <br>
- **correlations_as_sample_increases():** Run correlations for subparts of the data to check robustness <br>
- **multiple_univariate_OLSs():** Tmp <br>
- **potential_for_change_index():** Calculate the potential for change index based on either variants of the r-squared
(from linear regression) or the r-value (pearson correlation) <br>
- **correct_pvalues():** function to correct for multiple testing <br>
- **partial_correlation():** function to calculate the partial correlations whilst correcting for other variables <br>
"""
from itertools import combinations
from itertools import product
from typing import Tuple
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
from matplotlib.lines import Line2D
from scipy import stats
from sklearn.linear_model import LinearRegression
from statsmodels.stats.multitest import multipletests
from .utils import apply_scaling
def normal_check(data: pd.DataFrame) -> pd.DataFrame:
r"""Compare the distribution of numeric variables to a normal distribution using the Kolmogrov-Smirnov test
Wrapper for `scipy.stats.kstest`: the empircal data is compared to a normally distributed variable with the
same mean and standard deviation. A significant result (p < 0.05) in the goodness of fit test means that the
data is not normally distributed.
Parameters
----------
data: pandas.DataFrame
Dataframe including the columns of interest
Returns
----------
df_normality_check: pd.DataFrame
Dataframe with column names, p-values and an indication of normality
Examples
----------
>>> tips = sns.load_dataset("tips")
>>> df_normality_check = normal_check(tips)
"""
# Select numeric columns only
num_features = data.select_dtypes(include="number").columns.tolist()
# Compare distribution of each feature to a normal distribution with given mean and std
df_normality_check = data[num_features].apply(
lambda x: stats.kstest(
x.dropna(), stats.norm.cdf, args=(np.nanmean(x), np.nanstd(x)), N=len(x)
)[1],
axis=0,
)
# create a label that indicates whether a feature has a normal distribution or not
df_normality_check = | pd.DataFrame(df_normality_check) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
class TestSeriesMissingData(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
# interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
from pandas import tslib
result = s.fillna(tslib.NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), | Timestamp('20130103 9:01:01') | pandas.tseries.index.Timestamp |
import pandas as pd
import psycopg2
import pickle
import numpy as np
# counterS = 0
# global counterS
# global valGlob
# from sqlalchemy import create_engine
# -*- coding: utf-8 -*-
import os
import sys
import copy
# fileName = '/Users/alessandro/Documents/PhD/OntoHistory/WDTaxo_October2014.csv'
# connection parameters
def get_db_params():
params = {
'database': 'wikidb',
'user': 'postgres',
'password': '<PASSWORD>',
'host': 'localhost',
'port': '5432'
}
conn = psycopg2.connect(**params)
return conn
def queryexecutor():
# dictStats = {}
# conn = get_db_params()
# cur = conn.cursor()
npCoso = np.load('/data/wikidata-project/WDOntoHistory/automated_revs.npy')
setCoso = set(npCoso)
for i in range(13, 18):
for j in range(1, 7):
date = "20" + str(i) + "-0" + str(j) + "-01"
if j == 1:
mt = "12"
datePrev = "20" + str(i-1) + "-" + mt + "-01"
else:
datePrev = "20" + str(i) + "-0" + str(j-1) + "-01"
print(date)
try:
queryStart = """SELECT item_id AS itemid, rev_id AS revid, time_stamp AS timestamp, user_name AS username, automated_tool FROM revision_history_201710 WHERE (time_stamp > '"""+ datePrev + """ 00:00:00' AND time_stamp < '"""+ date + """ 00:00:00');"""
conn = get_db_params()
cur = conn.cursor()
cur.execute(queryStart)
cur.close()
conn.commit()
# print(query)
timetable_temp = pd.DataFrame()
for chunk in pd.read_sql(queryStart, con=conn, chunksize=10000):
timetable_temp = timetable_temp.append(chunk)
#columns: itemid revid parid timestamp username
noEdits = timetable_temp['username'].value_counts()
noEdits = noEdits.reset_index()
noEdits.columns = ['username', 'noEdits']
noItems = timetable_temp.groupby('username')['itemid'].nunique()
noItems = noItems.reset_index()
noItems.columns = ['username', 'noItems']
noEdits = noEdits.merge(noItems, how='left')
timetable_temp.loc[timetable_temp['rev_id'].isin(setCoso),] = 'TRUE'
noBatchEdits = timetable_temp['username'].loc[timetable_temp['automated_tool'] == 'TRUE', ].value_counts()
if ~noBatchEdits.empty:
noBatchEdits = noBatchEdits.reset_index()
noBatchEdits.columns = ['username', 'noBatchEdits']
noEdits = noEdits.merge(noBatchEdits, how='left')
else:
noEdits['noBatchEdits'] = 0
print('batch edits')
classesDataQuery = """SELECT statvalue FROM tempData WHERE ts < '"""+ date + """ 00:00:00';"""
dfClasses = | pd.read_sql(classesDataQuery, con=conn) | pandas.read_sql |
""" test get/set & misc """
from datetime import timedelta
import re
import numpy as np
import pytest
from pandas import (
DataFrame,
IndexSlice,
MultiIndex,
Series,
Timedelta,
Timestamp,
date_range,
period_range,
timedelta_range,
)
import pandas._testing as tm
def test_basic_indexing():
s = Series(np.random.randn(5), index=["a", "b", "a", "a", "b"])
msg = "index 5 is out of bounds for axis 0 with size 5"
with pytest.raises(IndexError, match=msg):
s[5]
with pytest.raises(IndexError, match=msg):
s[5] = 0
with pytest.raises(KeyError, match=r"^'c'$"):
s["c"]
s = s.sort_index()
with pytest.raises(IndexError, match=msg):
s[5]
msg = r"index 5 is out of bounds for axis (0|1) with size 5|^5$"
with pytest.raises(IndexError, match=msg):
s[5] = 0
def test_basic_getitem_with_labels(datetime_series):
indices = datetime_series.index[[5, 10, 15]]
result = datetime_series[indices]
expected = datetime_series.reindex(indices)
tm.assert_series_equal(result, expected)
result = datetime_series[indices[0] : indices[2]]
expected = datetime_series.loc[indices[0] : indices[2]]
tm.assert_series_equal(result, expected)
def test_basic_getitem_dt64tz_values():
# GH12089
# with tz for values
ser = Series(
date_range("2011-01-01", periods=3, tz="US/Eastern"), index=["a", "b", "c"]
)
expected = Timestamp("2011-01-01", tz="US/Eastern")
result = ser.loc["a"]
assert result == expected
result = ser.iloc[0]
assert result == expected
result = ser["a"]
assert result == expected
def test_getitem_setitem_ellipsis():
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
tm.assert_series_equal(result, s)
s[...] = 5
assert (result == 5).all()
@pytest.mark.parametrize(
"result_1, duplicate_item, expected_1",
[
[
Series({1: 12, 2: [1, 2, 2, 3]}),
Series({1: 313}),
Series({1: 12}, dtype=object),
],
[
Series({1: [1, 2, 3], 2: [1, 2, 2, 3]}),
Series({1: [1, 2, 3]}),
Series({1: [1, 2, 3]}),
],
],
)
def test_getitem_with_duplicates_indices(result_1, duplicate_item, expected_1):
# GH 17610
result = result_1.append(duplicate_item)
expected = expected_1.append(duplicate_item)
tm.assert_series_equal(result[1], expected)
assert result[2] == result_1[2]
def test_getitem_setitem_integers():
# caused bug without test
s = Series([1, 2, 3], ["a", "b", "c"])
assert s.iloc[0] == s["a"]
s.iloc[0] = 5
tm.assert_almost_equal(s["a"], 5)
def test_series_box_timestamp():
rng = date_range("20090415", "20090519", freq="B")
ser = Series(rng)
assert isinstance(ser[0], Timestamp)
assert isinstance(ser.at[1], Timestamp)
assert isinstance(ser.iat[2], Timestamp)
assert isinstance(ser.loc[3], Timestamp)
assert isinstance(ser.iloc[4], Timestamp)
ser = Series(rng, index=rng)
assert isinstance(ser[0], Timestamp)
assert isinstance(ser.at[rng[1]], Timestamp)
assert isinstance(ser.iat[2], Timestamp)
assert isinstance(ser.loc[rng[3]], Timestamp)
assert isinstance(ser.iloc[4], Timestamp)
def test_series_box_timedelta():
rng = timedelta_range("1 day 1 s", periods=5, freq="h")
ser = Series(rng)
assert isinstance(ser[0], Timedelta)
assert isinstance(ser.at[1], Timedelta)
assert isinstance(ser.iat[2], Timedelta)
assert isinstance(ser.loc[3], Timedelta)
assert isinstance(ser.iloc[4], Timedelta)
def test_getitem_ambiguous_keyerror(indexer_sl):
ser = Series(range(10), index=list(range(0, 20, 2)))
with pytest.raises(KeyError, match=r"^1$"):
indexer_sl(ser)[1]
def test_getitem_dups_with_missing(indexer_sl):
# breaks reindex, so need to use .loc internally
# GH 4246
ser = Series([1, 2, 3, 4], ["foo", "bar", "foo", "bah"])
with pytest.raises(KeyError, match=re.escape("['bam'] not in index")):
indexer_sl(ser)[["foo", "bar", "bah", "bam"]]
def test_setitem_ambiguous_keyerror(indexer_sl):
s = Series(range(10), index=list(range(0, 20, 2)))
# equivalent of an append
s2 = s.copy()
indexer_sl(s2)[1] = 5
expected = s.append(Series([5], index=[1]))
tm.assert_series_equal(s2, expected)
def test_setitem(datetime_series, string_series):
datetime_series[datetime_series.index[5]] = np.NaN
datetime_series[[1, 2, 17]] = np.NaN
datetime_series[6] = np.NaN
assert np.isnan(datetime_series[6])
assert np.isnan(datetime_series[2])
datetime_series[np.isnan(datetime_series)] = 5
assert not np.isnan(datetime_series[2])
def test_setslice(datetime_series):
sl = datetime_series[5:20]
assert len(sl) == len(sl.index)
assert sl.index.is_unique is True
# FutureWarning from NumPy about [slice(None, 5).
@pytest.mark.filterwarnings("ignore:Using a non-tuple:FutureWarning")
def test_basic_getitem_setitem_corner(datetime_series):
# invalid tuples, e.g. td.ts[:, None] vs. td.ts[:, 2]
msg = "key of type tuple not found and not a MultiIndex"
with pytest.raises(KeyError, match=msg):
datetime_series[:, 2]
with pytest.raises(KeyError, match=msg):
datetime_series[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
with tm.assert_produces_warning(FutureWarning):
# GH#31299
result = datetime_series[[slice(None, 5)]]
expected = datetime_series[:5]
tm.assert_series_equal(result, expected)
# OK
msg = r"unhashable type(: 'slice')?"
with pytest.raises(TypeError, match=msg):
datetime_series[[5, slice(None, None)]]
with pytest.raises(TypeError, match=msg):
datetime_series[[5, slice(None, None)]] = 2
def test_slice(string_series, object_series):
numSlice = string_series[10:20]
numSliceEnd = string_series[-10:]
objSlice = object_series[10:20]
assert string_series.index[9] not in numSlice.index
assert object_series.index[9] not in objSlice.index
assert len(numSlice) == len(numSlice.index)
assert string_series[numSlice.index[0]] == numSlice[numSlice.index[0]]
assert numSlice.index[1] == string_series.index[11]
assert tm.equalContents(numSliceEnd, np.array(string_series)[-10:])
# Test return view.
sl = string_series[10:20]
sl[:] = 0
assert (string_series[10:20] == 0).all()
def test_timedelta_assignment():
# GH 8209
s = Series([], dtype=object)
s.loc["B"] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta("1 days"), index=["B"]))
s = s.reindex(s.index.insert(0, "A"))
tm.assert_series_equal(s, Series([np.nan, | Timedelta("1 days") | pandas.Timedelta |
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import numpy as np
import pandas as pd
import mars.dataframe as md
from mars.config import option_context
from mars.dataframe import DataFrame
from mars.deploy.local.core import new_cluster
from mars.session import new_session
from mars.tests.core import TestBase
try:
import vineyard
except ImportError:
vineyard = None
try:
import sqlalchemy
except ImportError:
sqlalchemy = None
try:
import pyarrow as pa
except ImportError:
pa = None
try:
import fastparquet
except ImportError:
fastparquet = None
class Test(TestBase):
def setUp(self):
super().setUp()
self.ctx, self.executor = self._create_test_context()
def testToCSVExecution(self):
index = pd.RangeIndex(100, 0, -1, name='index')
raw = pd.DataFrame({
'col1': np.random.rand(100),
'col2': np.random.choice(['a', 'b', 'c'], (100,)),
'col3': np.arange(100)
}, index=index)
df = DataFrame(raw, chunk_size=33)
with tempfile.TemporaryDirectory() as base_path:
# DATAFRAME TESTS
# test one file with dataframe
path = os.path.join(base_path, 'out.csv')
r = df.to_csv(path)
self.executor.execute_dataframe(r)
result = pd.read_csv(path, dtype=raw.dtypes.to_dict())
result.set_index('index', inplace=True)
pd.testing.assert_frame_equal(result, raw)
# test multi files with dataframe
path = os.path.join(base_path, 'out-*.csv')
r = df.to_csv(path)
self.executor.execute_dataframe(r)
dfs = [pd.read_csv(os.path.join(base_path, f'out-{i}.csv'),
dtype=raw.dtypes.to_dict())
for i in range(4)]
result = pd.concat(dfs, axis=0)
result.set_index('index', inplace=True)
| pd.testing.assert_frame_equal(result, raw) | pandas.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
import sys
import os
import random
import pdb
import glob
import numpy as np
import pandas as pd
from skimage import io
from skimage.color import rgba2rgb
from . import util
from .util import kconfig
from .transformers import resize_image
#----------------------------------------------------------------------------Train/Val- iminfo generation
def save_multi_sources_train_val_imgs(img_dirpath, train_sources_dict, labels_dict, train_frac=0.9):
# Structure
#{'source_origin': {'class_name': img_name}}
columns = ['img_relpath', 'class_label', 'source_label']
train_info = []
val_info = []
# pdb.set_trace()
for src_id, source in train_sources_dict.items():
full_img_dirpath_src = os.path.join(img_dirpath, source)
store_img_relpath_src = os.path.join(source)
for lbl_id, label in labels_dict.items():
full_img_dirpath_label = os.path.join(full_img_dirpath_src, label)
store_img_relpath_label = os.path.join(store_img_relpath_src, label)
# Load all the images
imgs_relpath = [os.path.join(store_img_relpath_label, img) for img in os.listdir(full_img_dirpath_label) if not img.startswith('.')]
random.shuffle(imgs_relpath)
train_len = int(len(imgs_relpath) * train_frac)
src_lb_train_data = [[img_relpath, lbl_id, src_id] for img_relpath in imgs_relpath[:train_len]]
src_lb_val_data = [[img_relpath, lbl_id, src_id] for img_relpath in imgs_relpath[train_len:]]
train_info += src_lb_train_data
val_info += src_lb_val_data
# pdb.set_trace()
random.shuffle(train_info)
df_train_info = pd.DataFrame(data=train_info, columns=columns)
df_val_info = pd.DataFrame(data=val_info, columns=columns)
df_train_info.to_csv(util.get_train_info_datapath(), index=False)
df_val_info.to_csv(util.get_val_info_datapath(), index=False)
return df_train_info, df_val_info
#----------------------------------------------------------------------------Test- iminfo generation
def save_multi_sources_test_imgs(img_dirpath, test_sources_dict, labels_dict):
# Structure
#{'source_origin': {'class_name': img_name}}
columns = ['img_relpath', 'class_label', 'source_label']
test_info = []
# pdb.set_trace()
for src_id, source in test_sources_dict.items():
full_img_dirpath_src = os.path.join(img_dirpath, source)
store_img_relpath_src = os.path.join(source)
for lbl_id, label in labels_dict.items():
full_img_dirpath_label = os.path.join(full_img_dirpath_src, label)
store_img_relpath_label = os.path.join(store_img_relpath_src, label)
# Load all the images
imgs_relpath = [os.path.join(store_img_relpath_label, img) for img in os.listdir(full_img_dirpath_label) if not img.startswith('.')]
imgs_relpath = [[img_relpath, lbl_id, src_id] for img_relpath in imgs_relpath]
test_info += imgs_relpath
# pdb.set_trace()
df_test_info = | pd.DataFrame(data=test_info, columns=columns) | pandas.DataFrame |
#===============================================
# PROJECT <NAME>
# functions for preprocessing and visualization
#
# @claudioalvesmonteiro
#===============================================
# function to generate dummy variables based on a frequency threshold
def categoryToDummyThreshold(dataframe, data, column, threshold):
import pandas as pd
# capture the CID distribution
cont = count_porcent(data, column)
# merge with the outpatient (ambulatorio) base
data = data.merge(cont, on=column)
# create the column keeping only CIDs above the threshold
data[('SIG_'+column)] = [data[column][x] if data['porcent'][x] >= threshold else 'OUTROS' for x in range(len(data))]
# select unique cases of 'Cod Integração' and the CID column
uni = data[['Cod Integração', ('SIG_'+column)]].drop_duplicates()
# merge with the target base
dataframe = dataframe.merge(uni, on='Cod Integração', how='left')
# create dummy variables for CIDs above the threshold and merge with the base
dummies = | pd.get_dummies(dataframe[('SIG_'+column)], prefix=column) | pandas.get_dummies |
import sys
import click
import requests, requests_cache
import configparser
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
from datetime import datetime
from mpl_toolkits.axes_grid1 import make_axes_locatable
from pynance.auth import signed_params
from pynance.utils import create_session, create_datetime, to_milliseconds
from utils import WIDTH, GOLDEN_RATIO, pt_to_in
def create_trades_frame(trades_list):
trades = pd.DataFrame(trades_list)
return trades.assign(time= | pd.to_datetime(trades.time, unit="ms") | pandas.to_datetime |
import matplotlib.pyplot as plt
import seaborn as sns
import pdb
import requests
import re
import threading
import concurrent.futures
import numpy as np
import pandas as pd
from functools import reduce
from collections import Counter
from sklearn.preprocessing import normalize, StandardScaler, Normalizer, RobustScaler, MinMaxScaler, MaxAbsScaler
import networkx as nx
# import signal
import warnings
warnings.filterwarnings("ignore")
from url_utils import *
from wiki_scrapper import WikiScrapper
from WikiMultiQuery import wiki_multi_query
from graph_helpers import create_dispersion_df, dict_values_to_df, sort_dict_values, format_categories, compare_categories, rank_order, similarity_rank
################
# GraphCreator #
################
class GraphCreator:
"""
Retrieves data from the Wikipedia API and constructs a graph network of article relations.
Allows for the fast creation of a graph based recommender system.
Input:
------
entry (required, string)
A string containing the title of a Wikipedia article or a valid Wikipedia URL.
include_see_also (defaul: True, bool)
If True, marks any see also links as important and related to the main topic (default).
If False, does nothing to the see also links. Mark as False if validating recommendations
max_recursive_requests (default: 50, int)
The maximum number of times an API call will repeat to get all information. This can be an important parameter to set if efficiency is an issue.
Lower values will be more efficient, but may miss important information. Higher values are less efficient, but gather more data.
"""
def __init__(self, entry, include_see_also=True, max_recursive_requests=50):
self.graph = nx.DiGraph()
self.entry = get_title(entry) # from url_utils
self.max_requests = max_recursive_requests
ws = WikiScrapper(f"https://en.wikipedia.org/wiki/{self.entry}")
ws.parse_intro_links()
self.primary_nodes = {title : True for title in ws.get_primary_links(include_see_also=include_see_also)}
# see also articles to be used as targets for evaluation
self.see_also_articles = ws.see_also_link_titles
self.visited = {self.entry}
self.next_links = []
self.categories = {}
self.redirect_targets = []
self.redirect_sources = {}
self.query_articles([self.entry])
# setup timeout function
# def handle_alarm(signum, frame):
# raise RuntimeError
# signal.signal(signal.SIGALRM, handle_alarm)
######################################
# GRAPH SETUP & MAINTAINANCE METHODS #
######################################
def _add_edges(self, articles):
"""
Given a list of articles, adds nodes and connections (edges) to the network.
It can be called manually, but the expected use is within an internal graph update call.
"""
for article in articles:
self.categories[article['title']] = format_categories([category.split("Category:")[1] for category in article['categories'] if not bool(re.findall(r"(articles)|(uses)|(commons)|(category\:use)", category, re.I))])
self.graph.add_edges_from(
[(article['title'], link) for link in article['links']])
self.graph.add_edges_from(
[(linkhere, article['title']) for linkhere in article['linkshere']])
def update_edge_weights(self):
"""
Edges are weighted by the number of categories two connected nodes share. This method looks at each node and its neighbors and adjusts in- and outbound edge weights as needed.
"""
for edge in self.graph.out_edges:
weight = compare_categories(edge[0], edge[1], self.categories)
self.graph.add_edge(edge[0], edge[1], weight=weight)
for edge in self.graph.in_edges:
weight = compare_categories(edge[0], edge[1], self.categories)
self.graph.add_edge(edge[0], edge[1], weight=weight)
def get_edge_weights(self):
"""
A getter method to view the edge weights of each node (in and outbound).
"""
edge_weights = []
for edge in self.graph.edges:
edge_weights.append((edge[0], edge[1], self.graph.get_edge_data(edge[0], edge[1])['weight']))
return | pd.DataFrame(edge_weights, columns=["source_node", "target_node", "edge_weight"]) | pandas.DataFrame |
from binance.client import Client
import keys
from pandas import DataFrame as df
from datetime import datetime
import trading_key
client=Client(api_key=keys.Pkeys, api_secret=keys.Skeys)
#get candle data
def candle_data(symbols, intervals):
candles=client.get_klines(symbol=symbols, interval=intervals)
#create (date) dataframe
candles_data_frame=df(candles)
candles_data_frame_date=candles_data_frame[0]
#create the empty date list
final_date=[]
#convert timestamp to readable date and append it to the list
for time in candles_data_frame_date.unique():
readable=datetime.fromtimestamp(int(time/1000))
final_date.append(readable)
#drop the first and last columns of the dateframe
candles_data_frame.pop(0)
candles_data_frame.pop(len(candles_data_frame.columns))
dataframe_final_date= | df(final_date) | pandas.DataFrame |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import math
import itertools
import pandas as pd
import datetime
from fclib.dataset.retail.benchmark_paths import DATA_DIR
import fclib.dataset.retail.benchmark_settings as bs
# Utility functions
def week_of_month(dt):
"""Get the week of the month for the specified date.
Args:
dt (Datetime): Input date
Returns:
wom (Integer): Week of the month of the input date
"""
from math import ceil
first_day = dt.replace(day=1)
dom = dt.day
adjusted_dom = dom + first_day.weekday()
wom = int(ceil(adjusted_dom / 7.0))
return wom
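# Illustrative check (hypothetical date): 2020-01-01 is a Wednesday (weekday() == 2),
# so week_of_month(datetime.datetime(2020, 1, 15)) -> ceil((15 + 2) / 7) = 3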
def lagged_features(df, lags):
"""Create lagged features based on time series data.
Args:
df (Dataframe): Input time series data sorted by time
lags (List): Lag lengths
Returns:
fea (Dataframe): Lagged features
"""
df_list = []
for lag in lags:
df_shifted = df.shift(lag)
df_shifted.columns = [x + "_lag" + str(lag) for x in df_shifted.columns]
df_list.append(df_shifted)
fea = pd.concat(df_list, axis=1)
return fea
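# Illustrative usage (hypothetical frame):
#   df = pd.DataFrame({"move": [10, 20, 30, 40]})
#   lagged_features(df, [1, 2])  # -> columns ["move_lag1", "move_lag2"],
#   where "move_lag1" is "move" shifted down one row (NaN in the first row)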
def moving_averages(df, start_step, window_size=None):
"""Compute averages of every feature over moving time windows.
Args:
df (Dataframe): Input features as a dataframe
Returns:
fea (Dataframe): Dataframe consisting of the moving averages
"""
if window_size is None:
# Use a large window to compute average over all historical data
window_size = df.shape[0]
fea = df.shift(start_step).rolling(min_periods=1, center=False, window=window_size).mean()
fea.columns = fea.columns + "_mean" + str(window_size)
return fea
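# Illustrative usage (hypothetical frame): moving_averages(df, start_step=1, window_size=2)
# adds columns like "move_mean2": the mean of the 2 rows ending `start_step` rows back,
# computed with min_periods=1 so early rows are averaged over what is available.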
if __name__ == "__main__":
for submission_round in range(1, bs.NUM_ROUNDS + 1):
print("creating features for round {}...".format(submission_round))
# read in data
train_file = os.path.join(DATA_DIR, "train/train_round_{}.csv".format(submission_round))
aux_file = os.path.join(DATA_DIR, "train/aux_round_{}.csv".format(submission_round))
train_df = pd.read_csv(train_file, index_col=False)
aux_df = pd.read_csv(aux_file, index_col=False)
# calculate move
train_df["move"] = train_df["logmove"].apply(lambda x: round(math.exp(x)))
train_df = train_df[["store", "brand", "week", "profit", "move", "logmove"]]
# merge train_df with aux_df
all_df = pd.merge(train_df, aux_df, how="right", on=["store", "brand", "week"])
# fill missing datetime gaps
store_list = all_df["store"].unique()
brand_list = all_df["brand"].unique()
week_list = range(bs.TRAIN_START_WEEK, bs.TEST_END_WEEK_LIST[submission_round - 1] + 1)
item_list = list(itertools.product(store_list, brand_list, week_list))
item_df = | pd.DataFrame.from_records(item_list, columns=["store", "brand", "week"]) | pandas.DataFrame.from_records |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 16:44:24 2020
@author: Borja
"""
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
"""
- Ultra Trail Mont Blanc. Rankings from 2003 to 2017.
https://www.kaggle.com/ceruleansea/ultratrail-du-montblanc-20032017?select=utmb_2017.csv
- Ultra Trail Mont Blanc. Rankings from 2017 to 2019.
https://www.kaggle.com/purpleyupi/utmb-results
Data saved in 'Data/csv/*.csv'
"""
utmb_2003 = pd.read_csv('Data/csv/utmb_2003.csv', sep=',', decimal='.')
utmb_2004 = pd.read_csv('Data/csv/utmb_2004.csv', sep=',', decimal='.')
utmb_2005 = pd.read_csv('Data/csv/utmb_2005.csv', sep=',', decimal='.')
utmb_2006 = pd.read_csv('Data/csv/utmb_2006.csv', sep=',', decimal='.')
utmb_2007 = pd.read_csv('Data/csv/utmb_2007.csv', sep=',', decimal='.')
utmb_2008 = | pd.read_csv('Data/csv/utmb_2008.csv', sep=',', decimal='.') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
from __future__ import division
from datetime import datetime
from sklearn import linear_model
import pandas as pd
import numpy as np
import scipy.stats as st
import statsmodels.distributions.empirical_distribution as edis
import seaborn as sns; sns.set(color_codes=True)
import matplotlib.pyplot as plt
#########################################################################
# The purpose of this script is to use historical temperature and streamflow data
# to calculate synthetic time series of daily flows at each of the stream gages
# used in the hydropower production models.
# Regression and vector-autoregressive errors are used to simulate total annual
# streamflows, and these are then paired with daily streamflow fractions tied
# to daily temperature dynamics
#########################################################################
# Import historical temperature data
df_temp = pd.read_excel('Synthetic_streamflows/hist_temps_1953_2007.xlsx')
his_temp_matrix = df_temp.values
# Import calender
calender=pd.read_excel('Synthetic_streamflows/BPA_hist_streamflow.xlsx',sheet_name='Calender',header= None)
calender=calender.values
julian=calender[:,2]
###############################
# Synthetic HDD CDD calculation
# Simulation data
sim_weather= | pd.read_csv('Synthetic_weather/synthetic_weather_data.csv',header=0) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# # This will create plots for institutions of universities in THE WUR univs only and for the period of 2007-2017. The input dataset contains info of THE WUR univs only but for any period of time.
# #### The unpaywall dump used was from (April or June) 2018; hence analysis until 2017 only is going to be included.
# ## Question : What is the distribution of incoming citation counts for OA and non-OA papers published by THE WUR univ within each country?
# In[1]:
# standard path wrangling to be able to import project config and sources
import os
import sys
from os.path import join
root = os.path.dirname(os.getcwd())
sys.path.append(root)
print('Project root: {}'.format(root))
# In[2]:
sys.path.append(join(root,"spark/shared/"))
from MAG_utils import *
# In[ ]:
# In[3]:
# Built-in
import json
# Installed
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib import rc,rcParams
from matplotlib.patches import Rectangle
import unicodedata
import re
from statistics import mean
# In[4]:
cfg = None
with open(join(root,"spark/config.json")) as fp:
cfg = json.load(fp)
# In[5]:
# cfg
# In[6]:
cnames_for_plot = {
"austria" : "Austria",
"brazil" : "Brazil",
"germany" : "Germany",
"india" : "India",
"portugal" : "Portugal",
"russia" : "Russia",
"uk" : "UK",
"usa" : "USA"
}
# In[7]:
output_dir = join(root,"documents/analysis/dataset_selection_question5")
# In[ ]:
# Create a new directory to save results
os.makedirs(output_dir)
# In[8]:
study_years = [2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017]
# # Extraction of Citation Counts of OA and unknown papers for each university
# In[9]:
def get_univ_papers_citation_counts(country_papers_OA_df, univs_name):
'''
Get the plot of count of citations for both OA and non-OA papers for each university in the input country
'''
univs_info = {}
univs_not_found = []
univs_found = []
for org_univ_name in set(univs_name): # remove duplicate univ names in the THE list, if any
# print(org_univ_name)
THE_univ_name_normalised = mag_normalisation_institution_names(org_univ_name)
'''
The dataframe that will be selected for the current univ is either :
1. When the MAG normalizedname column matches to THE_univ_name_normalised
or
2. When the MAG normalised(wikiname) matches to THE_univ_name_normalised -- this matches English names (in MAG wiki links as well as THE) of non English name (in MAG normalisedname or displayname) universities.
'''
univ_papers_df_set1 = country_papers_OA_df[country_papers_OA_df['normalizedname']==THE_univ_name_normalised]
univ_papers_df_set2 = country_papers_OA_df[country_papers_OA_df['normalizedwikiname']==THE_univ_name_normalised]
# The records in the two sets can be exactly the same
# Concat and remove exact duplicates -- https://stackoverflow.com/a/21317570/530399
univ_papers_df = | pd.concat([univ_papers_df_set1, univ_papers_df_set2]) | pandas.concat |
"""
Created on Mon Mar 23 17:06:41 2020
@author: diego
"""
from mip import *
import os
import pandas as pd
import numpy as np
import subprocess
# from .Configurations import Gurobi_license_path
# os.environ['GRB_LICENSE_FILE'] = Gurobi_license_path
#list_of_inputs
#
# P : number of working shifts
# T : time horizon partitioned in time shifts [int]
# C : hourly cost of each operator [int]
# sigma_t : working hours for time t determined by the corresponding shift p [..list..]
# C_t = C*sigma_t : cost of each operator at time t [..list..]
# f_j : set-up cost of sorting stage j [..list..]
# a_t : quantity of material in kg unloaded from trucks at time t [..list..]
# alpha_j : percentage of waste processed in stage j-1, received in input by buffer j [..list..]
# S_j : maximum inventory capacity of the sorting stage buffer j [..list..]
# LC_j : critical stock level threshold of buffer j [..list..]
# rho_j : fraction of material allowed to be left at buffer j at the end of time horizon [..list..]
# K_j : single operator hourly production capacity [kg/h] of sorting stage j [..list..]
# SK_{j,t} = K_j*\sigma_t : operator sorting capacity in sorting stage j, at time t [..list..]
# M : maximum number of operators available in each time shift [int]
# E_j : minimum number of operators to be employed in each time shift of stage j [..list..]
# h_j^i : slope of the i-th part of linearization of the buffer j stock cost curve [..list..]
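#
# The objective assembled in m.objective further below minimises the sum of three cost terms
# (in the notation above):
#   sum_{j,t} C_t * x_{j,t}                            -> workforce cost per shift
# + sum_{j,t} f_j * y_{j,t}                            -> set-up cost of running sorting stage j at time t
# + sum_{j,t} ( h_j^1 * I1_{j,t} + h_j^2 * I2_{j,t} )  -> piecewise-linear stock holding cost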
def sorting_model(input_app,arrivals):
GapTol = 1e-3
TimeLimit = 600
deltadays = (input_app.horizon_UB - input_app.horizon_LB).days + 1
J = 2 #two default sorting stages
P = input_app.dailyshifts
TH = deltadays * P
sigma = [input_app.shift_1_hours,input_app.shift_2_hours,input_app.shift_3_hours]
sigma = sigma[:P]
T = [[]] * P
for p in range(P):
T[p] = np.arange(p, TH, P)
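        # e.g. with P = 2 shifts and TH = 6 time slots: T[0] = [0, 2, 4] and T[1] = [1, 3, 5]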
alpha = [1,input_app.firstTO2nd_sort/100]
S = [input_app.sort1_maxstock,input_app.sort2_maxstock]
K = [input_app.sort1_capacity,input_app.sort2_capacity]
SK = np.zeros((J,P)) #Selection single worker's shiftly productive capacity
for j in range(J):
for p in range(P):
SK[j,p] = K[j] * sigma[p]
ro = [input_app.finalstock_treshold] * J
LC = []
for j in range(J):
LC.append((input_app.overfill_treshold/100) * S[j])
C = input_app.operator_wage # single worker's hourly cost (euro/h)
C_t = np.zeros(TH) # single worker's cost for the whole shift p (euro/shift)
for t in range(TH):
if t in T[0]:
C_t[t] = C * sigma[0]
else:
C_t[t] = C * sigma[1]
E = [input_app.min_op_sort1,input_app.min_op_sort2]
M = input_app.max_operators
    setup_cost = [input_app.setup_sort1, input_app.setup_sort2]
# Arrivals
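    # a[t] interleaves the two per-shift arrival series: slots in T[0] take values from
    # arrivals[0] (shift 1) and slots in T[1] take values from arrivals[1] (shift 2)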
a = np.zeros(TH)
i = 0
j = 0
for t in range(TH):
if t in T[0]:
a[t] = arrivals[0][i]
i += 1
if t in T[1]:
a[t] = arrivals[1][j]
j += 1
base = 20
prec = 2
def rounding(a, prec, base):
return base * (a / base).round().round(prec)
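    # rounding() snaps each arrival to the nearest multiple of `base`,
    # e.g. with base = 20: 437.0 -> 440.0 and 449.9 -> 440.0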
a = rounding(a, prec, base)
a = dict(enumerate(a))
# Storage costs
cost_labels = ['null', 'under balance', 'balanced', 'over balance']
dh = np.zeros((J, 2))
dh[0, 0] = 0.009 # over the balance point between production and storage costs
dh[0, 1] = 0.4
dh[1, 0] = 0.005
dh[1, 1] = 0.2
for j in range(2, J):
dh[j, 0] = 0.005
dh[j, 1] = 0.2
C_prod = True
C_stock = True
######## Model ###########################################################
# Gurobi_cl_path in Configurations.py obtained by "which gurobi_cl" in terminal prompt.
from .Configurations import Gurobi_cl_path
solver = 'GRB'
if solver == 'GRB':
try:
# gurobi_cl path to include in Configurations.py can be retrieved
# by entering "which gurobi_cl" in a command/terminal prompt. Please take a look to Configurations.py
subprocess.run(Gurobi_cl_path, stdout=subprocess.PIPE).stdout.decode('utf-8')
m = Model("WFA", sense=MINIMIZE, solver_name=solver)
except Exception as e:
print(e)
solver = 'CBC'
m = Model("WFA", sense=MINIMIZE, solver_name=solver)
else:
solver = 'CBC'
m = Model("WFA", sense=MINIMIZE, solver_name=solver)
# VARIABLES
# workforce to be employed on selection stag j
if C_prod == True:
x = {(j, t): m.add_var(lb=0, var_type=INTEGER, obj=C_t[t], name="x_{}_{}".format(j, t)) for j in range(J) for t in range(TH)}
else:
x = {(j, t): m.add_var(lb=0, var_type=INTEGER, name="x_{}_{}".format(j, t)) for j in range(J) for t in range(TH)}
# quantity preoccesed in a shift
u = {(j, t): m.add_var(lb=0, var_type=CONTINUOUS, name="u_{}_{}".format(j, t)) for j in range(J) for t in range(TH)}
# selection stage start
if C_prod == True:
y = {(j, t): m.add_var(lb=0, ub=1, obj=setup_cost[j], var_type=BINARY, name="y_{}_{}".format(j, t)) for j in range(J) for t in range(TH)}
else:
y = {(j, t): m.add_var(lb=0, ub=1, var_type=BINARY, name="y_{}_{}".format(j, t)) for j in range(J) for t in range(TH)}
# These are the stock variables regarding the j-th selection stage
if C_stock == True:
I = {(j, t): m.add_var(lb=0, ub=S[j], var_type=CONTINUOUS, name="I_{}_{}".format(j, t)) for j in range(J) for t in range(TH)} # stock quantity
I_1 = {(j, t): m.add_var(lb=0, ub=LC[j], var_type=CONTINUOUS, obj=dh[j, 0], name="I_1_{}_{}".format(j, t)) for j in range(J) for t in range(TH)} # stock quantity below critical level
I_2 = {(j, t): m.add_var(lb=0, var_type=CONTINUOUS, obj=dh[j, 1], name="I_2_{}_{}".format(j, t)) for j in range(J) for t in range(TH)} # stock quantity above critical level
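        # I_1/I_2 linearise the stock cost: I = I_1 + I_2 (constraint (7)); I_1 is the portion up to the
        # critical level LC[j], charged at the lower slope dh[j,0], while I_2 is the excess above LC[j],
        # charged at the higher slope dh[j,1]. The binary w (constraints (8)-(9)) forces I_1 to be filled
        # before any quantity can be placed in I_2.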
else:
I = {(j, t): m.add_var(lb=0, ub=S[j], var_type=CONTINUOUS, name="I_{}_{}".format(j, t)) for j in range(J) for t in range(TH)} # stock quantity
# binary variable concerning the overcoming of critical level
if C_stock == True:
w = {(j, t): m.add_var(lb=0, ub=1, var_type=BINARY, name="w_{}_{}".format(j, t)) for j in range(J) for t in range(TH)}
# CONSTRAINTS
# ( 2 ) #######################################################################################
for j in range(J):
for t in range(TH):
# Planned workforce must not exceed the maximum number of available workforce
m += x[j, t] <= M * y[j, t], "bigM_{}_{}".format(j, t)
# For each period there is a minimum fixed number of workers for selection stage j
m += x[j, t] >= E[j] * y[j, t], 'minWF_{}_{}'.format(j, t)
###############################################################################################
# ( 3 ) #######################################################################################
for t in range(TH):
m += xsum(x[j, t] for j in range(J)) <= M, "Wforce_bound_{}".format(t)
###############################################################################################
# ( 4 ) #######################################################################################
# These constraints bound the processed quantity to the maximum production capacity
# over the corresponding shift as a function of the workforce allocation
for j in range(J):
for t in range(TH):
if t in T[0]:
m += u[j, t] <= SK[j, 0] * x[j, t], "prod_{}_{}".format(j, t)
if t in T[1]:
m += u[j, t] <= SK[j, 1] * x[j, t], "prod_{}_{}".format(j, t)
###############################################################################################
# not included in paper formulation ###########################################################
# Starting level of the first storage
for j in range(J):
if j == 0:
m += I[j, 0] == a[0] - u[0, 0], "initializzation_{}".format(j)
else:
m += I[j, 0] == alpha[j] * u[j - 1, 0], "initializzation_{}".format(j)
###############################################################################################
# ( 5 ) & ( 6 ) ################################################################################
# Flow balance equation for 1st and 2nd selection
for j in range(J):
for t in range(1, TH):
if j == 0:
m += I[j, t] == I[j, t - 1] + a[t] - u[j, t], "balance_{}_{}".format(j, t)
else:
m += I[j, t] == I[j, t - 1] + alpha[j] * u[j - 1, t] - u[j, t], "balance_{}_{}".format(j, t)
###############################################################################################
if C_stock == True:
# ( 7 ) #######################################################################################
# The constraints below are needed to linearize the stock costs relating to selection stages
for j in range(J):
for t in range(TH):
m += I[j, t] == I_1[j, t] + I_2[j, t], "linear_stock_{}_{}".format(j, t)
###############################################################################################
# ( 8 ) & ( 9 ) ###############################################################################
for t in range(TH):
m += I_1[j, t] >= LC[j] * w[j, t], "v1_{}_{}".format(j, t)
m += I_2[j, t] <= (S[j] - LC[j]) * w[j, t], "v2_{}_{}".format(j, t)
###############################################################################################
# ( 10 ) ######################################################################################
# These two constraints set the stock level over the last period below a decided level both for the first and second stock level
for j in range(J):
m += I[j, TH - 1] <= np.floor(ro[j] * LC[j]), "end_{}_{}".format(j, TH - 1)
###############################################################################################
m.objective = minimize(xsum(C_t[t] * x[j, t] for j in range(J) for t in range(TH)) +
xsum(setup_cost[j] * y[j, t] for j in range(J) for t in range(TH)) +
xsum(dh[j, 0] * I_1[j, t] + dh[j, 1] * I_2[j, t] for j in range(J) for t in range(TH)))
m.max_gap = GapTol
status = m.optimize(max_seconds=TimeLimit)
var_results = []
y_opt = []
x_opt = []
u_opt = []
if status == OptimizationStatus.NO_SOLUTION_FOUND:
status = "infeasible"
performances = [m.objective_value, m.objective_bound, m.gap]
if status == OptimizationStatus.OPTIMAL or status == OptimizationStatus.FEASIBLE:
status = "optimal"
performances = [m.objective_value, m.objective_bound, m.gap]
for v in m.vars:
var_results.append([v.name, v.x])
var_results = pd.DataFrame.from_records(var_results, columns=["variable", "value"])
y_opt = var_results[var_results['variable'].str.contains("y", na=False)]
x_opt = var_results[var_results['variable'].str.contains("x", na=False)]
u_opt = var_results[var_results['variable'].str.contains("u", na=False)]
y_opt['value'] = y_opt['value'].apply(pd.to_numeric).astype(int)
x_opt['value'] = x_opt['value'].apply(pd.to_numeric).astype(int)
u_opt['value'] = u_opt['value'].apply(pd.to_numeric).astype(int)
y_opt_1t = y_opt[y_opt['variable'].str.contains("y_0", na=False)]['value'].tolist()
y_opt_2t = y_opt[y_opt['variable'].str.contains("y_1", na=False)]['value'].tolist()
        y_opt = pd.DataFrame.from_records([y_opt_1t, y_opt_2t])
#!/usr/bin/env python
"""
analyse Elasticsearch query
"""
import json
from elasticsearch import Elasticsearch
from elasticsearch import logger as es_logger
from collections import defaultdict, Counter
import re
import os
from datetime import datetime
# Preprocess terms for TF-IDF
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
# progress bar
from tqdm import tqdm
# ploting
import matplotlib.pyplot as plt
# LOG
import logging
from logging.handlers import RotatingFileHandler
# Word embedding for evaluation
from sentence_transformers import SentenceTransformer
from sklearn.manifold import TSNE
import seaborn as sns
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn.metrics.pairwise import cosine_similarity
from scipy import sparse
import scipy.spatial as sp
# Spatial entity as descriptor :
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter
# venn
from matplotlib_venn_wordcloud import venn2_wordcloud, venn3_wordcloud
import operator
# Global var on Levels on spatial and temporal axis
spatialLevels = ['city', 'state', 'country']
temporalLevels = ['day', 'week', 'month', 'period']
def elasticsearch_query(query_fname, logger):
"""
    Build an ES query and return a default dict with results
:return: tweetsByCityAndDate
"""
# Elastic search credentials
client = Elasticsearch("http://localhost:9200")
es_logger.setLevel(logging.WARNING)
index = "twitter"
# Define a Query
query = open(query_fname, "r").read()
result = Elasticsearch.search(client, index=index, body=query, scroll='2m', size=5000)
# Append all pages form scroll search : avoid the 10k limitation of ElasticSearch
results = avoid10kquerylimitation(result, client, logger)
# Initiate a dict for each city append all Tweets content
tweetsByCityAndDate = defaultdict(list)
for hits in results:
# parse Java date : EEE MMM dd HH:mm:ss Z yyyy
inDate = hits["_source"]["created_at"]
parseDate = datetime.strptime(inDate, "%a %b %d %H:%M:%S %z %Y")
        try:  # geocoding may be bad
geocoding = hits["_source"]["rest"]["features"][0]["properties"]
except:
continue # skip this iteraction
if "country" in hits["_source"]["rest"]["features"][0]["properties"]:
            # localities do not necessarily have an associated state
try:
cityStateCountry = str(hits["_source"]["rest"]["features"][0]["properties"]["city"]) + "_" + \
str(hits["_source"]["rest"]["features"][0]["properties"]["state"]) + "_" + \
str(hits["_source"]["rest"]["features"][0]["properties"]["country"])
except: # there is no state in geocoding
try:
logger.debug(hits["_source"]["rest"]["features"][0]["properties"]["city"] + " has no state")
cityStateCountry = str(hits["_source"]["rest"]["features"][0]["properties"]["city"]) + "_" + \
str("none") + "_" + \
str(hits["_source"]["rest"]["features"][0]["properties"]["country"])
except: # there is no city as well : only country
# print(json.dumps(hits["_source"], indent=4))
try: #
cityStateCountry = str("none") + "_" + \
str("none") + "_" + \
str(hits["_source"]["rest"]["features"][0]["properties"]["country"])
except:
cityStateCountry = str("none") + "_" + \
str("none") + "_" + \
str("none")
try:
tweetsByCityAndDate[cityStateCountry].append(
{
"tweet": preprocessTweets(hits["_source"]["full_text"]),
"created_at": parseDate
}
)
except:
print(json.dumps(hits["_source"], indent=4))
# biotexInputBuilder(tweetsByCityAndDate)
# pprint(tweetsByCityAndDate)
return tweetsByCityAndDate
def avoid10kquerylimitation(result, client, logger):
"""
    Elasticsearch limits query results to 10 000. To avoid this limit, we need to paginate results and scroll.
    This method appends all pages from the scroll search.
    :param result: a result of an Elasticsearch query
:return:
"""
scroll_size = result['hits']['total']["value"]
logger.info("Number of elasticsearch scroll: " + str(scroll_size))
results = []
# Progress bar
pbar = tqdm(total=scroll_size)
while (scroll_size > 0):
try:
scroll_id = result['_scroll_id']
res = client.scroll(scroll_id=scroll_id, scroll='60s')
results += res['hits']['hits']
scroll_size = len(res['hits']['hits'])
pbar.update(scroll_size)
except:
pbar.close()
logger.error("elasticsearch search scroll failed")
break
pbar.close()
return results
def preprocessTweets(text):
"""
1 - Clean up tweets text cf : https://medium.com/analytics-vidhya/basic-tweet-preprocessing-method-with-python-56b4e53854a1
2 - Detection lang
3 - remove stopword ??
:param text:
    :return: str : textclean, the cleaned tweet text
"""
    ## 1 clean up tweets
# remove URLs
textclean = re.sub('((www\.[^\s]+)|(https?://[^\s]+)|(http?://[^\s]+))', '', text)
textclean = re.sub(r'http\S+', '', textclean)
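    # e.g. "Check this https://t.co/abc now" -> "Check this  now"; only URLs are removed here,
    # the username and hashtag substitutions below are left commented out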
# remove usernames
# textclean = re.sub('@[^\s]+', '', textclean)
# remove the # in #hashtag
# textclean = re.sub(r'#([^\s]+)', r'\1', textclean)
return textclean
def matrixOccurenceBuilder(tweetsofcity, matrixAggDay_fout, matrixOccurence_fout, save_intermediaire_files, logger):
"""
Create a matrix of :
- line : (city,day)
- column : terms
- value of cells : TF (term frequency)
Help found here :
http://www.xavierdupre.fr/app/papierstat/helpsphinx/notebooks/artificiel_tokenize_features.html
https://towardsdatascience.com/natural-language-processing-feature-engineering-using-tf-idf-e8b9d00e7e76
:param tweetsofcity:
:param matrixAggDay_fout: file to save
:param matrixOccurence_fout: file to save
:return:
"""
# initiate matrix of tweets aggregate by day
# col = ['city', 'day', 'tweetsList', 'bow']
col = ['city', 'day', 'tweetsList']
matrixAggDay = pd.DataFrame(columns=col)
cityDayList = []
logger.info("start full_text concatenation for city & day")
pbar = tqdm(total=len(tweetsofcity))
for city in tweetsofcity:
# create a table with 2 columns : tweet and created_at for a specific city
matrix = pd.DataFrame(tweetsofcity[city])
# Aggregate list of tweets by single day for specifics cities
## Loop on days for a city
period = matrix['created_at'].dt.date
period = period.unique()
period.sort()
for day in period:
# aggregate city and date document
document = '. \n'.join(matrix.loc[matrix['created_at'].dt.date == day]['tweet'].tolist())
# Bag of Words and preprocces
# preproccesFullText = preprocessTerms(document)
tweetsOfDayAndCity = {
'city': city,
'day': day,
'tweetsList': document
}
cityDayList.append(city + "_" + str(day))
try:
matrixAggDay = matrixAggDay.append(tweetsOfDayAndCity, ignore_index=True)
except:
print("full_text empty after pre-process: "+document)
continue
pbar.update(1)
pbar.close()
if save_intermediaire_files:
logger.info("Saving file: matrix of full_text concatenated by day & city: "+str(matrixAggDay_fout))
matrixAggDay.to_csv(matrixAggDay_fout)
# Count terms with sci-kit learn
cd = CountVectorizer(
stop_words='english',
#preprocessor=sklearn_vectorizer_no_number_preprocessor,
#min_df=2, # token at least present in 2 cities : reduce size of matrix
max_features=25000,
ngram_range=(1, 1),
        token_pattern='[a-zA-Z0-9#@]+', # remove user names, i.e. terms starting with @, for personal data issues
# strip_accents= "ascii" # remove token with special character (trying to keep only english word)
)
cd.fit(matrixAggDay['tweetsList'])
res = cd.transform(matrixAggDay["tweetsList"])
countTerms = res.todense()
# create matrix
## get terms :
# voc = cd.vocabulary_
# listOfTerms = {term for term, index in sorted(voc.items(), key=lambda item: item[1])}
listOfTerms = cd.get_feature_names()
##initiate matrix with count for each terms
matrixOccurence = pd.DataFrame(data=countTerms[0:, 0:], index=cityDayList, columns=listOfTerms)
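    # rows are "city_state_country_date" identifiers, columns are the vectorizer tokens,
    # and each cell holds the raw term count (TF) for that place/day document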
# save to file
if save_intermediaire_files:
logger.info("Saving file: occurence of term: "+str(matrixOccurence_fout))
matrixOccurence.to_csv(matrixOccurence_fout)
return matrixOccurence
def spatiotemporelFilter(matrix, listOfcities='all', spatialLevel='city', period='all', temporalLevel='day'):
"""
Filter matrix with list of cities and a period
:param matrix:
:param listOfcities:
:param spatialLevel:
:param period:
:param temporalLevel:
    :return: matrix filtered
"""
if spatialLevel not in spatialLevels or temporalLevel not in temporalLevels:
print("wrong level, please double check")
return 1
# Extract cities and period
## cities
if listOfcities != 'all': ### we need to filter
###Initiate a numpy array of False
filter = np.zeros((1, len(matrix.index)), dtype=bool)[0]
for city in listOfcities:
### edit filter if index contains the city (for each city of the list)
filter += matrix.index.str.startswith(str(city) + "_")
matrix = matrix.loc[filter]
##period
if str(period) != 'all': ### we need a filter on date
datefilter = np.zeros((1, len(matrix.index)), dtype=bool)[0]
for date in period:
datefilter += matrix.index.str.contains(date.strftime('%Y-%m-%d'))
matrix = matrix.loc[datefilter]
return matrix
def HTFIDF(matrixOcc, matrixHTFIDF_fname, biggestHTFIDFscore_fname, listOfcities='all', spatialLevel='city',
period='all', temporalLevel='day'):
"""
    Aggregate on spatial and temporal levels and then compute TF-IDF
:param matrixOcc: Matrix with TF already compute
:param listOfcities: filter on this cities
:param spatialLevel: city / state / country / world
:param period: Filter on this period
:param temporalLevel: day / week (month have to be implemented)
:return:
"""
matrixOcc = spatiotemporelFilter(matrix=matrixOcc, listOfcities=listOfcities,
spatialLevel='state', period=period)
# Aggregate by level
## Create 4 new columns : city, State, Country and date
def splitindex(row):
return row.split("_")
matrixOcc["city"], matrixOcc["state"], matrixOcc["country"], matrixOcc["date"] = \
zip(*matrixOcc.index.map(splitindex))
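    # the index has the form "city_state_country_date", so splitting on "_" yields the four grouping columns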
if temporalLevel == 'day':
## In space
if spatialLevel == 'city':
# do nothing
pass
elif spatialLevel == 'state' and temporalLevel == 'day':
matrixOcc = matrixOcc.groupby("state").sum()
elif spatialLevel == 'country' and temporalLevel == 'day':
matrixOcc = matrixOcc.groupby("country").sum()
elif temporalLevel == "week":
matrixOcc.date = pd.to_datetime((matrixOcc.date)) - pd.to_timedelta(7, unit='d')# convert date into datetime
## in space and time
if spatialLevel == 'country':
matrixOcc = matrixOcc.groupby(["country", pd.Grouper(key="date", freq="W")]).sum()
elif spatialLevel == 'state':
matrixOcc = matrixOcc.groupby(["state", pd.Grouper(key="date", freq="W")]).sum()
elif spatialLevel == 'city':
matrixOcc = matrixOcc.groupby(["city", | pd.Grouper(key="date", freq="W") | pandas.Grouper |
#----------------------------------------------------------------------------------------------
####################
# IMPORT LIBRARIES #
####################
import streamlit as st
import pandas as pd
import numpy as np
import plotly as dd
import plotly.express as px
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.font_manager
import plotly.graph_objects as go
import functions as fc
import os
import altair as alt
import statsmodels.api as sm
from scipy import stats
from sklearn.metrics import make_scorer, mean_squared_error, r2_score, mean_absolute_error, explained_variance_score, roc_auc_score, max_error, log_loss, average_precision_score, precision_recall_curve, auc, roc_curve, confusion_matrix, recall_score, precision_score, f1_score, accuracy_score, balanced_accuracy_score, cohen_kappa_score
from sklearn.model_selection import train_test_split
import scipy
import sys
import platform
import base64
from io import BytesIO
from linearmodels import PanelOLS
from linearmodels import RandomEffects
from linearmodels import PooledOLS
#----------------------------------------------------------------------------------------------
def app():
# Clear cache
st.legacy_caching.clear_cache()
# Hide traceback in error messages (comment out for de-bugging)
sys.tracebacklimit = 0
# Show altair tooltip when full screen
st.markdown('<style>#vg-tooltip-element{z-index: 1000051}</style>',unsafe_allow_html=True)
# workaround for Firefox bug- hide the scrollbar while keeping the scrolling functionality
st.markdown("""
<style>
.ReactVirtualized__Grid::-webkit-scrollbar {
display: none;
}
.ReactVirtualized__Grid {
-ms-overflow-style: none; /* IE and Edge */
scrollbar-width: none; /* Firefox */
}
</style>
""", unsafe_allow_html=True)
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# RESET INPUT
#Session state
if 'key' not in st.session_state:
st.session_state['key'] = 0
reset_clicked = st.sidebar.button("Reset all your input")
if reset_clicked:
st.session_state['key'] = st.session_state['key'] + 1
st.sidebar.markdown("")
#++++++++++++++++++++++++++++++++++++++++++++
# DATA IMPORT
# File upload section
df_dec = st.sidebar.radio("Get data", ["Use example dataset", "Upload data"], key = st.session_state['key'])
uploaded_data=None
if df_dec == "Upload data":
#st.subheader("Upload your data")
#uploaded_data = st.sidebar.file_uploader("Make sure that dot (.) is a decimal separator!", type=["csv", "txt"])
separator_expander=st.sidebar.expander('Upload settings')
with separator_expander:
a4,a5=st.columns(2)
with a4:
dec_sep=a4.selectbox("Decimal sep.",['.',','], key = st.session_state['key'])
with a5:
col_sep=a5.selectbox("Column sep.",[';', ',' , '|', '\s+', '\t','other'], key = st.session_state['key'])
if col_sep=='other':
col_sep=st.text_input('Specify your column separator', key = st.session_state['key'])
a4,a5=st.columns(2)
with a4:
thousands_sep=a4.selectbox("Thousands x sep.",[None,'.', ' ','\s+', 'other'], key = st.session_state['key'])
if thousands_sep=='other':
thousands_sep=st.text_input('Specify your thousands separator', key = st.session_state['key'])
with a5:
encoding_val=a5.selectbox("Encoding",[None,'utf_8','utf_8_sig','utf_16_le','cp1140','cp1250','cp1251','cp1252','cp1253','cp1254','other'], key = st.session_state['key'])
if encoding_val=='other':
encoding_val=st.text_input('Specify your encoding', key = st.session_state['key'])
# Error handling for separator selection:
if dec_sep==col_sep:
st.sidebar.error("Decimal and column separators cannot be identical!")
elif dec_sep==thousands_sep:
st.sidebar.error("Decimal and thousands separators cannot be identical!")
elif col_sep==thousands_sep:
st.sidebar.error("Column and thousands separators cannot be identical!")
uploaded_data = st.sidebar.file_uploader("Default separators: decimal '.' | column ';'", type=["csv", "txt"])
if uploaded_data is not None:
df = pd.read_csv(uploaded_data, decimal=dec_sep, sep = col_sep,thousands=thousands_sep,encoding=encoding_val, engine='python')
df_name=os.path.splitext(uploaded_data.name)[0]
st.sidebar.success('Loading data... done!')
elif uploaded_data is None:
df = pd.read_csv("default data/Grunfeld.csv", sep = ";|,|\t",engine='python')
df_name="Grunfeld"
else:
df = pd.read_csv("default data/Grunfeld.csv", sep = ";|,|\t",engine='python')
df_name="Grunfeld"
st.sidebar.markdown("")
#Basic data info
n_rows = df.shape[0]
n_cols = df.shape[1]
#++++++++++++++++++++++++++++++++++++++++++++
# SETTINGS
settings_expander=st.sidebar.expander('Settings')
with settings_expander:
st.caption("**Precision**")
user_precision=int(st.number_input('Number of digits after the decimal point',min_value=0,max_value=10,step=1,value=4, key = st.session_state['key']))
st.caption("**Help**")
sett_hints = st.checkbox('Show learning hints', value=False, key = st.session_state['key'])
st.caption("**Appearance**")
sett_wide_mode = st.checkbox('Wide mode', value=False, key = st.session_state['key'])
sett_theme = st.selectbox('Theme', ["Light", "Dark"], key = st.session_state['key'])
#sett_info = st.checkbox('Show methods info', value=False)
#sett_prec = st.number_input('Set the number of diggits for the output', min_value=0, max_value=8, value=2)
st.sidebar.markdown("")
# Check if wide mode
if sett_wide_mode:
fc.wide_mode_func()
# Check theme
if sett_theme == "Dark":
fc.theme_func_dark()
if sett_theme == "Light":
fc.theme_func_light()
fc.theme_func_dl_button()
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# DATA PREPROCESSING & VISUALIZATION
st.header("**Panel data**")
st.markdown("Get your data ready for powerfull methods! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
# Check if enough data is available
if n_cols >= 2 and n_rows > 0:
st.empty()
else:
st.error("ERROR: Not enough data!")
return
# Specify entity and time
st.markdown("**Panel data specification**")
col1, col2 = st.columns(2)
with col1:
entity_na_warn = False
entity_options = df.columns
entity = st.selectbox("Select variable for entity", entity_options, key = st.session_state['key'])
with col2:
time_na_warn = False
time_options = df.columns
time_options = list(time_options[time_options.isin(df.drop(entity, axis = 1).columns)])
time = st.selectbox("Select variable for time", time_options, key = st.session_state['key'])
if np.where(df[entity].isnull())[0].size > 0:
entity_na_warn = "ERROR: The variable selected for entity has NAs!"
st.error(entity_na_warn)
if np.where(df[time].isnull())[0].size > 0:
time_na_warn = "ERROR: The variable selected for time has NAs!"
st.error(time_na_warn)
if df[time].dtypes != "float64" and df[time].dtypes != "float32" and df[time].dtypes != "int64" and df[time].dtypes != "int32":
time_na_warn = "ERROR: Time variable must be numeric!"
st.error(time_na_warn)
run_models = False
if time_na_warn == False and entity_na_warn == False:
data_empty_container = st.container()
with data_empty_container:
st.empty()
st.empty()
st.empty()
st.empty()
st.empty()
st.empty()
st.empty()
st.empty()
# Make sure time is numeric
df[time] = pd.to_numeric(df[time])
data_exploration_container2 = st.container()
with data_exploration_container2:
st.header("**Data screening and processing**")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# DATA SUMMARY
# Main panel for data summary (pre)
#----------------------------------
dev_expander_dsPre = st.expander("Explore raw panel data info and stats", expanded = False)
st.empty()
with dev_expander_dsPre:
# Default data description:
if uploaded_data == None:
if st.checkbox("Show data description", value = False, key = st.session_state['key']):
st.markdown("**Data source:**")
st.markdown("This is the original 11-firm data set from Grunfeld’s Ph.D. thesis (*Grunfeld, 1958, The Determinants of Corporate Investment, Department of Economics, University of Chicago*). For more details see online complements for the article [The Grunfeld Data at 50] (https://www.zeileis.org/grunfeld/).")
st.markdown("**Citation:**")
st.markdown("<NAME>, <NAME> (2010). “The Grunfeld Data at 50,” German Economic Review, 11(4), 404-417. [doi:10.1111/j.1468-0475.2010.00513.x] (https://onlinelibrary.wiley.com/doi/abs/10.1111/j.1468-0475.2010.00513.x)")
st.markdown("**Variables in the dataset:**")
col1,col2=st.columns(2)
col1.write("invest")
col2.write("Gross investment, defined as additions to plant and equipment plus maintenance and repairs in millions of dollars deflated by the implicit price deflator of producers’ durable equipment (base 1947)")
col1,col2=st.columns(2)
col1.write("value")
col2.write("Market value of the firm, defined as the price of common shares at December 31 (or, for WH, IBM and CH, the average price of December 31 and January 31 of the following year) times the number of common shares outstanding plus price of preferred shares at December 31 (or average price of December 31 and January 31 of the following year) times number of preferred shares plus total book value of debt at December 31 in millions of dollars deflated by the implicit GNP price deflator (base 1947)")
col1,col2=st.columns(2)
col1.write("capital")
col2.write("Stock of plant and equipment, defined as the accumulated sum of net additions to plant and equipment deflated by the implicit price deflator for producers’ durable equipment (base 1947) minus depreciation allowance deflated by depreciation expense deflator (10 years moving average of wholesale price index of metals and metal products, base1947)")
col1,col2=st.columns(2)
col1.write("firm")
col2.write("General Motors (GM), US Steel (US), General Electric (GE), Chrysler (CH), Atlantic Refining (AR), IBM, Union Oil (UO), Westinghouse (WH), Goodyear (GY), Diamond Match (DM), American Steel (AS)")
col1,col2=st.columns(2)
col1.write("year")
col2.write("Year ranging from 1935 to 1954")
st.markdown("")
# Show raw data & data info
df_summary = fc.data_summary(df)
if st.checkbox("Show raw data", value = False, key = st.session_state['key']):
st.write(df)
#st.info("Data shape: "+ str(n_rows) + " rows and " + str(n_cols) + " columns")
st.write("Data shape: ", n_rows, " rows and ", n_cols, " columns")
if df[df.duplicated()].shape[0] > 0 or df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
check_nasAnddupl=st.checkbox("Show duplicates and NAs info", value = False, key = st.session_state['key'])
if check_nasAnddupl:
if df[df.duplicated()].shape[0] > 0:
st.write("Number of duplicates: ", df[df.duplicated()].shape[0])
st.write("Duplicate row index: ", ', '.join(map(str,list(df.index[df.duplicated()]))))
if df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
st.write("Number of rows with NAs: ", df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0])
st.write("Rows with NAs: ", ', '.join(map(str,list(pd.unique(np.where(df.isnull())[0])))))
# Show variable info
if st.checkbox('Show variable info', value = False, key = st.session_state['key']):
st.write(df_summary["Variable types"])
# Show summary statistics (raw data)
if st.checkbox('Show summary statistics (raw data)', value = False, key = st.session_state['key']):
st.write(df_summary["ALL"].style.set_precision(user_precision))
# Download link for summary statistics
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_summary["Variable types"].to_excel(excel_file, sheet_name="variable_info")
df_summary["ALL"].to_excel(excel_file, sheet_name="summary_statistics")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Summary statistics__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download summary statistics</a>
""",
unsafe_allow_html=True)
st.write("")
if fc.get_mode(df).loc["n_unique"].any():
st.caption("** Mode is not unique.")
if sett_hints:
st.info(str(fc.learning_hints("de_summary_statistics")))
dev_expander_anovPre = st.expander("ANOVA for raw panel data", expanded = False)
with dev_expander_anovPre:
if df.shape[1] > 2:
# Target variable
target_var = st.selectbox('Select target variable ', df.drop([entity, time], axis = 1).columns, key = st.session_state['key'])
if df[target_var].dtypes == "int64" or df[target_var].dtypes == "float64":
class_var_options = df.columns
class_var_options = class_var_options[class_var_options.isin(df.drop(target_var, axis = 1).columns)]
clas_var = st.selectbox('Select classifier variable ', [entity, time], key = st.session_state['key'])
# Means and sd by entity
col1, col2 = st.columns(2)
with col1:
df_anova_woTime = df.drop([time], axis = 1)
df_grouped_ent = df_anova_woTime.groupby(entity)
st.write("Mean based on entity:")
st.write(df_grouped_ent.mean()[target_var])
st.write("")
with col2:
st.write("SD based on entity:")
st.write(df_grouped_ent.std()[target_var])
st.write("")
# Means and sd by time
col3, col4 = st.columns(2)
with col3:
df_anova_woEnt= df.drop([entity], axis = 1)
df_grouped_time = df_anova_woEnt.groupby(time)
counts_time = pd.DataFrame(df_grouped_time.count()[target_var])
counts_time.columns = ["count"]
st.write("Mean based on time:")
st.write(df_grouped_time.mean()[target_var])
st.write("")
with col4:
st.write("SD based on time:")
st.write(df_grouped_time.std()[target_var])
st.write("")
col9, col10 = st.columns(2)
with col9:
st.write("Boxplot grouped by entity:")
box_size1 = st.slider("Select box size", 1, 50, 5, key = st.session_state['key'])
# Grouped boxplot by entity
grouped_boxplot_data = pd.DataFrame()
grouped_boxplot_data[entity] = df[entity]
grouped_boxplot_data[time] = df[time]
grouped_boxplot_data["Index"] = df.index
grouped_boxplot_data[target_var] = df[target_var]
grouped_boxchart_ent = alt.Chart(grouped_boxplot_data, height = 300).mark_boxplot(size = box_size1, color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X(entity, scale = alt.Scale(zero = False)),
y = alt.Y(target_var, scale = alt.Scale(zero = False)),
tooltip = [target_var, entity, time, "Index"]
).configure_axis(
labelFontSize = 11,
titleFontSize = 12
)
st.altair_chart(grouped_boxchart_ent, use_container_width=True)
with col10:
st.write("Boxplot grouped by time:")
box_size2 = st.slider("Select box size ", 1, 50, 5, key = st.session_state['key'])
# Grouped boxplot by time
grouped_boxplot_data = pd.DataFrame()
grouped_boxplot_data[entity] = df[entity]
grouped_boxplot_data[time] = df[time]
grouped_boxplot_data["Index"] = df.index
grouped_boxplot_data[target_var] = df[target_var]
grouped_boxchart_time = alt.Chart(grouped_boxplot_data, height = 300).mark_boxplot(size = box_size2, color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X(time, scale = alt.Scale(domain = [min(df[time]), max(df[time])])),
y = alt.Y(target_var, scale = alt.Scale(zero = False)),
tooltip = [target_var, entity, time, "Index"]
).configure_axis(
labelFontSize = 11,
titleFontSize = 12
)
st.altair_chart(grouped_boxchart_time, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("de_anova_boxplot")))
st.write("")
# Count for entity and time
col5, col6 = st.columns(2)
with col5:
st.write("Number of observations per entity:")
counts_ent = pd.DataFrame(df_grouped_ent.count()[target_var])
counts_ent.columns = ["count"]
st.write(counts_ent.transpose())
with col6:
st.write("Number of observations per time:")
counts_time = pd.DataFrame(df_grouped_time.count()[target_var])
counts_time.columns = ["count"]
st.write(counts_time.transpose())
if sett_hints:
st.info(str(fc.learning_hints("de_anova_count")))
st.write("")
# ANOVA calculation
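                        # One-way ANOVA computed by hand for the k groups defined by the classifier variable:
                        #   SS_between = sum_g n_g * (mean_g - overall_mean)^2   with df_between = k - 1
                        #   SS_within  = sum_g (n_g - 1) * var_g                 with df_within  = N - k
                        #   F = (SS_between/df_between) / (SS_within/df_within), p-value from the F distribution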
df_grouped = df[[target_var,clas_var]].groupby(clas_var)
overall_mean = (df_grouped.mean()*df_grouped.count()).sum()/df_grouped.count().sum()
dof_between = len(df_grouped.count())-1
dof_within = df_grouped.count().sum()-len(df_grouped.count())
dof_tot = dof_between + dof_within
SS_between = (((df_grouped.mean()-overall_mean)**2)*df_grouped.count()).sum()
SS_within = (df_grouped.var()*(df_grouped.count()-1)).sum()
SS_total = SS_between + SS_within
MS_between = SS_between/dof_between
MS_within = SS_within/dof_within
F_stat = MS_between/MS_within
p_value = scipy.stats.f.sf(F_stat, dof_between, dof_within)
anova_table=pd.DataFrame({
"DF": [dof_between, dof_within.values[0], dof_tot.values[0]],
"SS": [SS_between.values[0], SS_within.values[0], SS_total.values[0]],
"MS": [MS_between.values[0], MS_within.values[0], ""],
"F-statistic": [F_stat.values[0], "", ""],
"p-value": [p_value[0], "", ""]},
index = ["Between", "Within", "Total"],)
st.write("ANOVA:")
st.write(anova_table)
if sett_hints:
st.info(str(fc.learning_hints("de_anova_table")))
st.write("")
#Anova (OLS)
codes = pd.factorize(df[clas_var])[0]
ano_ols = sm.OLS(df[target_var], sm.add_constant(codes))
ano_ols_output = ano_ols.fit()
residuals = ano_ols_output.resid
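                        # the residuals of this auxiliary OLS fit (target regressed on the factorized classifier
                        # codes) feed the normality diagnostics below (QQ-plot and residuals histogram)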
col7, col8 = st.columns(2)
with col7:
# QQ-plot
st.write("Normal QQ-plot:")
st.write("")
st.write("")
st.write("")
st.write("")
st.write("")
st.write("")
qq_plot_data = pd.DataFrame()
qq_plot_data["StandResiduals"] = (residuals - residuals.mean())/residuals.std()
qq_plot_data["Index"] = df.index
qq_plot_data[entity] = df[entity]
qq_plot_data[time] = df[time]
qq_plot_data = qq_plot_data.sort_values(by = ["StandResiduals"])
qq_plot_data["Theoretical quantiles"] = stats.probplot(residuals, dist="norm")[0][0]
qq_plot = alt.Chart(qq_plot_data, height = 300).mark_circle(size=20).encode(
x = alt.X("Theoretical quantiles", title = "theoretical quantiles", scale = alt.Scale(domain = [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("StandResiduals", title = "stand. residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["StandResiduals", "Theoretical quantiles", entity, time, "Index"]
)
line = alt.Chart(
pd.DataFrame({"Theoretical quantiles": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])], "StandResiduals": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("Theoretical quantiles"),
alt.Y("StandResiduals"),
)
st.altair_chart(qq_plot + line, use_container_width = True)
with col8:
# Residuals histogram
st.write("Residuals histogram:")
residuals_hist = pd.DataFrame(residuals)
residuals_hist.columns = ["residuals"]
binNo_res = st.slider("Select maximum number of bins ", 5, 100, 25, key = st.session_state['key'])
hist_plot_res = alt.Chart(residuals_hist, height = 300).mark_bar().encode(
x = alt.X("residuals", title = "residuals", bin = alt.BinParams(maxbins = binNo_res), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("count()", title = "count of records", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["count()", alt.Tooltip("residuals", bin = alt.BinParams(maxbins = binNo_res))]
)
st.altair_chart(hist_plot_res, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("de_anova_residuals")))
# Download link for ANOVA statistics
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_grouped_ent.mean()[target_var].to_excel(excel_file, sheet_name="entity_mean")
df_grouped_ent.std()[target_var].to_excel(excel_file, sheet_name="entity_sd")
df_grouped_time.mean()[target_var].to_excel(excel_file, sheet_name="time_mean")
df_grouped_time.std()[target_var].to_excel(excel_file, sheet_name="time_sd")
counts_ent.transpose().to_excel(excel_file, sheet_name="entity_obs")
counts_time.transpose().to_excel(excel_file, sheet_name="time_obs")
anova_table.to_excel(excel_file, sheet_name="ANOVA table")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "ANOVA statistics__" + target_var + "__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download ANOVA statistics</a>
""",
unsafe_allow_html=True)
st.write("")
else:
st.error("ERROR: The target variable must be a numerical one!")
else: st.error("ERROR: No variables available for ANOVA!")
#++++++++++++++++++++++
# DATA PROCESSING
# Settings for data processing
#-------------------------------------
dev_expander_dm_sb = st.expander("Specify data processing preferences", expanded = False)
with dev_expander_dm_sb:
n_rows_wNAs = df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0]
n_rows_wNAs_pre_processing = "No"
if n_rows_wNAs > 0:
n_rows_wNAs_pre_processing = "Yes"
a1, a2, a3 = st.columns(3)
else: a1, a3 = st.columns(2)
sb_DM_dImp_num = None
sb_DM_dImp_other = None
sb_DM_delRows=None
sb_DM_keepRows=None
group_by_num = None
group_by_other = None
with a1:
#--------------------------------------------------------------------------------------
# DATA CLEANING
st.markdown("**Data cleaning**")
# Delete rows
delRows =st.selectbox('Delete rows with index ...', options=['-', 'greater', 'greater or equal', 'smaller', 'smaller or equal', 'equal', 'between'], key = st.session_state['key'])
if delRows!='-':
if delRows=='between':
row_1=st.number_input('Lower limit is', value=0, step=1, min_value= 0, max_value=len(df)-1, key = st.session_state['key'])
row_2=st.number_input('Upper limit is', value=2, step=1, min_value= 0, max_value=len(df)-1, key = st.session_state['key'])
if (row_1 + 1) < row_2 :
sb_DM_delRows=df.index[(df.index > row_1) & (df.index < row_2)]
elif (row_1 + 1) == row_2 :
st.warning("WARNING: No row is deleted!")
elif row_1 == row_2 :
st.warning("WARNING: No row is deleted!")
elif row_1 > row_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif delRows=='equal':
sb_DM_delRows = st.multiselect("to...", df.index, key = st.session_state['key'])
else:
row_1=st.number_input('than...', step=1, value=1, min_value = 0, max_value=len(df)-1, key = st.session_state['key'])
if delRows=='greater':
sb_DM_delRows=df.index[df.index > row_1]
if row_1 == len(df)-1:
st.warning("WARNING: No row is deleted!")
elif delRows=='greater or equal':
sb_DM_delRows=df.index[df.index >= row_1]
if row_1 == 0:
st.error("ERROR: All rows are deleted!")
return
elif delRows=='smaller':
sb_DM_delRows=df.index[df.index < row_1]
if row_1 == 0:
st.warning("WARNING: No row is deleted!")
elif delRows=='smaller or equal':
sb_DM_delRows=df.index[df.index <= row_1]
if row_1 == len(df)-1:
st.error("ERROR: All rows are deleted!")
return
if sb_DM_delRows is not None:
df = df.loc[~df.index.isin(sb_DM_delRows)]
no_delRows=n_rows-df.shape[0]
# Keep rows
keepRows =st.selectbox('Keep rows with index ...', options=['-', 'greater', 'greater or equal', 'smaller', 'smaller or equal', 'equal', 'between'], key = st.session_state['key'])
if keepRows!='-':
if keepRows=='between':
row_1=st.number_input('Lower limit is', value=0, step=1, min_value= 0, max_value=len(df)-1, key = st.session_state['key'])
row_2=st.number_input('Upper limit is', value=2, step=1, min_value= 0, max_value=len(df)-1, key = st.session_state['key'])
if (row_1 + 1) < row_2 :
sb_DM_keepRows=df.index[(df.index > row_1) & (df.index < row_2)]
elif (row_1 + 1) == row_2 :
st.error("ERROR: No row is kept!")
return
elif row_1 == row_2 :
st.error("ERROR: No row is kept!")
return
elif row_1 > row_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif keepRows=='equal':
sb_DM_keepRows = st.multiselect("to...", df.index, key = st.session_state['key'])
else:
row_1=st.number_input('than...', step=1, value=1, min_value = 0, max_value=len(df)-1, key = st.session_state['key'])
if keepRows=='greater':
sb_DM_keepRows=df.index[df.index > row_1]
if row_1 == len(df)-1:
st.error("ERROR: No row is kept!")
return
elif keepRows=='greater or equal':
sb_DM_keepRows=df.index[df.index >= row_1]
if row_1 == 0:
st.warning("WARNING: All rows are kept!")
elif keepRows=='smaller':
sb_DM_keepRows=df.index[df.index < row_1]
if row_1 == 0:
st.error("ERROR: No row is kept!")
return
elif keepRows=='smaller or equal':
sb_DM_keepRows=df.index[df.index <= row_1]
if sb_DM_keepRows is not None:
df = df.loc[df.index.isin(sb_DM_keepRows)]
no_keptRows=df.shape[0]
# Delete columns
sb_DM_delCols = st.multiselect("Select columns to delete", df.drop([entity, time], axis = 1).columns, key = st.session_state['key'])
df = df.loc[:,~df.columns.isin(sb_DM_delCols)]
# Keep columns
sb_DM_keepCols = st.multiselect("Select columns to keep", df.drop([entity, time], axis = 1).columns, key = st.session_state['key'])
if len(sb_DM_keepCols) > 0:
df = df.loc[:,df.columns.isin([entity, time] + sb_DM_keepCols)]
# Delete duplicates if any exist
if df[df.duplicated()].shape[0] > 0:
sb_DM_delDup = st.selectbox("Delete duplicate rows", ["No", "Yes"], key = st.session_state['key'])
if sb_DM_delDup == "Yes":
n_rows_dup = df[df.duplicated()].shape[0]
df = df.drop_duplicates()
elif df[df.duplicated()].shape[0] == 0:
sb_DM_delDup = "No"
# Delete rows with NA if any exist
n_rows_wNAs = df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0]
if n_rows_wNAs > 0:
sb_DM_delRows_wNA = st.selectbox("Delete rows with NAs", ["No", "Yes"], key = st.session_state['key'])
if sb_DM_delRows_wNA == "Yes":
df = df.dropna()
elif n_rows_wNAs == 0:
sb_DM_delRows_wNA = "No"
# Filter data
st.markdown("**Data filtering**")
filter_var = st.selectbox('Filter your data by a variable...', list('-')+ list(df.columns), key = st.session_state['key'])
if filter_var !='-':
if df[filter_var].dtypes=="int64" or df[filter_var].dtypes=="float64":
if df[filter_var].dtypes=="float64":
filter_format="%.8f"
else:
filter_format=None
user_filter=st.selectbox('Select values that are ...', options=['greater','greater or equal','smaller','smaller or equal', 'equal','between'], key = st.session_state['key'])
if user_filter=='between':
filter_1=st.number_input('Lower limit is', format=filter_format, value=df[filter_var].min(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = st.session_state['key'])
filter_2=st.number_input('Upper limit is', format=filter_format, value=df[filter_var].max(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = st.session_state['key'])
#reclassify values:
if filter_1 < filter_2 :
df = df[(df[filter_var] > filter_1) & (df[filter_var] < filter_2)]
if len(df) == 0:
st.error("ERROR: No data available for the selected limits!")
return
elif filter_1 >= filter_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif user_filter=='equal':
filter_1=st.multiselect('to... ', options=df[filter_var].values, key = st.session_state['key'])
if len(filter_1)>0:
df = df.loc[df[filter_var].isin(filter_1)]
else:
filter_1=st.number_input('than... ',format=filter_format, value=df[filter_var].min(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = st.session_state['key'])
#reclassify values:
if user_filter=='greater':
df = df[df[filter_var] > filter_1]
elif user_filter=='greater or equal':
df = df[df[filter_var] >= filter_1]
elif user_filter=='smaller':
df= df[df[filter_var]< filter_1]
elif user_filter=='smaller or equal':
df = df[df[filter_var] <= filter_1]
if len(df) == 0:
st.error("ERROR: No data available for the selected value!")
return
elif len(df) == n_rows:
st.warning("WARNING: Data are not filtered for this value!")
else:
filter_1=st.multiselect('Filter your data by a value...', (df[filter_var]).unique(), key = st.session_state['key'])
if len(filter_1)>0:
df = df.loc[df[filter_var].isin(filter_1)]
if n_rows_wNAs_pre_processing == "Yes":
with a2:
#--------------------------------------------------------------------------------------
# DATA IMPUTATION
# Select data imputation method (only if rows with NA not deleted)
if sb_DM_delRows_wNA == "No" and n_rows_wNAs > 0:
st.markdown("**Data imputation**")
sb_DM_dImp_choice = st.selectbox("Replace entries with NA", ["No", "Yes"], key = st.session_state['key'])
if sb_DM_dImp_choice == "Yes":
# Numeric variables
sb_DM_dImp_num = st.selectbox("Imputation method for numeric variables", ["Mean", "Median", "Random value"], key = st.session_state['key'])
# Other variables
sb_DM_dImp_other = st.selectbox("Imputation method for other variables", ["Mode", "Random value"], key = st.session_state['key'])
group_by_num = st.selectbox("Group imputation by", ["None", "Entity", "Time"], key = st.session_state['key'])
group_by_other = group_by_num
df = fc.data_impute_panel(df, sb_DM_dImp_num, sb_DM_dImp_other, group_by_num, group_by_other, entity, time)
else:
st.markdown("**Data imputation**")
st.write("")
st.info("No NAs in data set!")
with a3:
#--------------------------------------------------------------------------------------
# DATA TRANSFORMATION
st.markdown("**Data transformation**")
# Select columns for different transformation types
transform_options = df.drop([entity, time], axis = 1).select_dtypes([np.number]).columns
numCat_options = df.drop([entity, time], axis = 1).columns
sb_DM_dTrans_log = st.multiselect("Select columns to transform with log", transform_options, key = st.session_state['key'])
if sb_DM_dTrans_log is not None:
df = fc.var_transform_log(df, sb_DM_dTrans_log)
sb_DM_dTrans_sqrt = st.multiselect("Select columns to transform with sqrt", transform_options, key = st.session_state['key'])
if sb_DM_dTrans_sqrt is not None:
df = fc.var_transform_sqrt(df, sb_DM_dTrans_sqrt)
sb_DM_dTrans_square = st.multiselect("Select columns for squaring", transform_options, key = st.session_state['key'])
if sb_DM_dTrans_square is not None:
df = fc.var_transform_square(df, sb_DM_dTrans_square)
sb_DM_dTrans_cent = st.multiselect("Select columns for centering ", transform_options, key = st.session_state['key'])
if sb_DM_dTrans_cent is not None:
df = fc.var_transform_cent(df, sb_DM_dTrans_cent)
sb_DM_dTrans_stand = st.multiselect("Select columns for standardization", transform_options, key = st.session_state['key'])
if sb_DM_dTrans_stand is not None:
df = fc.var_transform_stand(df, sb_DM_dTrans_stand)
sb_DM_dTrans_norm = st.multiselect("Select columns for normalization", transform_options, key = st.session_state['key'])
if sb_DM_dTrans_norm is not None:
df = fc.var_transform_norm(df, sb_DM_dTrans_norm)
sb_DM_dTrans_numCat = st.multiselect("Select columns for numeric categorization ", numCat_options, key = st.session_state['key'])
if sb_DM_dTrans_numCat:
if not df[sb_DM_dTrans_numCat].columns[df[sb_DM_dTrans_numCat].isna().any()].tolist():
sb_DM_dTrans_numCat_sel = st.multiselect("Select variables for manual categorization ", sb_DM_dTrans_numCat, key = st.session_state['key'])
if sb_DM_dTrans_numCat_sel:
for var in sb_DM_dTrans_numCat_sel:
if df[var].unique().size > 5:
st.error("ERROR: Selected variable has too many categories (>5): " + str(var))
return
else:
manual_cats = pd.DataFrame(index = range(0, df[var].unique().size), columns=["Value", "Cat"])
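                                        # manual_cats maps each unique value of the selected variable to the
                                        # integer category code chosen by the user via the number inputs below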
text = "Category for "
# Save manually selected categories
for i in range(0, df[var].unique().size):
text1 = text + str(var) + ": " + str(sorted(df[var].unique())[i])
man_cat = st.number_input(text1, value = 0, min_value=0, key = st.session_state['key'])
manual_cats.loc[i]["Value"] = sorted(df[var].unique())[i]
manual_cats.loc[i]["Cat"] = man_cat
new_var_name = "numCat_" + var
new_var = pd.DataFrame(index = df.index, columns = [new_var_name])
for c in df[var].index:
if pd.isnull(df[var][c]) == True:
new_var.loc[c, new_var_name] = np.nan
elif pd.isnull(df[var][c]) == False:
new_var.loc[c, new_var_name] = int(manual_cats[manual_cats["Value"] == df[var][c]]["Cat"])
df[new_var_name] = new_var.astype('int64')
# Exclude columns with manual categorization from standard categorization
numCat_wo_manCat = [var for var in sb_DM_dTrans_numCat if var not in sb_DM_dTrans_numCat_sel]
df = fc.var_transform_numCat(df, numCat_wo_manCat)
else:
df = fc.var_transform_numCat(df, sb_DM_dTrans_numCat)
else:
col_with_na = df[sb_DM_dTrans_numCat].columns[df[sb_DM_dTrans_numCat].isna().any()].tolist()
st.error("ERROR: Please select columns without NAs: " + ', '.join(map(str,col_with_na)))
return
else:
sb_DM_dTrans_numCat = None
sb_DM_dTrans_mult = st.number_input("Number of variable multiplications ", value = 0, min_value=0, key = st.session_state['key'])
if sb_DM_dTrans_mult != 0:
multiplication_pairs = pd.DataFrame(index = range(0, sb_DM_dTrans_mult), columns=["Var1", "Var2"])
text = "Multiplication pair"
for i in range(0, sb_DM_dTrans_mult):
text1 = text + " " + str(i+1)
text2 = text + " " + str(i+1) + " "
mult_var1 = st.selectbox(text1, transform_options, key = st.session_state['key'])
mult_var2 = st.selectbox(text2, transform_options, key = st.session_state['key'])
multiplication_pairs.loc[i]["Var1"] = mult_var1
multiplication_pairs.loc[i]["Var2"] = mult_var2
fc.var_transform_mult(df, mult_var1, mult_var2)
sb_DM_dTrans_div = st.number_input("Number of variable divisions ", value = 0, min_value=0, key = st.session_state['key'])
if sb_DM_dTrans_div != 0:
division_pairs = pd.DataFrame(index = range(0, sb_DM_dTrans_div), columns=["Var1", "Var2"])
text = "Division pair"
for i in range(0, sb_DM_dTrans_div):
text1 = text + " " + str(i+1) + " (numerator)"
text2 = text + " " + str(i+1) + " (denominator)"
div_var1 = st.selectbox(text1, transform_options, key = st.session_state['key'])
div_var2 = st.selectbox(text2, transform_options, key = st.session_state['key'])
division_pairs.loc[i]["Var1"] = div_var1
division_pairs.loc[i]["Var2"] = div_var2
fc.var_transform_div(df, div_var1, div_var2)
data_transform=st.checkbox("Transform data in Excel?", value=False)
if data_transform==True:
st.info("Press the button to open your data in Excel. Don't forget to save your result as a csv or a txt file!")
# Download link
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="data",index=False)
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Data_transformation__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Transform your data in Excel</a>
""",
unsafe_allow_html=True)
st.write("")
#--------------------------------------------------------------------------------------
# PROCESSING SUMMARY
if st.checkbox('Show a summary of my data processing preferences', value = False, key = st.session_state['key']):
st.markdown("Summary of data changes:")
#--------------------------------------------------------------------------------------
# DATA CLEANING
# Rows
if sb_DM_delRows is not None and delRows!='-' :
if no_delRows > 1:
st.write("-", no_delRows, " rows were deleted!")
elif no_delRows == 1:
st.write("-",no_delRows, " row was deleted!")
elif no_delRows == 0:
st.write("- No row was deleted!")
else:
st.write("- No row was deleted!")
if sb_DM_keepRows is not None and keepRows!='-' :
if no_keptRows > 1:
st.write("-", no_keptRows, " rows are kept!")
elif no_keptRows == 1:
st.write("-",no_keptRows, " row is kept!")
elif no_keptRows == 0:
st.write("- All rows are kept!")
else:
st.write("- All rows are kept!")
# Columns
if len(sb_DM_delCols) > 1:
st.write("-", len(sb_DM_delCols), " columns were manually deleted:", ', '.join(sb_DM_delCols))
elif len(sb_DM_delCols) == 1:
st.write("-",len(sb_DM_delCols), " column was manually deleted:", str(sb_DM_delCols[0]))
elif len(sb_DM_delCols) == 0:
st.write("- No column was manually deleted!")
if len(sb_DM_keepCols) > 1:
st.write("-", len(sb_DM_keepCols), " columns are kept:", ', '.join(sb_DM_keepCols))
elif len(sb_DM_keepCols) == 1:
st.write("-",len(sb_DM_keepCols), " column is kept:", str(sb_DM_keepCols[0]))
elif len(sb_DM_keepCols) == 0:
st.write("- All columns are kept!")
# Duplicates
if sb_DM_delDup == "Yes":
if n_rows_dup > 1:
st.write("-", n_rows_dup, " duplicate rows were deleted!")
elif n_rows_dup == 1:
st.write("-", n_rows_dup, "duplicate row was deleted!")
else:
st.write("- No duplicate row was deleted!")
# NAs
if sb_DM_delRows_wNA == "Yes":
if n_rows_wNAs > 1:
st.write("-", n_rows_wNAs, "rows with NAs were deleted!")
elif n_rows_wNAs == 1:
st.write("-", n_rows - n_rows_wNAs, "row with NAs was deleted!")
else:
st.write("- No row with NAs was deleted!")
# Filter
if filter_var != "-":
if df[filter_var].dtypes=="int64" or df[filter_var].dtypes=="float64":
if isinstance(filter_1, list):
if len(filter_1) == 0:
st.write("-", " Data was not filtered!")
elif len(filter_1) > 0:
st.write("-", " Data filtered by:", str(filter_var))
elif filter_1 is not None:
st.write("-", " Data filtered by:", str(filter_var))
else:
st.write("-", " Data was not filtered!")
elif len(filter_1)>0:
st.write("-", " Data filtered by:", str(filter_var))
elif len(filter_1) == 0:
st.write("-", " Data was not filtered!")
else:
st.write("-", " Data was not filtered!")
#--------------------------------------------------------------------------------------
# DATA IMPUTATION
if sb_DM_delRows_wNA == "No" and n_rows_wNAs > 0:
st.write("- Data imputation method for numeric variables:", sb_DM_dImp_num)
st.write("- Data imputation method for other variable types:", sb_DM_dImp_other)
st.write("- Imputation grouped by:", group_by_num)
#--------------------------------------------------------------------------------------
# DATA TRANSFORMATION
# log
if len(sb_DM_dTrans_log) > 1:
st.write("-", len(sb_DM_dTrans_log), " columns were log-transformed:", ', '.join(sb_DM_dTrans_log))
elif len(sb_DM_dTrans_log) == 1:
st.write("-",len(sb_DM_dTrans_log), " column was log-transformed:", sb_DM_dTrans_log[0])
elif len(sb_DM_dTrans_log) == 0:
st.write("- No column was log-transformed!")
# sqrt
if len(sb_DM_dTrans_sqrt) > 1:
st.write("-", len(sb_DM_dTrans_sqrt), " columns were sqrt-transformed:", ', '.join(sb_DM_dTrans_sqrt))
elif len(sb_DM_dTrans_sqrt) == 1:
st.write("-",len(sb_DM_dTrans_sqrt), " column was sqrt-transformed:", sb_DM_dTrans_sqrt[0])
elif len(sb_DM_dTrans_sqrt) == 0:
st.write("- No column was sqrt-transformed!")
# square
if len(sb_DM_dTrans_square) > 1:
st.write("-", len(sb_DM_dTrans_square), " columns were squared:", ', '.join(sb_DM_dTrans_square))
elif len(sb_DM_dTrans_square) == 1:
st.write("-",len(sb_DM_dTrans_square), " column was squared:", sb_DM_dTrans_square[0])
elif len(sb_DM_dTrans_square) == 0:
st.write("- No column was squared!")
# centering
if len(sb_DM_dTrans_cent) > 1:
st.write("-", len(sb_DM_dTrans_cent), " columns were centered:", ', '.join(sb_DM_dTrans_cent))
elif len(sb_DM_dTrans_cent) == 1:
st.write("-",len(sb_DM_dTrans_cent), " column was centered:", sb_DM_dTrans_cent[0])
elif len(sb_DM_dTrans_cent) == 0:
st.write("- No column was centered!")
# standardize
if len(sb_DM_dTrans_stand) > 1:
st.write("-", len(sb_DM_dTrans_stand), " columns were standardized:", ', '.join(sb_DM_dTrans_stand))
elif len(sb_DM_dTrans_stand) == 1:
st.write("-",len(sb_DM_dTrans_stand), " column was standardized:", sb_DM_dTrans_stand[0])
elif len(sb_DM_dTrans_stand) == 0:
st.write("- No column was standardized!")
# normalize
if len(sb_DM_dTrans_norm) > 1:
st.write("-", len(sb_DM_dTrans_norm), " columns were normalized:", ', '.join(sb_DM_dTrans_norm))
elif len(sb_DM_dTrans_norm) == 1:
st.write("-",len(sb_DM_dTrans_norm), " column was normalized:", sb_DM_dTrans_norm[0])
elif len(sb_DM_dTrans_norm) == 0:
st.write("- No column was normalized!")
# numeric category
if sb_DM_dTrans_numCat is not None:
if len(sb_DM_dTrans_numCat) > 1:
st.write("-", len(sb_DM_dTrans_numCat), " columns were transformed to numeric categories:", ', '.join(sb_DM_dTrans_numCat))
elif len(sb_DM_dTrans_numCat) == 1:
st.write("-",len(sb_DM_dTrans_numCat), " column was transformed to numeric categories:", sb_DM_dTrans_numCat[0])
elif sb_DM_dTrans_numCat is None:
st.write("- No column was transformed to numeric categories!")
# multiplication
if sb_DM_dTrans_mult != 0:
st.write("-", "Number of variable multiplications: ", sb_DM_dTrans_mult)
elif sb_DM_dTrans_mult == 0:
st.write("- No variables were multiplied!")
# division
if sb_DM_dTrans_div != 0:
st.write("-", "Number of variable divisions: ", sb_DM_dTrans_div)
elif sb_DM_dTrans_div == 0:
st.write("- No variables were divided!")
st.write("")
st.write("")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# UPDATED DATA SUMMARY
# Show only if changes were made
if any(v for v in [sb_DM_delCols, sb_DM_dImp_num, sb_DM_dImp_other, sb_DM_dTrans_log, sb_DM_dTrans_sqrt, sb_DM_dTrans_square, sb_DM_dTrans_cent, sb_DM_dTrans_stand, sb_DM_dTrans_norm, sb_DM_dTrans_numCat ] if v is not None) or sb_DM_delDup == "Yes" or sb_DM_delRows_wNA == "Yes" or sb_DM_dTrans_mult != 0 or sb_DM_dTrans_div != 0 or filter_var != "-" or delRows!='-' or keepRows!='-' or len(sb_DM_keepCols) > 0:
dev_expander_dsPost = st.expander("Explore cleaned and transformed panel data info and stats", expanded = False)
with dev_expander_dsPost:
if df.shape[1] > 2 and df.shape[0] > 0:
# Show cleaned and transformed data & data info
df_summary_post = fc.data_summary(df)
if st.checkbox("Show cleaned and transformed data", value = False):
n_rows_post = df.shape[0]
n_cols_post = df.shape[1]
st.dataframe(df)
st.write("Data shape: ", n_rows_post, "rows and ", n_cols_post, "columns")
# Download transformed data:
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="Clean. and transf. data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "CleanedTransfData__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download cleaned and transformed data</a>
""",
unsafe_allow_html=True)
st.write("")
if df[df.duplicated()].shape[0] > 0 or df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
check_nasAnddupl2 = st.checkbox("Show duplicates and NAs info (processed)", value = False)
if check_nasAnddupl2:
index_c = []
for c in df.columns:
for r in df.index:
if pd.isnull(df[c][r]):
index_c.append(r)
if df[df.duplicated()].shape[0] > 0:
st.write("Number of duplicates: ", df[df.duplicated()].shape[0])
st.write("Duplicate row index: ", ', '.join(map(str,list(df.index[df.duplicated()]))))
if df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
st.write("Number of rows with NAs: ", len(pd.unique(sorted(index_c))))
st.write("Rows with NAs: ", ', '.join(map(str,list(pd.unique(sorted(index_c))))))
# Show cleaned and transformed variable info
if st.checkbox("Show cleaned and transformed variable info", value = False):
st.write(df_summary_post["Variable types"])
# Show summary statistics (cleaned and transformed data)
if st.checkbox('Show summary statistics (cleaned and transformed data)', value = False):
st.write(df_summary_post["ALL"].style.set_precision(user_precision))
# Download link for cleaned data statistics
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="cleaned_data")
df_summary_post["Variable types"].to_excel(excel_file, sheet_name="cleaned_variable_info")
df_summary_post["ALL"].to_excel(excel_file, sheet_name="cleaned_summary_statistics")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Cleaned data summary statistics_panel_" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download cleaned data summary statistics</a>
""",
unsafe_allow_html=True)
st.write("")
if fc.get_mode(df).loc["n_unique"].any():
st.caption("** Mode is not unique.")
if sett_hints:
st.info(str(fc.learning_hints("de_summary_statistics")))
else:
st.error("ERROR: No data available for preprocessing!")
return
dev_expander_anovPost = st.expander("ANOVA for cleaned and transformed panel data", expanded = False)
with dev_expander_anovPost:
if df.shape[1] > 2 and df.shape[0] > 0:
# Target variable
target_var2 = st.selectbox('Select target variable', df.drop([entity, time], axis = 1).columns)
if df[target_var2].dtypes == "int64" or df[target_var2].dtypes == "float64":
class_var_options = df.columns
class_var_options = class_var_options[class_var_options.isin(df.drop(target_var2, axis = 1).columns)]
clas_var2 = st.selectbox('Select classifier variable', [entity, time],)
# Means and sd by entity
col1, col2 = st.columns(2)
with col1:
df_anova_woTime = df.drop([time], axis = 1)
df_grouped_ent = df_anova_woTime.groupby(entity)
st.write("Mean based on entity:")
st.write(df_grouped_ent.mean()[target_var2])
st.write("")
with col2:
st.write("SD based on entity:")
st.write(df_grouped_ent.std()[target_var2])
st.write("")
# Means and sd by time
col3, col4 = st.columns(2)
with col3:
df_anova_woEnt= df.drop([entity], axis = 1)
df_grouped_time = df_anova_woEnt.groupby(time)
counts_time = pd.DataFrame(df_grouped_time.count()[target_var2])
counts_time.columns = ["count"]
st.write("Mean based on time:")
st.write(df_grouped_time.mean()[target_var2])
st.write("")
with col4:
st.write("SD based on time:")
st.write(df_grouped_time.std()[target_var2])
st.write("")
col9, col10 = st.columns(2)
with col9:
st.write("Boxplot grouped by entity:")
box_size1 = st.slider("Select box size ", 1, 50, 5)
# Grouped boxplot by entity
grouped_boxplot_data = pd.DataFrame()
grouped_boxplot_data[entity] = df[entity]
grouped_boxplot_data[time] = df[time]
grouped_boxplot_data["Index"] = df.index
grouped_boxplot_data[target_var2] = df[target_var2]
grouped_boxchart_ent = alt.Chart(grouped_boxplot_data, height = 300).mark_boxplot(size = box_size1, color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X(entity, scale = alt.Scale(zero = False)),
y = alt.Y(target_var2, scale = alt.Scale(zero = False)),
tooltip = [target_var2, entity, time, "Index"]
).configure_axis(
labelFontSize = 11,
titleFontSize = 12
)
st.altair_chart(grouped_boxchart_ent, use_container_width=True)
with col10:
st.write("Boxplot grouped by time:")
box_size2 = st.slider("Select box size ", 1, 50, 5)
# Grouped boxplot by time
grouped_boxplot_data = pd.DataFrame()
grouped_boxplot_data[time] = df[time]
grouped_boxplot_data[entity] = df[entity]
grouped_boxplot_data["Index"] = df.index
grouped_boxplot_data[target_var2] = df[target_var2]
grouped_boxchart_time = alt.Chart(grouped_boxplot_data, height = 300).mark_boxplot(size = box_size2, color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X(time, scale = alt.Scale(domain = [min(df[time]), max(df[time])])),
y = alt.Y(target_var2, scale = alt.Scale(zero = False)),
tooltip = [target_var2, entity, time, "Index"]
).configure_axis(
labelFontSize = 11,
titleFontSize = 12
)
st.altair_chart(grouped_boxchart_time, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("de_anova_boxplot")))
st.write("")
# Count for entity and time
col5, col6 = st.columns(2)
with col5:
st.write("Number of observations per entity:")
counts_ent = pd.DataFrame(df_grouped_ent.count()[target_var2])
counts_ent.columns = ["count"]
st.write(counts_ent.transpose())
with col6:
st.write("Number of observations per time:")
counts_time = pd.DataFrame(df_grouped_time.count()[target_var2])
counts_time.columns = ["count"]
st.write(counts_time.transpose())
if sett_hints:
st.info(str(fc.learning_hints("de_anova_count")))
st.write("")
# ANOVA calculation
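# One-way ANOVA computed by hand from group counts, means and variances (k groups, n observations in total):
#   SS_between = sum_g n_g * (mean_g - grand_mean)^2, with df_between = k - 1
#   SS_within  = sum_g (n_g - 1) * var_g,             with df_within  = n - k
#   F = (SS_between/df_between) / (SS_within/df_within), p-value from the F(df_between, df_within) distribution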
df_grouped = df[[target_var2,clas_var2]].groupby(clas_var2)
overall_mean = (df_grouped.mean()*df_grouped.count()).sum()/df_grouped.count().sum()
dof_between = len(df_grouped.count())-1
dof_within = df_grouped.count().sum()-len(df_grouped.count())
dof_tot = dof_between + dof_within
SS_between = (((df_grouped.mean()-overall_mean)**2)*df_grouped.count()).sum()
SS_within = (df_grouped.var()*(df_grouped.count()-1)).sum()
SS_total = SS_between + SS_within
MS_between = SS_between/dof_between
MS_within = SS_within/dof_within
F_stat = MS_between/MS_within
p_value = scipy.stats.f.sf(F_stat, dof_between, dof_within)
anova_table=pd.DataFrame({
"DF": [dof_between, dof_within.values[0], dof_tot.values[0]],
"SS": [SS_between.values[0], SS_within.values[0], SS_total.values[0]],
"MS": [MS_between.values[0], MS_within.values[0], ""],
"F-statistic": [F_stat.values[0], "", ""],
"p-value": [p_value[0], "", ""]},
index = ["Between", "Within", "Total"],)
st.write("ANOVA:")
st.write(anova_table)
if sett_hints:
st.info(str(fc.learning_hints("de_anova_table")))
st.write("")
#Anova (OLS)
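# The classifier variable is integer-encoded via pd.factorize and an OLS of the target on a constant
# plus these codes is fitted; only its residuals are used below, for the QQ-plot and residual histogram.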
codes = pd.factorize(df[clas_var2])[0]
ano_ols = sm.OLS(df[target_var2], sm.add_constant(codes))
ano_ols_output = ano_ols.fit()
residuals = ano_ols_output.resid
col7, col8 = st.columns(2)
with col7:
# QQ-plot
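# Standardized residuals (centered and scaled by their SD) are plotted against theoretical normal
# quantiles from scipy.stats.probplot; points close to the reference line indicate approximately
# normally distributed residuals.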
st.write("Normal QQ-plot:")
st.write("")
st.write("")
st.write("")
st.write("")
st.write("")
st.write("")
qq_plot_data = pd.DataFrame()
qq_plot_data["StandResiduals"] = (residuals - residuals.mean())/residuals.std()
qq_plot_data["Index"] = df.index
qq_plot_data[entity] = df[entity]
qq_plot_data[time] = df[time]
qq_plot_data = qq_plot_data.sort_values(by = ["StandResiduals"])
qq_plot_data["Theoretical quantiles"] = stats.probplot(residuals, dist="norm")[0][0]
qq_plot = alt.Chart(qq_plot_data, height = 300).mark_circle(size=20).encode(
x = alt.X("Theoretical quantiles", title = "theoretical quantiles", scale = alt.Scale(domain = [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("StandResiduals", title = "stand. residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["StandResiduals", "Theoretical quantiles", entity, time, "Index"]
)
line = alt.Chart(
pd.DataFrame({"Theoretical quantiles": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])], "StandResiduals": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("Theoretical quantiles"),
alt.Y("StandResiduals"),
)
st.altair_chart(qq_plot + line, use_container_width = True)
with col8:
# Residuals histogram
st.write("Residuals histogram:")
residuals_hist = pd.DataFrame(residuals)
residuals_hist.columns = ["residuals"]
binNo_res2 = st.slider("Select maximum number of bins ", 5, 100, 25)
hist_plot = alt.Chart(residuals_hist, height = 300).mark_bar().encode(
x = alt.X("residuals", title = "residuals", bin = alt.BinParams(maxbins = binNo_res2), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("count()", title = "count of records", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["count()", alt.Tooltip("residuals", bin = alt.BinParams(maxbins = binNo_res2))]
)
st.altair_chart(hist_plot, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("de_anova_residuals")))
# Download link for ANOVA statistics
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_grouped_ent.mean()[target_var2].to_excel(excel_file, sheet_name="entity_mean")
df_grouped_ent.std()[target_var2].to_excel(excel_file, sheet_name="entity_sd")
df_grouped_time.mean()[target_var2].to_excel(excel_file, sheet_name="time_mean")
df_grouped_time.std()[target_var2].to_excel(excel_file, sheet_name="time_sd")
counts_ent.transpose().to_excel(excel_file, sheet_name="entity_obs")
counts_time.transpose().to_excel(excel_file, sheet_name="time_obs")
anova_table.to_excel(excel_file, sheet_name="ANOVA table")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Cleaned ANOVA statistics__" + target_var2 + "__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download cleaned ANOVA statistics</a>
""",
unsafe_allow_html=True)
st.write("")
else:
st.error("ERROR: The target variable must be a numerical one!")
else:
st.error("ERROR: No data available for ANOVA!")
return
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# DATA VISUALIZATION
data_visualization_container = st.container()
with data_visualization_container:
#st.write("")
st.write("")
st.write("")
st.header("**Data visualization**")
dev_expander_dv = st.expander("Explore visualization types", expanded = False)
with dev_expander_dv:
if df.shape[1] > 2 and df.shape[0] > 0:
st.write('**Variable selection**')
varl_sel_options = df.columns
varl_sel_options = varl_sel_options[varl_sel_options.isin(df.drop([entity, time], axis = 1).columns)]
var_sel = st.selectbox('Select variable for visualizations', varl_sel_options, key = st.session_state['key'])
if df[var_sel].dtypes == "float64" or df[var_sel].dtypes == "float32" or df[var_sel].dtypes == "int64" or df[var_sel].dtypes == "int32":
a4, a5 = st.columns(2)
with a4:
st.write('**Scatterplot with LOESS line**')
yy_options = df.columns
yy_options = yy_options[yy_options.isin(df.drop([entity, time], axis = 1).columns)]
yy = st.selectbox('Select variable for y-axis', yy_options, key = st.session_state['key'])
if df[yy].dtypes == "float64" or df[yy].dtypes == "float32" or df[yy].dtypes == "int64" or df[yy].dtypes == "int32":
fig_data = pd.DataFrame()
fig_data[yy] = df[yy]
fig_data[var_sel] = df[var_sel]
fig_data["Index"] = df.index
fig_data[entity] = df[entity]
fig_data[time] = df[time]
fig = alt.Chart(fig_data).mark_circle().encode(
x = alt.X(var_sel, scale = alt.Scale(domain = [min(fig_data[var_sel]), max(fig_data[var_sel])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y(yy, scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [yy, var_sel, entity, time, "Index"]
)
st.altair_chart(fig + fig.transform_loess(var_sel, yy).mark_line(size = 2, color = "darkred"), use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("dv_scatterplot")))
else: st.error("ERROR: Please select a numeric variable for the y-axis!")
with a5:
st.write('**Histogram**')
binNo = st.slider("Select maximum number of bins", 5, 100, 25, key = st.session_state['key'])
fig2 = alt.Chart(df).mark_bar().encode(
x = alt.X(var_sel, title = var_sel + " (binned)", bin = alt.BinParams(maxbins = binNo), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("count()", title = "count of records", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["count()", alt.Tooltip(var_sel, bin = alt.BinParams(maxbins = binNo))]
)
st.altair_chart(fig2, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("dv_histogram")))
a6, a7 = st.columns(2)
with a6:
st.write('**Boxplot**')
# Boxplot
boxplot_data = pd.DataFrame()
boxplot_data[var_sel] = df[var_sel]
boxplot_data["Index"] = df.index
boxplot_data[entity] = df[entity]
boxplot_data[time] = df[time]
boxplot = alt.Chart(boxplot_data).mark_boxplot(size = 100, color = "#1f77b4", median = dict(color = "darkred")).encode(
y = alt.Y(var_sel, scale = alt.Scale(zero = False)),
tooltip = [var_sel, entity, time, "Index"]
).configure_axis(
labelFontSize = 11,
titleFontSize = 12
)
st.altair_chart(boxplot, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("dv_boxplot")))
with a7:
st.write("**QQ-plot**")
var_values = df[var_sel]
qqplot_data = pd.DataFrame()
qqplot_data[var_sel] = var_values
qqplot_data["Index"] = df.index
qqplot_data[entity] = df[entity]
qqplot_data[time] = df[time]
qqplot_data = qqplot_data.sort_values(by = [var_sel])
qqplot_data["Theoretical quantiles"] = stats.probplot(var_values, dist="norm")[0][0]
qqplot = alt.Chart(qqplot_data).mark_circle(size=20).encode(
x = alt.X("Theoretical quantiles", title = "theoretical quantiles", scale = alt.Scale(domain = [min(qqplot_data["Theoretical quantiles"]), max(qqplot_data["Theoretical quantiles"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y(var_sel, title = str(var_sel), scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [var_sel, "Theoretical quantiles", entity, time, "Index"]
)
st.altair_chart(qqplot + qqplot.transform_regression('Theoretical quantiles', var_sel).mark_line(size = 2, color = "darkred"), use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("dv_qqplot")))
else: st.error("ERROR: Please select a numeric variable!")
else: st.error("ERROR: No data available for Data Visualization!")
# Check again after processing
if np.where(df[entity].isnull())[0].size > 0:
entity_na_warn = "WARNING: The variable selected for entity has NAs!"
else:entity_na_warn = False
if np.where(df[time].isnull())[0].size > 0:
time_na_warn = "WARNING: The variable selected for time has NAs!"
else:time_na_warn = False
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# PANEL DATA MODELLING
data_modelling_container = st.container()
with data_modelling_container:
#st.write("")
#st.write("")
#st.write("")
st.write("")
st.write("")
st.header("**Panel data modelling**")
st.markdown("Go for creating predictive models of your panel data using panel data modelling! STATY will take care of the modelling for you, so you can put your focus on results interpretation and communication! ")
PDM_settings = st.expander("Specify model", expanded = False)
with PDM_settings:
if time_na_warn == False and entity_na_warn == False:
# Initial status for running models
model_full_results = None
do_modval = "No"
model_val_results = None
model_full_results = None
panel_model_fit = None
if df.shape[1] > 2 and df.shape[0] > 0:
#--------------------------------------------------------------------------------------
# GENERAL SETTINGS
st.markdown("**Variable selection**")
# Variable categories
df_summary_model = fc.data_summary(df)
var_cat = df_summary_model["Variable types"].loc["category"]
# Response variable
response_var_options = df.columns
response_var_options = response_var_options[response_var_options.isin(df.drop(entity, axis = 1).columns)]
if time != "NA":
response_var_options = response_var_options[response_var_options.isin(df.drop(time, axis = 1).columns)]
response_var = st.selectbox("Select response variable", response_var_options, key = st.session_state['key'])
# Check if response variable is numeric and has no NAs
response_var_message_num = False
response_var_message_na = False
response_var_message_cat = False
if var_cat.loc[response_var] == "string/binary" or var_cat.loc[response_var] == "bool/binary":
response_var_message_num = "ERROR: Please select a numeric response variable!"
elif var_cat.loc[response_var] == "string/categorical" or var_cat.loc[response_var] == "other" or var_cat.loc[response_var] == "string/single":
response_var_message_num = "ERROR: Please select a numeric response variable!"
elif var_cat.loc[response_var] == "categorical":
response_var_message_cat = "WARNING: Categorical variable is treated as continuous variable!"
if response_var_message_num != False:
st.error(response_var_message_num)
if response_var_message_na != False:
st.error(response_var_message_na)
if response_var_message_cat != False:
st.warning(response_var_message_cat)
# Continue if everything is clean for response variable
if response_var_message_num == False and response_var_message_na == False:
# Select explanatory variables
expl_var_options = response_var_options[response_var_options.isin(df.drop(response_var, axis = 1).columns)]
expl_var = st.multiselect("Select explanatory variables", expl_var_options, key = st.session_state['key'])
var_list = list([entity]) + list([time]) + list([response_var]) + list(expl_var)
# Check if explanatory variables are numeric
expl_var_message_num = False
expl_var_message_na = False
if any(a for a in df[expl_var].dtypes if a != "float64" and a != "float32" and a != "int64" and a != "int32"):
expl_var_not_num = df[expl_var].select_dtypes(exclude=["int64", "int32", "float64", "float32"]).columns
expl_var_message_num = "ERROR: Please exclude non-numeric variables: " + ', '.join(map(str,list(expl_var_not_num)))
# Check if NAs are present and delete them automatically (delete before run models button)
if np.where(df[var_list].isnull())[0].size > 0:
st.warning("WARNING: Your modelling data set includes NAs. Rows with NAs are automatically deleted!")
if expl_var_message_num != False:
st.error(expl_var_message_num)
elif expl_var_message_na != False:
st.error(expl_var_message_na)
# Continue if everything is clean for explanatory variables and at least one was selected
elif expl_var_message_num == False and expl_var_message_na == False and len(expl_var) > 0:
#--------------------------------------------------------------------------------------
# ALGORITHMS
st.markdown("**Specify modelling algorithm**")
# Algorithms selection
col1, col2 = st.columns(2)
algorithms = ["Entity Fixed Effects", "Time Fixed Effects", "Two-ways Fixed Effects", "Random Effects", "Pooled"]
with col1:
PDM_alg = st.selectbox("Select modelling technique", algorithms)
# Covariance type
with col2:
PDM_cov_type = st.selectbox("Select covariance type", ["homoskedastic", "heteroskedastic", "clustered"])
PDM_cov_type2 = None
if PDM_cov_type == "clustered":
PDM_cov_type2 = st.selectbox("Select cluster type", ["entity", "time", "both"])
#--------------------------------------------------------------------------------------
# VALIDATION SETTINGS
st.markdown("**Validation settings**")
do_modval= st.selectbox("Use model validation", ["No", "Yes"])
if do_modval == "Yes":
col1, col2 = st.columns(2)
# Select training/ test ratio
with col1:
train_frac = st.slider("Select training data size", 0.5, 0.95, 0.8)
# Select number for validation runs
with col2:
val_runs = st.slider("Select number for validation runs", 5, 100, 10)
#--------------------------------------------------------------------------------------
# PREDICTION SETTINGS
st.markdown("**Model predictions**")
do_modprednew = st.selectbox("Use model prediction for new data", ["No", "Yes"])
if do_modprednew == "No":
df_new = pd.DataFrame()
if do_modprednew == "Yes":
# Upload new data
new_data_pred = st.file_uploader(" ", type=["csv", "txt"])
if new_data_pred is not None:
# Read data
if uploaded_data is not None:
df_new = pd.read_csv(new_data_pred, decimal=dec_sep, sep = col_sep,thousands=thousands_sep,encoding=encoding_val, engine='python')
else:
df_new = pd.read_csv(new_data_pred, sep = ";|,|\t",engine='python')
st.success('Loading data... done!')
# Transform columns if any were transformed
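# The blocks below re-create, on the uploaded prediction data, every transformation whose result is used
# as an explanatory variable. Each block first checks that the required source columns exist in the new
# data; standardization and normalization reuse the mean/SD and min/max of the original modelling data.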
# Log-transformation
if sb_DM_dTrans_log is not None:
# List of log-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_log:
if "log_"+tv in expl_var:
tv_list.append(tv)
# Check if log-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for log-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_log(df_new, tv_list)
# Sqrt-transformation
if sb_DM_dTrans_sqrt is not None:
# List of sqrt-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_sqrt:
if "sqrt_"+tv in expl_var:
tv_list.append(tv)
# Check if sqrt-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for sqrt-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_sqrt(df_new, tv_list)
# Square-transformation
if sb_DM_dTrans_square is not None:
# List of square-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_square:
if "square_"+tv in expl_var:
tv_list.append(tv)
# Check if square-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for square-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_square(df_new, tv_list)
# Standardization
if sb_DM_dTrans_stand is not None:
# List of standardized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_stand:
if "stand_"+tv in expl_var:
tv_list.append(tv)
# Check if standardized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for standardization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use mean and standard deviation of original data for standardization
for tv in tv_list:
if df_new[tv].dtypes == "float64" or df_new[tv].dtypes == "int64" or df_new[tv].dtypes == "float32" or df_new[tv].dtypes == "int32":
if df[tv].std() != 0:
new_var_name = "stand_" + tv
new_var = (df_new[tv] - df[tv].mean())/df[tv].std()
df_new[new_var_name] = new_var
else:
st.error("ERROR: " + str(tv) + " is not numerical and cannot be standardized!")
return
# Normalization
if sb_DM_dTrans_norm is not None:
# List of normalized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_norm:
if "norm_"+tv in expl_var:
tv_list.append(tv)
# Check if normalized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for normalization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use min and max of original data for normalization
for tv in tv_list:
if df_new[tv].dtypes == "float64" or df_new[tv].dtypes == "int64" or df_new[tv].dtypes == "float32" or df_new[tv].dtypes == "int32":
if (df[tv].max()-df[tv].min()) != 0:
new_var_name = "norm_" + tv
new_var = (df_new[tv] - df[tv].min())/(df[tv].max()-df[tv].min())
df_new[new_var_name] = new_var
else:
st.error("ERROR: " + str(tv) + " is not numerical and cannot be normalized!")
return
# Categorization
if sb_DM_dTrans_numCat is not None:
# List of categorized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_numCat:
if "numCat_"+tv in expl_var:
tv_list.append(tv)
# Check if categorized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for categorization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use same categories as for original data
for tv in tv_list:
new_var_name = "numCat_" + tv
new_var = pd.DataFrame(index = df_new.index, columns = [new_var_name])
for r in df_new.index:
if df.loc[df[tv] == df_new[tv][r]].empty == False:
new_var.loc[r, new_var_name] = df["numCat_" + tv][df.loc[df[tv] == df_new[tv][r]].index[0]]
else:
st.error("ERROR: Category is missing for the value in row: "+ str(r) + ", variable: " + str(tv))
return
df_new[new_var_name] = new_var.astype('int64')
# Multiplication
if sb_DM_dTrans_mult != 0:
# List of multiplied variables that are included as explanatory variables
tv_list = []
for tv in range(0, sb_DM_dTrans_mult):
mult_name = "mult_" + str(multiplication_pairs.loc[tv]["Var1"]) + "_" + str(multiplication_pairs.loc[tv]["Var2"])
if mult_name in expl_var:
tv_list.append(str(multiplication_pairs.loc[tv]["Var1"]))
tv_list.append(str(multiplication_pairs.loc[tv]["Var2"]))
# Check if multiplied explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for multiplication in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
for var in range(0, sb_DM_dTrans_mult):
df_new = fc.var_transform_mult(df_new, multiplication_pairs.loc[var]["Var1"], multiplication_pairs.loc[var]["Var2"])
# Division
if sb_DM_dTrans_div != 0:
# List of divided variables that are included as explanatory variables
tv_list = []
for tv in range(0, sb_DM_dTrans_div):
mult_name = "div_" + str(division_pairs.loc[tv]["Var1"]) + "_" + str(division_pairs.loc[tv]["Var2"])
if mult_name in expl_var:
tv_list.append(str(division_pairs.loc[tv]["Var1"]))
tv_list.append(str(division_pairs.loc[tv]["Var2"]))
# Check if divided explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for division in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
for var in range(0, sb_DM_dTrans_div):
df_new = fc.var_transform_div(df_new, division_pairs.loc[var]["Var1"], division_pairs.loc[var]["Var2"])
# Check that all explanatory variables as well as entity and time are available as columns in the new data
expl_list = []
for expl_incl in expl_var:
if expl_incl not in df_new.columns:
expl_list.append(expl_incl)
if expl_list:
st.error("ERROR: Some variables are missing in new data: "+ ', '.join(expl_list))
return
if any(a for a in df_new.columns if a == entity) and any(a for a in df_new.columns if a == time):
st.info("All variables are available for predictions!")
elif any(a for a in df_new.columns if a == entity) == False:
st.error("ERROR: Entity variable is missing!")
return
elif any(a for a in df_new.columns if a == time) == False:
st.error("ERROR: Time variable is missing!")
return
# Check if NAs are present
if df_new.iloc[list(pd.unique(np.where(df_new.isnull())[0]))].shape[0] == 0:
st.empty()
else:
df_new = df_new[list([entity]) + list([time]) + expl_var].dropna()
st.warning("WARNING: Your new data set includes NAs. Rows with NAs are automatically deleted!")
df_new = df_new[list([entity]) + list([time]) + expl_var]
# Modelling data set
df = df[var_list]
# Check if NAs are present and delete them automatically
if np.where(df[var_list].isnull())[0].size > 0:
df = df.dropna()
#--------------------------------------------------------------------------------------
# SETTINGS SUMMARY
st.write("")
# Show modelling data
if st.checkbox("Show modelling data"):
st.write(df)
st.write("Data shape: ", df.shape[0], " rows and ", df.shape[1], " columns")
# Download link for modelling data
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="modelling_data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name= "Modelling data__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download modelling data</a>
""",
unsafe_allow_html=True)
st.write("")
# Show prediction data
if do_modprednew == "Yes":
if new_data_pred is not None:
if st.checkbox("Show new data for predictions"):
st.write(df_new)
st.write("Data shape: ", df_new.shape[0], " rows and ", df_new.shape[1], " columns")
# Download link for forecast data
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_new.to_excel(excel_file, sheet_name="new_data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name= "New data for predictions__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download new data for predictions</a>
""",
unsafe_allow_html=True)
st.write("")
# Show modelling settings
if st.checkbox('Show a summary of modelling settings', value = False):
#--------------------------------------------------------------------------------------
# ALGORITHMS
st.write("Algorithms summary:")
st.write("- ",PDM_alg)
st.write("- Covariance type: ", PDM_cov_type)
if PDM_cov_type2 is not None:
st.write("- Cluster type: ", PDM_cov_type2)
st.write("")
#--------------------------------------------------------------------------------------
# SETTINGS
# General settings summary
st.write("General settings summary:")
# Modelling formula
if expl_var != False:
st.write("- Modelling formula:", response_var, "~", ' + '.join(expl_var))
st.write("- Entity:", entity)
st.write("- Time:", time)
if do_modval == "Yes":
# Train/ test ratio
if train_frac != False:
st.write("- Train/ test ratio:", str(round(train_frac*100)), "% / ", str(round(100-train_frac*100)), "%")
# Validation runs
if val_runs != False:
st.write("- Validation runs:", str(val_runs))
st.write("")
st.write("")
#--------------------------------------------------------------------------------------
# RUN MODELS
# Models are run on button click
st.write("")
run_models = st.button("Run model")
st.write("")
# Run everything on button click
if run_models:
# Check if new data available
if do_modprednew == "Yes":
if new_data_pred is None:
st.error("ERROR: Please upload new data for additional model predictions or select 'No'!")
return
# Define clustered cov matrix "entity", "time", "both"
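# Map the selected cluster type to the cluster_entity/cluster_time flags that are passed on to the
# model's fit() call; clustering by entity is the default assumed below.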
cluster_entity = True
cluster_time = False
if PDM_cov_type == "clustered":
if PDM_cov_type2 == "entity":
cluster_entity = True
cluster_time = False
if PDM_cov_type2 == "time":
cluster_entity = False
cluster_time = True
if PDM_cov_type2 == "both":
cluster_entity = True
cluster_time = True
# Prepare data
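# The data are indexed by (entity, time) as required by the linearmodels panel estimators. Two regressor
# matrices are built: X_data1 without a constant for the fixed-effects estimators (the constant would be
# absorbed by the effects) and X_data2 with a constant (sm.add_constant) for random effects and pooled OLS.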
data = df.set_index([entity, time])
Y_data = data[response_var]
X_data1 = data[expl_var] # for efe, tfe, twfe
X_data2 = sm.add_constant(data[expl_var]) # for re, pool
# Model validation
if do_modval == "Yes":
# Progress bar
st.info("Validation progress")
my_bar = st.progress(0.0)
progress1 = 0
# Model validation
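# Performance metrics collected per validation run:
#   % VE = R² on the test data, MSE/RMSE = (root) mean squared error, MAE = mean absolute error,
#   MaxErr = maximum residual error, EVRS = explained variance regression score, SSR = sum of squared residuals.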
# R²
model_eval_r2 = pd.DataFrame(index = range(val_runs), columns = [response_var])
# MSE
model_eval_mse = pd.DataFrame(index = range(val_runs), columns = ["Value"])
# RMSE
model_eval_rmse = pd.DataFrame(index = range(val_runs), columns = ["Value"])
# MAE
model_eval_mae = pd.DataFrame(index = range(val_runs), columns = ["Value"])
# MaxERR
model_eval_maxerr = pd.DataFrame(index = range(val_runs), columns = ["Value"])
# EVRS
model_eval_evrs = pd.DataFrame(index = range(val_runs), columns = ["Value"])
# SSR
model_eval_ssr = pd.DataFrame(index = range(val_runs), columns = ["Value"])
# Model validation summary
model_eval_mean = pd.DataFrame(index = ["% VE", "MSE", "RMSE", "MAE", "MaxErr", "EVRS", "SSR"], columns = ["Value"])
model_eval_sd = pd.DataFrame(index = ["% VE", "MSE", "RMSE", "MAE", "MaxErr", "EVRS", "SSR"], columns = ["Value"])
# Collect all residuals in test runs
residuals_allruns = {}
for val in range(val_runs):
# Split data into train/ test data
if PDM_alg != "Pooled" and PDM_alg != "Random Effects":
X_data = X_data1.copy()
if PDM_alg == "Pooled" or PDM_alg == "Random Effects":
X_data = X_data2.copy()
X_train, X_test, Y_train, Y_test = train_test_split(X_data, Y_data, train_size = train_frac, random_state = val)
# Train selected panel model
# efe
if PDM_alg == "Entity Fixed Effects":
panel_model_efe_val = PanelOLS(Y_train, X_train, entity_effects = True, time_effects = False)
panel_model_fit_efe_val = panel_model_efe_val.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# tfe
if PDM_alg == "Time Fixed Effects":
panel_model_tfe_val = PanelOLS(Y_train, X_train, entity_effects = False, time_effects = True)
panel_model_fit_tfe_val = panel_model_tfe_val.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# twfe
if PDM_alg == "Two-ways Fixed Effects":
panel_model_twfe_val = PanelOLS(Y_train, X_train, entity_effects = True, time_effects = True)
panel_model_fit_twfe_val = panel_model_twfe_val.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# re
if PDM_alg == "Random Effects":
panel_model_re_val = RandomEffects(Y_train, X_train)
panel_model_fit_re_val = panel_model_re_val.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# pool
if PDM_alg == "Pooled":
panel_model_pool_val = PooledOLS(Y_train, X_train)
panel_model_fit_pool_val = panel_model_pool_val.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# save selected model
if PDM_alg == "Entity Fixed Effects":
panel_model_fit_val = panel_model_fit_efe_val
if PDM_alg == "Time Fixed Effects":
panel_model_fit_val = panel_model_fit_tfe_val
if PDM_alg == "Two-ways Fixed Effects":
panel_model_fit_val = panel_model_fit_twfe_val
if PDM_alg == "Random Effects":
panel_model_fit_val = panel_model_fit_re_val
if PDM_alg == "Pooled":
panel_model_fit_val = panel_model_fit_pool_val
# Extract effects
if PDM_alg != "Pooled":
comb_effects = panel_model_fit_val.estimated_effects
ent_effects = pd.DataFrame(index = X_train.reset_index()[entity].drop_duplicates(), columns = ["Value"])
time_effects = pd.DataFrame(index = sorted(list(X_train.reset_index()[time].drop_duplicates())), columns = ["Value"])
# Use LSDV for estimating effects
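# Least squares dummy variable (LSDV) approach: the response is regressed on the explanatory variables
# plus dummy variables for the entities and/or time periods; the estimated dummy coefficients are then
# used as the entity/time effects that are added back to the predictions.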
if PDM_alg == "Entity Fixed Effects":
X_train_mlr = pd.concat([X_train.reset_index(drop = True), pd.get_dummies(X_train.reset_index()[entity])], axis = 1)
Y_train_mlr = Y_train.reset_index(drop = True)
model_mlr_val = sm.OLS(Y_train_mlr, X_train_mlr)
model_mlr_fit_val = model_mlr_val.fit()
for e in ent_effects.index:
ent_effects.loc[e]["Value"] = model_mlr_fit_val.params[e]
for t in time_effects.index:
time_effects.loc[t]["Value"] = 0
if PDM_alg == "Time Fixed Effects":
X_train_mlr = pd.concat([X_train.reset_index(drop = True), pd.get_dummies(X_train.reset_index()[time])], axis = 1)
Y_train_mlr = Y_train.reset_index(drop = True)
model_mlr_val = sm.OLS(Y_train_mlr, X_train_mlr)
model_mlr_fit_val = model_mlr_val.fit()
for e in ent_effects.index:
ent_effects.loc[e]["Value"] = 0
for t in time_effects.index:
time_effects.loc[t]["Value"] = model_mlr_fit_val.params[t]
if PDM_alg == "Two-ways Fixed Effects":
X_train_mlr = pd.concat([X_train.reset_index(drop = True), pd.get_dummies(X_train.reset_index()[entity]), pd.get_dummies(X_train.reset_index()[time])], axis = 1)
Y_train_mlr = Y_train.reset_index(drop = True)
model_mlr_val = sm.OLS(Y_train_mlr, X_train_mlr)
model_mlr_fit_val = model_mlr_val.fit()
for e in ent_effects.index:
ent_effects.loc[e]["Value"] = model_mlr_fit_val.params[e]
for t in time_effects.index:
time_effects.loc[t]["Value"] = model_mlr_fit_val.params[t]
if PDM_alg == "Random Effects":
for e in ent_effects.index:
ent_effects.loc[e]["Value"] = comb_effects.loc[e,].reset_index(drop = True).iloc[0][0]
# Prediction for Y_test (without including effects)
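# predict() returns predictions based on the estimated common coefficients only (X·b); the estimated
# entity/time effects (where available for the respective index) are added observation by observation
# in the loop below.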
Y_test_pred = panel_model_fit_val.predict(X_test)
# Add effects for predictions
for p in range(Y_test_pred.size):
entity_ind = Y_test_pred.index[p][0]
time_ind = Y_test_pred.index[p][1]
# if effects are available, add effect
if PDM_alg == "Entity Fixed Effects":
if any(a for a in ent_effects.index if a == entity_ind):
effect = ent_effects.loc[entity_ind][0]
Y_test_pred["predictions"].loc[entity_ind, time_ind] = Y_test_pred["predictions"].loc[entity_ind, time_ind] + effect
if PDM_alg == "Time Fixed Effects":
if any(a for a in time_effects.index if a == time_ind):
effect = time_effects.loc[time_ind][0]
Y_test_pred["predictions"].loc[entity_ind, time_ind] = Y_test_pred["predictions"].loc[entity_ind, time_ind] + effect
if PDM_alg == "Two-ways Fixed Effects":
if any(a for a in time_effects.index if a == time_ind):
effect_time = time_effects.loc[time_ind][0]
else: effect_time = 0
if any(a for a in ent_effects.index if a == entity_ind):
effect_entity = ent_effects.loc[entity_ind][0]
else: effect_entity = 0
Y_test_pred["predictions"].loc[entity_ind, time_ind] = Y_test_pred["predictions"].loc[entity_ind, time_ind] + effect_entity + effect_time
if PDM_alg == "Random Effects":
if any(a for a in ent_effects.index if a == entity_ind):
effect = ent_effects.loc[entity_ind][0]
Y_test_pred["predictions"].loc[entity_ind, time_ind] = Y_test_pred["predictions"].loc[entity_ind, time_ind] + effect
# Adjust format
Y_test_pred = Y_test_pred.reset_index()["predictions"]
Y_test = Y_test.reset_index()[response_var]
# Save R² for test data
model_eval_r2.iloc[val][response_var] = r2_score(Y_test, Y_test_pred)
# Save MSE for test data
model_eval_mse.iloc[val]["Value"] = mean_squared_error(Y_test, Y_test_pred, squared = True)
# Save RMSE for test data
model_eval_rmse.iloc[val]["Value"] = mean_squared_error(Y_test, Y_test_pred, squared = False)
# Save MAE for test data
model_eval_mae.iloc[val]["Value"] = mean_absolute_error(Y_test, Y_test_pred)
# Save MaxERR for test data
model_eval_maxerr.iloc[val]["Value"] = max_error(Y_test, Y_test_pred)
# Save explained variance regression score for test data
model_eval_evrs.iloc[val]["Value"] = explained_variance_score(Y_test, Y_test_pred)
# Save sum of squared residuals for test data
model_eval_ssr.iloc[val]["Value"] = ((Y_test-Y_test_pred)**2).sum()
# Save residual values for test data
res = Y_test-Y_test_pred
residuals_allruns[val] = res
progress1 += 1
my_bar.progress(progress1/(val_runs))
# Calculate mean performance statistics
# Mean
model_eval_mean.loc["% VE"]["Value"] = model_eval_r2[response_var].mean()
model_eval_mean.loc["MSE"]["Value"] = model_eval_mse["Value"].mean()
model_eval_mean.loc["RMSE"]["Value"] = model_eval_rmse["Value"].mean()
model_eval_mean.loc["MAE"]["Value"] = model_eval_mae["Value"].mean()
model_eval_mean.loc["MaxErr"]["Value"] = model_eval_maxerr["Value"].mean()
model_eval_mean.loc["EVRS"]["Value"] = model_eval_evrs["Value"].mean()
model_eval_mean.loc["SSR"]["Value"] = model_eval_ssr["Value"].mean()
# Sd
model_eval_sd.loc["% VE"]["Value"] = model_eval_r2[response_var].std()
model_eval_sd.loc["MSE"]["Value"] = model_eval_mse["Value"].std()
model_eval_sd.loc["RMSE"]["Value"] = model_eval_rmse["Value"].std()
model_eval_sd.loc["MAE"]["Value"] = model_eval_mae["Value"].std()
model_eval_sd.loc["MaxErr"]["Value"] = model_eval_maxerr["Value"].std()
model_eval_sd.loc["EVRS"]["Value"] = model_eval_evrs["Value"].std()
model_eval_sd.loc["SSR"]["Value"] = model_eval_ssr["Value"].std()
# Residuals
residuals_collection = pd.DataFrame()
for x in residuals_allruns:
residuals_collection = residuals_collection.append(pd.DataFrame(residuals_allruns[x]), ignore_index = True)
residuals_collection.columns = [response_var]
# Collect validation results
model_val_results = {}
model_val_results["mean"] = model_eval_mean
model_val_results["sd"] = model_eval_sd
model_val_results["residuals"] = residuals_collection
model_val_results["variance explained"] = model_eval_r2
# Full model
# Progress bar
st.info("Full model progress")
my_bar_fm = st.progress(0.0)
progress2 = 0
# efe
panel_model_efe = PanelOLS(Y_data, X_data1, entity_effects = True, time_effects = False)
panel_model_fit_efe = panel_model_efe.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# tfe
panel_model_tfe = PanelOLS(Y_data, X_data1, entity_effects = False, time_effects = True)
panel_model_fit_tfe = panel_model_tfe.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# twfe
panel_model_twfe = PanelOLS(Y_data, X_data1, entity_effects = True, time_effects = True)
panel_model_fit_twfe = panel_model_twfe.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# re
panel_model_re = RandomEffects(Y_data, X_data2)
panel_model_fit_re = panel_model_re.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# pool
panel_model_pool = PooledOLS(Y_data, X_data2)
panel_model_fit_pool = panel_model_pool.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# save selected model
if PDM_alg == "Entity Fixed Effects":
panel_model_fit = panel_model_fit_efe
if PDM_alg == "Time Fixed Effects":
panel_model_fit = panel_model_fit_tfe
if PDM_alg == "Two-ways Fixed Effects":
panel_model_fit = panel_model_fit_twfe
if PDM_alg == "Random Effects":
panel_model_fit = panel_model_fit_re
if PDM_alg == "Pooled":
panel_model_fit = panel_model_fit_pool
# Entity information
ent_inf = pd.DataFrame(index = ["No. entities", "Avg observations", "Median observations", "Min observations", "Max observations"], columns = ["Value"])
ent_inf.loc["No. entities"] = panel_model_fit.entity_info["total"]
ent_inf.loc["Avg observations"] = panel_model_fit.entity_info["mean"]
ent_inf.loc["Median observations"] = panel_model_fit.entity_info["median"]
ent_inf.loc["Min observations"] = panel_model_fit.entity_info["min"]
ent_inf.loc["Max observations"] = panel_model_fit.entity_info["max"]
# Time information
time_inf = pd.DataFrame(index = ["No. time periods", "Avg observations", "Median observations", "Min observations", "Max observations"], columns = ["Value"])
time_inf.loc["No. time periods"] = panel_model_fit.time_info["total"]
time_inf.loc["Avg observations"] = panel_model_fit.time_info["mean"]
time_inf.loc["Median observations"] = panel_model_fit.time_info["median"]
time_inf.loc["Min observations"] = panel_model_fit.time_info["min"]
time_inf.loc["Max observations"] = panel_model_fit.time_info["max"]
# Regression information
reg_inf = pd.DataFrame(index = ["Dep. variable", "Estimator", "Method", "No. observations", "DF residuals", "DF model", "Covariance type"], columns = ["Value"])
reg_inf.loc["Dep. variable"] = response_var
reg_inf.loc["Estimator"] = panel_model_fit.name
if PDM_alg == "Entity Fixed Effects" or PDM_alg == "Time Fixed Effects" or "Two-ways Fixed":
reg_inf.loc["Method"] = "Within"
if PDM_alg == "Random Effects":
reg_inf.loc["Method"] = "Quasi-demeaned"
if PDM_alg == "Pooled":
reg_inf.loc["Method"] = "Least squares"
reg_inf.loc["No. observations"] = panel_model_fit.nobs
reg_inf.loc["DF residuals"] = panel_model_fit.df_resid
reg_inf.loc["DF model"] = panel_model_fit.df_model
reg_inf.loc["Covariance type"] = panel_model_fit._cov_type
# Regression statistics
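# Fitted values are reconstructed as observed minus model residuals, so they include the estimated
# effects; the R² measures and log-likelihood are read from the fitted linearmodels results object.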
fitted = df[response_var]-panel_model_fit.resids.values
obs = df[response_var]
reg_stats = pd.DataFrame(index = ["R²", "R² (between)", "R² (within)", "R² (overall)", "Log-likelihood", "SST", "SST (overall)"], columns = ["Value"])
reg_stats.loc["R²"] = panel_model_fit._r2
reg_stats.loc["R² (between)"] = panel_model_fit._c2b**2
reg_stats.loc["R² (within)"] = panel_model_fit._c2w**2
reg_stats.loc["R² (overall)"] = panel_model_fit._c2o**2
reg_stats.loc["Log-likelihood"] = panel_model_fit._loglik
reg_stats.loc["SST"] = panel_model_fit.total_ss
reg_stats.loc["SST (overall)"] = ((obs-obs.mean())**2).sum()
# Overall performance metrics (with effects)
reg_overall = pd.DataFrame(index = ["% VE", "MSE", "RMSE", "MAE", "MaxErr", "EVRS", "SSR"], columns = ["Value"])
reg_overall.loc["% VE"] = r2_score(obs, fitted)
reg_overall.loc["MSE"] = mean_squared_error(obs, fitted, squared = True)
reg_overall.loc["RMSE"] = mean_squared_error(obs, fitted, squared = False)
reg_overall.loc["MAE"] = mean_absolute_error(obs, fitted)
reg_overall.loc["MaxErr"] = max_error(obs, fitted)
reg_overall.loc["EVRS"] = explained_variance_score(obs, fitted)
reg_overall.loc["SSR"] = ((obs-fitted)**2).sum()
# ANOVA
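# For the pooled model, a classical regression ANOVA table (explained vs. residual sum of squares with
# the corresponding degrees of freedom and overall F-statistic) is derived from an equivalent
# statsmodels OLS fit of the response on a constant plus the explanatory variables.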
if PDM_alg == "Pooled":
Y_data_mlr = df[response_var]
X_data_mlr = sm.add_constant(df[expl_var])
full_model_mlr = sm.OLS(Y_data_mlr, X_data_mlr)
full_model_fit = full_model_mlr.fit()
reg_anova = pd.DataFrame(index = ["Regression", "Residual", "Total"], columns = ["DF", "SS", "MS", "F-statistic"])
reg_anova.loc["Regression"]["DF"] = full_model_fit.df_model
reg_anova.loc["Regression"]["SS"] = full_model_fit.ess
reg_anova.loc["Regression"]["MS"] = full_model_fit.ess/full_model_fit.df_model
reg_anova.loc["Regression"]["F-statistic"] = full_model_fit.fvalue
reg_anova.loc["Residual"]["DF"] = full_model_fit.df_resid
reg_anova.loc["Residual"]["SS"] = full_model_fit.ssr
reg_anova.loc["Residual"]["MS"] = full_model_fit.ssr/full_model_fit.df_resid
reg_anova.loc["Residual"]["F-statistic"] = ""
reg_anova.loc["Total"]["DF"] = full_model_fit.df_resid + full_model_fit.df_model
reg_anova.loc["Total"]["SS"] = full_model_fit.ssr + full_model_fit.ess
reg_anova.loc["Total"]["MS"] = ""
reg_anova.loc["Total"]["F-statistic"] = ""
# Coefficients
if PDM_alg == "Entity Fixed Effects" or PDM_alg == "Time Fixed Effects" or "Two-ways Fixed Effects":
reg_coef = pd.DataFrame(index = expl_var, columns = ["coeff", "std err", "t-statistic", "p-value", "lower 95%", "upper 95%"])
for c in expl_var:
reg_coef.loc[c]["coeff"] = panel_model_fit.params[expl_var.index(c)]
reg_coef.loc[c]["std err"] = panel_model_fit.std_errors.loc[c]
reg_coef.loc[c]["t-statistic"] = panel_model_fit.tstats.loc[c]
reg_coef.loc[c]["p-value"] = panel_model_fit.pvalues.loc[c]
reg_coef.loc[c]["lower 95%"] = panel_model_fit.conf_int(level = 0.95).loc[c]["lower"]
reg_coef.loc[c]["upper 95%"] = panel_model_fit.conf_int(level = 0.95).loc[c]["upper"]
if PDM_alg == "Random Effects" or PDM_alg == "Pooled":
reg_coef = pd.DataFrame(index = ["const"]+ expl_var, columns = ["coeff", "std err", "t-statistic", "p-value", "lower 95%", "upper 95%"])
for c in ["const"] + expl_var:
reg_coef.loc[c]["coeff"] = panel_model_fit.params[(["const"]+ expl_var).index(c)]
reg_coef.loc[c]["std err"] = panel_model_fit.std_errors.loc[c]
reg_coef.loc[c]["t-statistic"] = panel_model_fit.tstats.loc[c]
reg_coef.loc[c]["p-value"] = panel_model_fit.pvalues.loc[c]
reg_coef.loc[c]["lower 95%"] = panel_model_fit.conf_int(level = 0.95).loc[c]["lower"]
reg_coef.loc[c]["upper 95%"] = panel_model_fit.conf_int(level = 0.95).loc[c]["upper"]
# Effects
reg_ent_effects = pd.DataFrame(index = df[entity].drop_duplicates(), columns = ["Value"])
reg_time_effects = pd.DataFrame(index = sorted(list(df[time].drop_duplicates())), columns = ["Value"])
reg_comb_effects = panel_model_fit.estimated_effects
reg_comb_effects.columns = ["Value"]
# Use LSDV for estimating effects
Y_data_mlr = df[response_var]
if PDM_alg == "Pooled" or PDM_alg == "Random Effects":
X_data_mlr = sm.add_constant(df[expl_var])
else: X_data_mlr = df[expl_var]
if PDM_alg == "Entity Fixed Effects":
X_data_mlr = pd.concat([X_data_mlr, pd.get_dummies(df[entity])], axis = 1)
model_mlr = sm.OLS(Y_data_mlr, X_data_mlr)
model_mlr_fit = model_mlr.fit()
for e in reg_ent_effects.index:
reg_ent_effects.loc[e]["Value"] = model_mlr_fit.params[e]
for t in reg_time_effects.index:
reg_time_effects.loc[t]["Value"] = 0
if PDM_alg == "Time Fixed Effects":
X_data_mlr = pd.concat([X_data_mlr, pd.get_dummies(df[time])], axis = 1)
model_mlr = sm.OLS(Y_data_mlr, X_data_mlr)
model_mlr_fit = model_mlr.fit()
for e in reg_ent_effects.index:
reg_ent_effects.loc[e]["Value"] = 0
for t in reg_time_effects.index:
reg_time_effects.loc[t]["Value"] = model_mlr_fit.params[t]
if PDM_alg == "Two-ways Fixed Effects":
X_data_mlr = pd.concat([X_data_mlr, pd.get_dummies(df[entity]), pd.get_dummies(df[time])], axis = 1)
model_mlr = sm.OLS(Y_data_mlr, X_data_mlr)
model_mlr_fit = model_mlr.fit()
for e in reg_ent_effects.index:
reg_ent_effects.loc[e]["Value"] = model_mlr_fit.params[e]
for t in reg_time_effects.index:
reg_time_effects.loc[t]["Value"] = model_mlr_fit.params[t]
if PDM_alg == "Random Effects":
for e in reg_ent_effects.index:
reg_ent_effects.loc[e]["Value"] = reg_comb_effects.loc[e,].reset_index(drop = True).iloc[0][0]
for t in reg_time_effects.index:
reg_time_effects.loc[t]["Value"] = 0
# New predictions
if df_new.empty == False:
data_new = df_new.set_index([entity, time])
X_data1_new = data_new[expl_var] # for efe, tfe, twfe
X_data2_new = sm.add_constant(data_new[expl_var]) # for re, pool
if PDM_alg != "Pooled" and PDM_alg != "Random Effects":
X_data_new = X_data1_new.copy()
if PDM_alg == "Pooled" or PDM_alg == "Random Effects":
X_data_new = X_data2_new.copy()
# Predictions for the new data (without including effects)
Y_pred_new = panel_model_fit.predict(X_data_new)
# Add effects for new predictions
for p in range(Y_pred_new.size):
entity_ind = Y_pred_new.index[p][0]
time_ind = Y_pred_new.index[p][1]
# if effects are available, add effect
if PDM_alg == "Entity Fixed Effects":
if any(a for a in reg_ent_effects.index if a == entity_ind):
effect = reg_ent_effects.loc[entity_ind][0]
Y_pred_new["predictions"].loc[entity_ind, time_ind] = Y_pred_new["predictions"].loc[entity_ind, time_ind] + effect
if PDM_alg == "Time Fixed Effects":
if any(a for a in reg_time_effects.index if a == time_ind):
effect = reg_time_effects.loc[time_ind][0]
Y_pred_new["predictions"].loc[entity_ind, time_ind] = Y_pred_new["predictions"].loc[entity_ind, time_ind] + effect
if PDM_alg == "Two-ways Fixed Effects":
if any(a for a in reg_time_effects.index if a == time_ind):
effect_time = reg_time_effects.loc[time_ind][0]
else: effect_time = 0
if any(a for a in reg_ent_effects.index if a == entity_ind):
effect_entity = reg_ent_effects.loc[entity_ind][0]
else: effect_entity = 0
Y_pred_new["predictions"].loc[entity_ind, time_ind] = Y_pred_new["predictions"].loc[entity_ind, time_ind] + effect_entity + effect_time
if PDM_alg == "Random Effects":
if any(a for a in reg_ent_effects.index if a == entity_ind):
effect = reg_ent_effects.loc[entity_ind][0]
Y_pred_new["predictions"].loc[entity_ind, time_ind] = Y_pred_new["predictions"].loc[entity_ind, time_ind] + effect
# Variance decomposition
if PDM_alg == "Random Effects":
reg_var_decomp = pd.DataFrame(index = ["idiosyncratic", "individual"], columns = ["variance", "share"])
reg_theta = pd.DataFrame(index = ["theta"], columns = df[entity].drop_duplicates())
reg_var_decomp.loc["idiosyncratic"]["variance"] = panel_model_fit.variance_decomposition["Residual"]
reg_var_decomp.loc["individual"]["variance"] = panel_model_fit.variance_decomposition["Effects"]
reg_var_decomp.loc["idiosyncratic"]["share"] = panel_model_fit.variance_decomposition["Residual"]/(panel_model_fit.variance_decomposition["Residual"]+panel_model_fit.variance_decomposition["Effects"])
reg_var_decomp.loc["individual"]["share"] = panel_model_fit.variance_decomposition["Effects"]/(panel_model_fit.variance_decomposition["Residual"]+panel_model_fit.variance_decomposition["Effects"])
reg_theta.loc["theta"] = list(panel_model_fit.theta.values)
for j in reg_theta.columns:
reg_theta.loc["theta"][j] = reg_theta.loc["theta"][j][0]
# Statistical tests
if PDM_alg == "Entity Fixed Effects":
if PDM_cov_type == "homoskedastic":
reg_test = pd.DataFrame(index = ["test statistic", "p-value", "distribution"], columns = ["F-test (non-robust)", "F-test (robust)", "F-test (poolability)", "Hausman-test"])
else:
reg_test = pd.DataFrame(index = ["test statistic", "p-value", "distribution"], columns = ["F-test (non-robust)", "F-test (robust)", "F-test (poolability)"])
else:
reg_test = pd.DataFrame(index = ["test statistic", "p-value", "distribution"], columns = ["F-test (non-robust)", "F-test (robust)", "F-test (poolability)"])
if PDM_alg == "Pooled" or PDM_alg == "Random Effects":
reg_test = pd.DataFrame(index = ["test statistic", "p-value", "distribution"], columns = ["F-test (non-robust)", "F-test (robust)"])
reg_test.loc["test statistic"]["F-test (non-robust)"] = panel_model_fit.f_statistic.stat
reg_test.loc["p-value"]["F-test (non-robust)"] = panel_model_fit.f_statistic.pval
reg_test.loc["distribution"]["F-test (non-robust)"] = "F(" + str(panel_model_fit.f_statistic.df) + ", " + str(panel_model_fit.f_statistic.df_denom) + ")"
reg_test.loc["test statistic"]["F-test (robust)"] = panel_model_fit.f_statistic_robust.stat
reg_test.loc["p-value"]["F-test (robust)"] = panel_model_fit.f_statistic_robust.pval
reg_test.loc["distribution"]["F-test (robust)"] = "F(" + str(panel_model_fit.f_statistic_robust.df) + ", " + str(panel_model_fit.f_statistic_robust.df_denom) + ")"
if PDM_alg != "Pooled" and PDM_alg != "Random Effects" :
reg_test.loc["test statistic"]["F-test (poolability)"] = panel_model_fit.f_pooled.stat
reg_test.loc["p-value"]["F-test (poolability)"] = panel_model_fit.f_pooled.pval
reg_test.loc["distribution"]["F-test (poolability)"] = "F(" + str(panel_model_fit.f_pooled.df) + ", " + str(panel_model_fit.f_pooled.df_denom) + ")"
if PDM_alg == "Entity Fixed Effects":
if PDM_cov_type == "homoskedastic":
reg_test.loc["test statistic"]["Hausman-test"] = fc.hausman_test(panel_model_fit, panel_model_fit_re)[0]
reg_test.loc["p-value"]["Hausman-test"] = fc.hausman_test(panel_model_fit, panel_model_fit_re)[2]
reg_test.loc["distribution"]["Hausman-test"] = "Chi²(" + str(fc.hausman_test(panel_model_fit, panel_model_fit_re)[1]) + ")"
# Heteroskedasticity tests
reg_het_test = pd.DataFrame(index = ["test statistic", "p-value"], columns = ["Breusch-Pagan test", "White test (without int.)", "White test (with int.)"])
if PDM_alg == "Pooled":
# Create datasets
Y_data_mlr = df[response_var]
X_data_mlr = sm.add_constant(df[expl_var])
# Create MLR models
full_model_mlr = sm.OLS(Y_data_mlr, X_data_mlr)
full_model_fit = full_model_mlr.fit()
# Breusch-Pagan heteroscedasticity test
bp_result = sm.stats.diagnostic.het_breuschpagan(full_model_fit.resid, full_model_fit.model.exog)
reg_het_test.loc["test statistic"]["Breusch-Pagan test"] = bp_result[0]
reg_het_test.loc["p-value"]["Breusch-Pagan test"] = bp_result[1]
# White heteroscedasticity test with interaction
white_int_result = sm.stats.diagnostic.het_white(full_model_fit.resid, full_model_fit.model.exog)
reg_het_test.loc["test statistic"]["White test (with int.)"] = white_int_result[0]
reg_het_test.loc["p-value"]["White test (with int.)"] = white_int_result[1]
# White heteroscedasticity test without interaction
X_data_mlr_white = X_data_mlr
for i in expl_var:
X_data_mlr_white[i+ "_squared"] = X_data_mlr_white[i]**2
white = sm.OLS(full_model_fit.resid**2, X_data_mlr_white)
del X_data_mlr_white
white_fit = white.fit()
white_statistic = white_fit.rsquared*data.shape[0]
white_p_value = stats.chi2.sf(white_statistic,len(white_fit.model.exog_names)-1)
reg_het_test.loc["test statistic"]["White test (without int.)"] = white_statistic
reg_het_test.loc["p-value"]["White test (without int.)"] = white_p_value
# Residuals distribution
reg_resid = pd.DataFrame(index = ["min", "25%-Q", "median", "75%-Q", "max"], columns = ["Value"])
reg_resid.loc["min"]["Value"] = panel_model_fit.resids.min()
reg_resid.loc["25%-Q"]["Value"] = panel_model_fit.resids.quantile(q = 0.25)
reg_resid.loc["median"]["Value"] = panel_model_fit.resids.quantile(q = 0.5)
reg_resid.loc["75%-Q"]["Value"] = panel_model_fit.resids.quantile(q = 0.75)
reg_resid.loc["max"]["Value"] = panel_model_fit.resids.max()
# Save full model results
model_full_results = {}
model_full_results["Entity information"] = ent_inf
model_full_results["Time information"] = time_inf
model_full_results["Regression information"] = reg_inf
model_full_results["Regression statistics"] = reg_stats
model_full_results["Overall performance"] = reg_overall
if PDM_alg == "Pooled":
model_full_results["ANOVA"] = reg_anova
model_full_results["Coefficients"] = reg_coef
model_full_results["Entity effects"] = reg_ent_effects
model_full_results["Time effects"] = reg_time_effects
model_full_results["Combined effects"] = reg_comb_effects
if PDM_alg == "Random Effects":
model_full_results["Variance decomposition"] = reg_var_decomp
model_full_results["Theta"] = reg_theta
model_full_results["tests"] = reg_test
model_full_results["hetTests"] = reg_het_test
model_full_results["Residuals"] = reg_resid
progress2 += 1
my_bar_fm.progress(progress2/1)
# Success message
st.success('Model run successfully!')
else: st.error("ERROR: No data available for Modelling!")
#++++++++++++++++++++++
# PDM OUTPUT
# Show only if model was run (no further widgets after run models or the full page reloads)
if run_models:
st.write("")
st.write("")
st.header("**Model outputs**")
#--------------------------------------------------------------------------------------
# FULL MODEL OUTPUT
full_output = st.expander("Full model output", expanded = False)
with full_output:
if model_full_results is not None:
st.markdown("**Correlation Matrix & 2D-Histogram**")
# Define variable selector
var_sel_cor = alt.selection_single(fields=['variable', 'variable2'], clear=False,
init={'variable': response_var, 'variable2': response_var})
# Calculate correlation data
corr_data = df[[response_var] + expl_var].corr().stack().reset_index().rename(columns={0: "correlation", 'level_0': "variable", 'level_1': "variable2"})
corr_data["correlation_label"] = corr_data["correlation"].map('{:.2f}'.format)
# Basic plot
base = alt.Chart(corr_data).encode(
x = alt.X('variable2:O', sort = None, axis = alt.Axis(title = None, labelFontSize = 12)),
y = alt.Y('variable:O', sort = None, axis = alt.Axis(title = None, labelFontSize = 12))
)
# Correlation values to insert
text = base.mark_text().encode(
text='correlation_label',
color = alt.condition(
alt.datum.correlation > 0.5,
alt.value('white'),
alt.value('black')
)
)
# Correlation plot
corr_plot = base.mark_rect().encode(
color = alt.condition(var_sel_cor, alt.value('#86c29c'), 'correlation:Q', legend = alt.Legend(title = "Bravais-Pearson correlation coefficient", orient = "top", gradientLength = 350), scale = alt.Scale(scheme='redblue', reverse = True, domain = [-1,1]))
).add_selection(var_sel_cor)
# Calculate values for 2d histogram
value_columns = df[[response_var] + expl_var]
df_2dbinned = pd.concat([fc.compute_2d_histogram(var1, var2, df) for var1 in value_columns for var2 in value_columns])
# 2d binned histogram plot
scat_plot = alt.Chart(df_2dbinned).transform_filter(
var_sel_cor
).mark_rect().encode(
alt.X('value2:N', sort = alt.EncodingSortField(field='raw_left_value2'), axis = alt.Axis(title = "Horizontal variable", labelFontSize = 12)),
alt.Y('value:N', axis = alt.Axis(title = "Vertical variable", labelFontSize = 12), sort = alt.EncodingSortField(field='raw_left_value', order = 'descending')),
alt.Color('count:Q', scale = alt.Scale(scheme='reds'), legend = alt.Legend(title = "Count", orient = "top", gradientLength = 350))
)
# Combine all plots
correlation_plot = alt.vconcat((corr_plot + text).properties(width = 400, height = 400), scat_plot.properties(width = 400, height = 400)).resolve_scale(color = 'independent')
correlation_plot = correlation_plot.properties(padding = {"left": 50, "top": 5, "right": 5, "bottom": 50})
st.altair_chart(correlation_plot, use_container_width = True)
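                    # Note: the single-point Altair selection ties the two charts together - clicking
                    # a cell in the correlation heatmap filters the lower panel to the 2D histogram
                    # of that variable pair (it starts on the response variable vs. itself).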
if sett_hints:
st.info(str(fc.learning_hints("mod_cor")))
st.write("")
#-------------------------------------------------------------
# Regression output
st.markdown("**Regression output**")
full_out_col1, full_out_col2 = st.columns(2)
with full_out_col1:
# Entity information
st.write("Entity information:")
st.table(model_full_results["Entity information"].style.set_precision(user_precision))
with full_out_col2:
# Time information
st.write("Time period information:")
st.table(model_full_results["Time information"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_pd_information")))
st.write("")
full_out_col3, full_out_col4 = st.columns(2)
with full_out_col3:
# Regression information
st.write("Regression information:")
st.table(model_full_results["Regression information"].style.set_precision(user_precision))
with full_out_col4:
# Regression statistics
st.write("Regression statistics:")
st.table(model_full_results["Regression statistics"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_pd_regression")))
st.write("")
# Overall performance (with effects)
full_out_col_op1, full_out_col_op2 = st.columns(2)
with full_out_col_op1:
if PDM_alg != "Pooled":
st.write("Overall performance (with effects):")
if PDM_alg == "Pooled":
st.write("Overall performance :")
st.table(model_full_results["Overall performance"].style.set_precision(user_precision))
# Residuals
with full_out_col_op2:
st.write("Residuals:")
st.table(model_full_results["Residuals"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_pd_overallPerf")))
st.write("")
# Coefficients
st.write("Coefficients:")
st.table(model_full_results["Coefficients"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_pd_coef")))
st.write("")
# Effects
if PDM_alg != "Pooled":
full_out_col5, full_out_col6 = st.columns(2)
with full_out_col5:
st.write("Entity effects:")
st.write(model_full_results["Entity effects"].style.set_precision(user_precision))
with full_out_col6:
st.write("Time effects:")
st.write(model_full_results["Time effects"].style.set_precision(user_precision))
full_out_col7, full_out_col8 = st.columns(2)
with full_out_col7:
st.write("Combined effects:")
st.write(model_full_results["Combined effects"])
with full_out_col8:
st.write("")
if sett_hints:
st.info(str(fc.learning_hints("mod_pd_effects")))
st.write("")
# ANOVA
if PDM_alg == "Pooled":
st.write("ANOVA:")
st.table(model_full_results["ANOVA"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_pd_anova")))
st.write("")
# Statistical tests
if PDM_alg == "Random Effects":
full_out_col_re1, full_out_col_re2 = st.columns(2)
with full_out_col_re1:
st.write("Variance decomposition:")
st.table(model_full_results["Variance decomposition"].style.set_precision(user_precision))
with full_out_col_re2:
st.write("Theta:")
st.table(model_full_results["Theta"].transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_pd_varDecRE")))
st.write("")
st.write("F-tests:")
st.table(model_full_results["tests"].transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_pd_testRE")))
st.write("")
if PDM_alg == "Entity Fixed Effects":
if PDM_cov_type == "homoskedastic":
st.write("F-tests and Hausman-test:")
else: st.write("F-tests:")
st.table(model_full_results["tests"].transpose().style.set_precision(user_precision))
if PDM_cov_type == "homoskedastic":
if sett_hints:
st.info(str(fc.learning_hints("mod_pd_testEFE_homosk")))
else:
if sett_hints:
st.info(str(fc.learning_hints("mod_pd_testEFE")))
st.write("")
if PDM_alg != "Entity Fixed Effects" and PDM_alg != "Random Effects":
st.write("F-tests:")
st.table(model_full_results["tests"].transpose().style.set_precision(user_precision))
if PDM_alg == "Pooled":
if sett_hints:
st.info(str(fc.learning_hints("mod_pd_test_pooled")))
else:
if sett_hints:
st.info(str(fc.learning_hints("mod_pd_test")))
st.write("")
# Heteroskedasticity tests
if PDM_alg == "Pooled":
st.write("Heteroskedasticity tests:")
st.table(model_full_results["hetTests"].transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_hetTest")))
st.write("")
# Graphical output
full_out_col10, full_out_col11 = st.columns(2)
fitted_withEff = df[response_var]-panel_model_fit.resids.values
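                    # Note: fitted values including the estimated effects are recovered as
                    # observed - residuals, since the model residuals already account for the effects.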
with full_out_col10:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = list(fitted_withEff)
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with full_out_col11:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = panel_model_fit.resids.values
residuals_fitted_data["Fitted"] = list(fitted_withEff)
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_obsResVsFit")))
st.write("")
if PDM_alg == "Pooled":
full_out_col12, full_out_col13 = st.columns(2)
with full_out_col12:
st.write("Normal QQ-plot:")
residuals = panel_model_fit.resids.values
qq_plot_data = pd.DataFrame()
qq_plot_data["StandResiduals"] = (residuals - residuals.mean())/residuals.std()
qq_plot_data["Index"] = df.index
qq_plot_data = qq_plot_data.sort_values(by = ["StandResiduals"])
qq_plot_data["Theoretical quantiles"] = stats.probplot(residuals, dist="norm")[0][0]
qq_plot = alt.Chart(qq_plot_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Theoretical quantiles", title = "theoretical quantiles", scale = alt.Scale(domain = [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("StandResiduals", title = "stand. residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["StandResiduals", "Theoretical quantiles", "Index"]
)
line = alt.Chart(
pd.DataFrame({"Theoretical quantiles": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])], "StandResiduals": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("Theoretical quantiles"),
alt.Y("StandResiduals"),
)
st.altair_chart(qq_plot + line, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_qqplot")))
st.write("")
with full_out_col13:
st.write("Scale-Location:")
scale_location_data = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
@pytest.fixture
def df_checks():
"""fixture dataframe"""
return pd.DataFrame(
{
"famid": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"birth": [1, 2, 3, 1, 2, 3, 1, 2, 3],
"ht1": [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
"ht2": [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9],
}
)
@pytest.fixture
def df_multi():
"""MultiIndex dataframe fixture."""
return pd.DataFrame(
{
("name", "a"): {0: "Wilbur", 1: "Petunia", 2: "Gregory"},
("names", "aa"): {0: 67, 1: 80, 2: 64},
("more_names", "aaa"): {0: 56, 1: 90, 2: 50},
}
)
def test_column_level_wrong_type(df_multi):
"""Raise TypeError if wrong type is provided for column_level."""
with pytest.raises(TypeError):
df_multi.pivot_longer(index="name", column_level={0})
@pytest.mark.xfail(reason="checking is done within _select_columns")
def test_type_index(df_checks):
"""Raise TypeError if wrong type is provided for the index."""
with pytest.raises(TypeError):
df_checks.pivot_longer(index=2007)
@pytest.mark.xfail(reason="checking is done within _select_columns")
def test_type_column_names(df_checks):
"""Raise TypeError if wrong type is provided for column_names."""
with pytest.raises(TypeError):
df_checks.pivot_longer(column_names=2007)
def test_type_names_to(df_checks):
"""Raise TypeError if wrong type is provided for names_to."""
with pytest.raises(TypeError):
df_checks.pivot_longer(names_to={2007})
def test_subtype_names_to(df_checks):
"""
Raise TypeError if names_to is a sequence
and the wrong type is provided for entries
in names_to.
"""
with pytest.raises(TypeError, match="1 in names_to.+"):
df_checks.pivot_longer(names_to=[1])
def test_duplicate_names_to(df_checks):
"""Raise error if names_to contains duplicates."""
with pytest.raises(ValueError, match="y is duplicated in names_to."):
df_checks.pivot_longer(names_to=["y", "y"], names_pattern="(.+)(.)")
def test_both_names_sep_and_pattern(df_checks):
"""
Raise ValueError if both names_sep
and names_pattern is provided.
"""
with pytest.raises(
ValueError,
match="Only one of names_pattern or names_sep should be provided.",
):
df_checks.pivot_longer(
names_to=["rar", "bar"], names_sep="-", names_pattern="(.+)(.)"
)
def test_name_pattern_wrong_type(df_checks):
"""Raise TypeError if the wrong type provided for names_pattern."""
with pytest.raises(TypeError, match="names_pattern should be one of.+"):
df_checks.pivot_longer(names_to=["rar", "bar"], names_pattern=2007)
def test_name_pattern_no_names_to(df_checks):
"""Raise ValueError if names_pattern and names_to is None."""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=None, names_pattern="(.+)(.)")
def test_name_pattern_groups_len(df_checks):
"""
Raise ValueError if names_pattern
and the number of groups
differs from the length of names_to.
"""
with pytest.raises(
ValueError,
match="The length of names_to does not match "
"the number of groups in names_pattern.+",
):
df_checks.pivot_longer(names_to=".value", names_pattern="(.+)(.)")
def test_names_pattern_wrong_subtype(df_checks):
"""
Raise TypeError if names_pattern is a list/tuple
and wrong subtype is supplied.
"""
with pytest.raises(TypeError, match="1 in names_pattern.+"):
df_checks.pivot_longer(
names_to=["ht", "num"], names_pattern=[1, "\\d"]
)
def test_names_pattern_names_to_unequal_length(df_checks):
"""
Raise ValueError if names_pattern is a list/tuple
and wrong number of items in names_to.
"""
with pytest.raises(
ValueError,
match="The length of names_to does not match "
"the number of regexes in names_pattern.+",
):
df_checks.pivot_longer(
names_to=["variable"], names_pattern=["^ht", ".+i.+"]
)
def test_names_pattern_names_to_dot_value(df_checks):
"""
Raise Error if names_pattern is a list/tuple and
.value in names_to.
"""
with pytest.raises(
ValueError,
match=".value is not accepted in names_to "
"if names_pattern is a list/tuple.",
):
df_checks.pivot_longer(
names_to=["variable", ".value"], names_pattern=["^ht", ".+i.+"]
)
def test_name_sep_wrong_type(df_checks):
"""Raise TypeError if the wrong type is provided for names_sep."""
with pytest.raises(TypeError, match="names_sep should be one of.+"):
df_checks.pivot_longer(names_to=[".value", "num"], names_sep=["_"])
def test_name_sep_no_names_to(df_checks):
"""Raise ValueError if names_sep and names_to is None."""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=None, names_sep="_")
def test_values_to_wrong_type(df_checks):
"""Raise TypeError if the wrong type is provided for `values_to`."""
with pytest.raises(TypeError, match="values_to should be one of.+"):
df_checks.pivot_longer(values_to={"salvo"})
def test_values_to_wrong_type_names_pattern(df_checks):
"""
Raise TypeError if `values_to` is a list,
and names_pattern is not.
"""
with pytest.raises(
TypeError,
match="values_to can be a list/tuple only "
"if names_pattern is a list/tuple.",
):
df_checks.pivot_longer(values_to=["salvo"])
def test_values_to_names_pattern_unequal_length(df_checks):
"""
Raise ValueError if `values_to` is a list,
and the length of names_pattern
does not match the length of values_to.
"""
with pytest.raises(
ValueError,
match="The length of values_to does not match "
"the number of regexes in names_pattern.+",
):
df_checks.pivot_longer(
values_to=["salvo"],
names_pattern=["ht", r"\d"],
names_to=["foo", "bar"],
)
def test_values_to_names_seq_names_to(df_checks):
"""
Raise ValueError if `values_to` is a list,
and intersects with names_to.
"""
with pytest.raises(
ValueError, match="salvo in values_to already exists in names_to."
):
df_checks.pivot_longer(
values_to=["salvo"], names_pattern=["ht"], names_to="salvo"
)
def test_sub_values_to(df_checks):
"""Raise error if values_to is a sequence, and contains non strings."""
with pytest.raises(TypeError, match="1 in values_to.+"):
df_checks.pivot_longer(
names_to=["x", "y"],
names_pattern=[r"ht", r"\d"],
values_to=[1, "salvo"],
)
def test_duplicate_values_to(df_checks):
"""Raise error if values_to is a sequence, and contains duplicates."""
with pytest.raises(ValueError, match="salvo is duplicated in values_to."):
df_checks.pivot_longer(
names_to=["x", "y"],
names_pattern=[r"ht", r"\d"],
values_to=["salvo", "salvo"],
)
def test_values_to_exists_in_columns(df_checks):
"""
Raise ValueError if values_to already
exists in the dataframe's columns.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(index="birth", values_to="birth")
def test_values_to_exists_in_names_to(df_checks):
"""
Raise ValueError if values_to is in names_to.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(values_to="num", names_to="num")
def test_column_multiindex_names_sep(df_multi):
"""
Raise ValueError if the dataframe's column is a MultiIndex,
and names_sep is present.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(
column_names=[("names", "aa")],
names_sep="_",
names_to=["names", "others"],
)
def test_column_multiindex_names_pattern(df_multi):
"""
Raise ValueError if the dataframe's column is a MultiIndex,
and names_pattern is present.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(
index=[("name", "a")],
names_pattern=r"(.+)(.+)",
names_to=["names", "others"],
)
def test_index_tuple_multiindex(df_multi):
"""
Raise ValueError if index is a tuple,
instead of a list of tuples,
and the dataframe's column is a MultiIndex.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(index=("name", "a"))
def test_column_names_tuple_multiindex(df_multi):
"""
Raise ValueError if column_names is a tuple,
instead of a list of tuples,
and the dataframe's column is a MultiIndex.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(column_names=("names", "aa"))
def test_sort_by_appearance(df_checks):
"""Raise error if sort_by_appearance is not boolean."""
with pytest.raises(TypeError):
df_checks.pivot_longer(
names_to=[".value", "value"],
names_sep="_",
sort_by_appearance="TRUE",
)
def test_ignore_index(df_checks):
"""Raise error if ignore_index is not boolean."""
with pytest.raises(TypeError):
df_checks.pivot_longer(
names_to=[".value", "value"], names_sep="_", ignore_index="TRUE"
)
def test_names_to_index(df_checks):
"""
Raise ValueError if there is no names_sep/names_pattern,
.value not in names_to and names_to intersects with index.
"""
with pytest.raises(
ValueError,
match=r".+in names_to already exist as column labels.+",
):
df_checks.pivot_longer(
names_to="famid",
index="famid",
)
def test_names_sep_pattern_names_to_index(df_checks):
"""
Raise ValueError if names_sep/names_pattern,
.value not in names_to and names_to intersects with index.
"""
with pytest.raises(
ValueError,
match=r".+in names_to already exist as column labels.+",
):
df_checks.pivot_longer(
names_to=["dim", "famid"],
names_sep="_",
index="famid",
)
def test_dot_value_names_to_columns_intersect(df_checks):
"""
Raise ValueError if names_sep/names_pattern,
.value in names_to,
and names_to intersects with the new columns
"""
with pytest.raises(
ValueError,
match=r".+in names_to already exist in the new dataframe\'s columns.+",
):
df_checks.pivot_longer(
index="famid", names_to=(".value", "ht"), names_pattern="(.+)(.)"
)
def test_values_to_seq_index_intersect(df_checks):
"""
Raise ValueError if values_to is a sequence,
and intersects with the index
"""
match = ".+values_to already exist as column labels assigned "
match = match + "to the dataframe's index parameter.+"
with pytest.raises(ValueError, match=rf"{match}"):
df_checks.pivot_longer(
index="famid",
names_to=("value", "ht"),
names_pattern=["ht", r"\d"],
values_to=("famid", "foo"),
)
def test_dot_value_names_to_index_intersect(df_checks):
"""
Raise ValueError if names_sep/names_pattern,
.value in names_to,
and names_to intersects with the index
"""
match = ".+already exist as column labels assigned "
match = match + "to the dataframe's index parameter.+"
with pytest.raises(
ValueError,
match=rf"{match}",
):
df_checks.rename(columns={"famid": "ht"}).pivot_longer(
index="ht", names_to=(".value", "num"), names_pattern="(.+)(.)"
)
def test_names_pattern_list_empty_any(df_checks):
"""
Raise ValueError if names_pattern is a list,
and not all matches are returned.
"""
with pytest.raises(
ValueError, match="No match was returned for the regex.+"
):
df_checks.pivot_longer(
index=["famid", "birth"],
names_to=["ht"],
names_pattern=["rar"],
)
def test_names_pattern_no_match(df_checks):
"""Raise error if names_pattern is a regex and returns no matches."""
with pytest.raises(
ValueError, match="Column labels .+ could not be matched with any .+"
):
df_checks.pivot_longer(
index="famid",
names_to=[".value", "value"],
names_pattern=r"(rar)(.)",
)
def test_names_pattern_incomplete_match(df_checks):
"""
Raise error if names_pattern is a regex
and returns incomplete matches.
"""
with pytest.raises(
ValueError, match="Column labels .+ could not be matched with any .+"
):
df_checks.pivot_longer(
index="famid",
names_to=[".value", "value"],
names_pattern=r"(ht)(.)",
)
def test_names_sep_len(df_checks):
"""
Raise error if names_sep,
and the number of matches returned
is not equal to the length of names_to.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=".value", names_sep="(\\d)")
def test_pivot_index_only(df_checks):
"""Test output if only index is passed."""
result = df_checks.pivot_longer(
index=["famid", "birth"],
names_to="dim",
values_to="num",
)
actual = df_checks.melt(
["famid", "birth"], var_name="dim", value_name="num"
)
assert_frame_equal(result, actual)
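# Note: with only an index and no names_sep/names_pattern, pivot_longer is expected to
# reduce to pd.melt(id_vars=index, var_name=names_to, value_name=values_to), which is
# exactly the equivalence asserted in test_pivot_index_only above.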
def test_pivot_column_only(df_checks):
"""Test output if only column_names is passed."""
result = df_checks.pivot_longer(
column_names=["ht1", "ht2"],
names_to="dim",
values_to="num",
ignore_index=False,
)
actual = df_checks.melt(
["famid", "birth"],
var_name="dim",
value_name="num",
ignore_index=False,
)
assert_frame_equal(result, actual)
def test_pivot_sort_by_appearance(df_checks):
"""Test output if sort_by_appearance is True."""
result = df_checks.pivot_longer(
column_names="ht*",
names_to="dim",
values_to="num",
sort_by_appearance=True,
)
actual = (
df_checks.melt(
["famid", "birth"],
var_name="dim",
value_name="num",
ignore_index=False,
)
.sort_index()
.reset_index(drop=True)
)
| assert_frame_equal(result, actual) | pandas.testing.assert_frame_equal |
import re
from unittest.mock import Mock, call, patch
import numpy as np
import pandas as pd
import pytest
from rdt.transformers.categorical import (
CategoricalFuzzyTransformer, CategoricalTransformer, LabelEncodingTransformer,
OneHotEncodingTransformer)
RE_SSN = re.compile(r'\d\d\d-\d\d-\d\d\d\d')
class TestCategoricalTransformer:
def test___setstate__(self):
"""Test the ``__set_state__`` method.
Validate that the ``__dict__`` attribute is correctly udpdated when
Setup:
- create an instance of a ``CategoricalTransformer``.
Side effect:
- it updates the ``__dict__`` attribute of the object.
"""
# Setup
transformer = CategoricalTransformer()
# Run
transformer.__setstate__({
'intervals': {
None: 'abc'
}
})
# Assert
assert transformer.__dict__['intervals'][np.nan] == 'abc'
def test___init__(self):
"""Passed arguments must be stored as attributes."""
# Run
transformer = CategoricalTransformer(
fuzzy='fuzzy_value',
clip='clip_value',
)
# Asserts
assert transformer.fuzzy == 'fuzzy_value'
assert transformer.clip == 'clip_value'
def test_is_transform_deterministic(self):
"""Test the ``is_transform_deterministic`` method.
        Validate that this method returns the opposite boolean value of the ``fuzzy`` parameter.
Setup:
- initialize a ``CategoricalTransformer`` with ``fuzzy = True``.
Output:
- the boolean value which is the opposite of ``fuzzy``.
"""
# Setup
transformer = CategoricalTransformer(fuzzy=True)
# Run
output = transformer.is_transform_deterministic()
# Assert
assert output is False
def test_is_composition_identity(self):
"""Test the ``is_composition_identity`` method.
Since ``COMPOSITION_IS_IDENTITY`` is True, just validates that the method
returns the opposite boolean value of the ``fuzzy`` parameter.
Setup:
- initialize a ``CategoricalTransformer`` with ``fuzzy = True``.
Output:
- the boolean value which is the opposite of ``fuzzy``.
"""
# Setup
transformer = CategoricalTransformer(fuzzy=True)
# Run
output = transformer.is_composition_identity()
# Assert
assert output is False
def test__get_intervals(self):
"""Test the ``_get_intervals`` method.
Validate that the intervals for each categorical value are correct.
Input:
- a pandas series containing categorical values.
Output:
- a tuple, where the first element describes the intervals for each
categorical value (start, end).
"""
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
result = CategoricalTransformer._get_intervals(data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
'bar': 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', 'bar', 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert result[0] == expected_intervals
pd.testing.assert_series_equal(result[1], expected_means)
pd.testing.assert_frame_equal(result[2], expected_starts)
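        # Note: the interval encoding gives each category a slice of [0, 1] whose width equals its
        # relative frequency; the stored tuple is (start, end, mean, std), where mean is the
        # interval midpoint and std is width / 6 (so roughly +/-3 std stays inside the interval).
        # For example, 'foo' appears in 3 of 6 rows -> interval (0, 0.5), mean 0.25, std 0.5 / 6.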
def test__get_intervals_nans(self):
"""Test the ``_get_intervals`` method when data contains nan's.
Validate that the intervals for each categorical value are correct, when passed
data containing nan values.
Input:
        - a pandas series containing nan values and categorical values.
Output:
- a tuple, where the first element describes the intervals for each
categorical value (start, end).
"""
# Setup
data = pd.Series(['foo', np.nan, None, 'foo', 'foo', 'tar'])
# Run
result = CategoricalTransformer._get_intervals(data)
# Assert
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
np.nan: (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
np.nan: 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', np.nan, 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert result[0] == expected_intervals
pd.testing.assert_series_equal(result[1], expected_means)
pd.testing.assert_frame_equal(result[2], expected_starts)
def test__fit_intervals(self):
# Setup
transformer = CategoricalTransformer()
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
transformer._fit(data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
'bar': 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', 'bar', 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert transformer.intervals == expected_intervals
pd.testing.assert_series_equal(transformer.means, expected_means)
pd.testing.assert_frame_equal(transformer.starts, expected_starts)
def test__get_value_no_fuzzy(self):
# Setup
transformer = CategoricalTransformer(fuzzy=False)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
np.nan: (0.5, 1.0, 0.75, 0.5 / 6),
}
# Run
result_foo = transformer._get_value('foo')
result_nan = transformer._get_value(np.nan)
# Asserts
assert result_foo == 0.25
assert result_nan == 0.75
@patch('rdt.transformers.categorical.norm')
def test__get_value_fuzzy(self, norm_mock):
# setup
norm_mock.rvs.return_value = 0.2745
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.2745
def test__normalize_no_clip(self):
"""Test normalize data"""
# Setup
transformer = CategoricalTransformer(clip=False)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.57, 0.1234, 0.5, 0.69], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test__normalize_clip(self):
"""Test normalize data with clip=True"""
# Setup
transformer = CategoricalTransformer(clip=True)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.0, 0.1234, 1.0, 0.0], dtype=float)
pd.testing.assert_series_equal(result, expect)
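        # Note: the expected values above are consistent with _normalize wrapping out-of-range
        # values back into [0, 1) modulo 1 when clip=False (e.g. -0.43 -> 0.57, 1.5 -> 0.5),
        # and simply clamping to the [0, 1] bounds when clip=True.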
def test__reverse_transform_array(self):
"""Test reverse_transform a numpy.array"""
# Setup
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
rt_data = np.array([-0.6, 0.5, 0.6, 0.2, 0.1, -0.2])
transformer = CategoricalTransformer()
# Run
transformer._fit(data)
result = transformer._reverse_transform(rt_data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
assert transformer.intervals == expected_intervals
expect = pd.Series(data)
pd.testing.assert_series_equal(result, expect)
def test__transform_by_category_called(self):
"""Test that the `_transform_by_category` method is called.
When the number of rows is greater than the number of categories, expect
that the `_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 5 rows.
Output:
- the output of `_transform_by_category`.
Side effects:
- `_transform_by_category` will be called once.
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer._transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_category.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_category.return_value
def test__transform_by_category(self):
"""Test the `_transform_by_category` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 5 rows.
        Output:
- the transformed data.
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Asserts
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
def test__transform_by_category_nans(self):
"""Test the ``_transform_by_category`` method with data containing nans.
Validate that the data is transformed correctly when it contains nan's.
Setup:
- the categorical transformer is instantiated, and the appropriate ``intervals``
attribute is set.
Input:
- a pandas series containing nan's.
Output:
- a numpy array containing the transformed data.
"""
# Setup
data = pd.Series([np.nan, 3, 3, 2, np.nan])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
np.nan: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Asserts
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
@patch('rdt.transformers.categorical.norm')
def test__transform_by_category_fuzzy_true(self, norm_mock):
"""Test the ``_transform_by_category`` method when ``fuzzy`` is True.
Validate that the data is transformed correctly when ``fuzzy`` is True.
Setup:
- the categorical transformer is instantiated with ``fuzzy`` as True,
and the appropriate ``intervals`` attribute is set.
- the ``intervals`` attribute is set to a a dictionary of intervals corresponding
to the elements of the passed data.
- set the ``side_effect`` of the ``rvs_mock`` to the appropriate function.
Input:
- a pandas series.
Output:
- a numpy array containing the transformed data.
Side effect:
- ``rvs_mock`` should be called four times, one for each element of the
intervals dictionary.
"""
# Setup
def rvs_mock_func(loc, scale, **kwargs):
return loc
norm_mock.rvs.side_effect = rvs_mock_func
data = pd.Series([1, 3, 3, 2, 1])
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Assert
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
norm_mock.rvs.assert_has_calls([
call(0.125, 0.041666666666666664, size=0),
call(0.375, 0.041666666666666664, size=2),
call(0.625, 0.041666666666666664, size=1),
call(0.875, 0.041666666666666664, size=2),
])
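        # Note: in fuzzy mode the transformer draws norm(loc=interval mean, scale=interval std)
        # instead of returning the midpoint; rvs is mocked above to return loc, so the output
        # matches the non-fuzzy means while still verifying one draw per category with the
        # expected sizes.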
def test__transform_by_row_called(self):
"""Test that the `_transform_by_row` method is called.
When the number of rows is less than or equal to the number of categories,
expect that the `_transform_by_row` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 4 rows
Output:
- the output of `_transform_by_row`
Side effects:
- `_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer._transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_row.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_row.return_value
def test__transform_by_row(self):
"""Test the `_transform_by_row` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 4 rows
        Output:
- the transformed data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_row(data)
# Asserts
expected = np.array([0.875, 0.625, 0.375, 0.125])
assert (transformed == expected).all()
@patch('psutil.virtual_memory')
def test__reverse_transform_by_matrix_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_matrix` method is called.
When there is enough virtual memory, expect that the
`_reverse_transform_by_matrix` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_matrix`
Side effects:
- `_reverse_transform_by_matrix` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer._reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_matrix.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_matrix.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_matrix(self, psutil_mock):
"""Test the _reverse_transform_by_matrix method with numerical data
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories and means. Also patch
the `psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- transformed data with 4 rows
        Output:
- the original data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformed = pd.Series([0.875, 0.625, 0.375, 0.125])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
psutil_mock.return_value = virtual_memory
# Run
reverse = transformer._reverse_transform_by_matrix(transformed)
# Assert
pd.testing.assert_series_equal(data, reverse)
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_category` method is called.
When there is not enough virtual memory and the number of rows is greater than the
number of categories, expect that the `_reverse_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 5 rows
Output:
- the output of `_reverse_transform_by_category`
Side effects:
- `_reverse_transform_by_category` will be called once
"""
# Setup
transform_data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = transform_data
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer._reverse_transform(
categorical_transformer_mock, transform_data)
# Asserts
categorical_transformer_mock._reverse_transform_by_category.assert_called_once_with(
transform_data)
assert reverse == categorical_transformer_mock._reverse_transform_by_category.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category(self, psutil_mock):
"""Test the _reverse_transform_by_category method with numerical data.
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories, and the means
and intervals are set for those categories. Also patch the `psutil.virtual_memory`
function to return an `available_memory` of 1.
Input:
- transformed data with 5 rows
        Output:
- the original data
"""
data = pd.Series([1, 3, 3, 2, 1])
transformed = pd.Series([0.875, 0.375, 0.375, 0.625, 0.875])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
reverse = transformer._reverse_transform_by_category(transformed)
pd.testing.assert_series_equal(data, reverse)
def test__get_category_from_start(self):
"""Test the ``_get_category_from_start`` method.
Setup:
- instantiate a ``CategoricalTransformer``, and set the attribute ``starts``
to a pandas dataframe with ``set_index`` as ``'start'``.
Input:
- an integer, an index from data.
Output:
- a category from the data.
"""
# Setup
transformer = CategoricalTransformer()
transformer.starts = pd.DataFrame({
'start': [0.0, 0.5, 0.7],
'category': ['a', 'b', 'c']
}).set_index('start')
# Run
category = transformer._get_category_from_start(2)
# Assert
assert category == 'c'
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_row` method is called.
When there is not enough virtual memory and the number of rows is less than or equal
to the number of categories, expect that the `_reverse_transform_by_row` method
is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_row`
Side effects:
- `_reverse_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock.starts = pd.DataFrame(
[0., 0.25, 0.5, 0.75], index=[4, 3, 2, 1], columns=['category'])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer._reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_row.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_row.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row(self, psutil_mock):
"""Test the _reverse_transform_by_row method with numerical data.
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories, and the means, starts,
and intervals are set for those categories. Also patch the `psutil.virtual_memory`
function to return an `available_memory` of 1.
Input:
- transformed data with 4 rows
        Output:
- the original data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformed = pd.Series([0.875, 0.625, 0.375, 0.125])
transformer = CategoricalTransformer()
transformer.means = | pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1]) | pandas.Series |
import pandas as pd
df_ab = pd.DataFrame({'a': ['a_1', 'a_2', 'a_3'], 'b': ['b_1', 'b_2', 'b_3']})
df_ac = pd.DataFrame({'a': ['a_1', 'a_2', 'a_4'], 'c': ['c_1', 'c_2', 'c_4']})
print(df_ab)
# a b
# 0 a_1 b_1
# 1 a_2 b_2
# 2 a_3 b_3
print(df_ac)
# a c
# 0 a_1 c_1
# 1 a_2 c_2
# 2 a_4 c_4
print(pd.merge(df_ab, df_ac))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
print(df_ab.merge(df_ac))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
print(pd.merge(df_ab, df_ac, on='a'))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
df_ac_ = df_ac.rename(columns={'a': 'a_'})
print(df_ac_)
# a_ c
# 0 a_1 c_1
# 1 a_2 c_2
# 2 a_4 c_4
print(pd.merge(df_ab, df_ac_, left_on='a', right_on='a_'))
# a b a_ c
# 0 a_1 b_1 a_1 c_1
# 1 a_2 b_2 a_2 c_2
print(pd.merge(df_ab, df_ac_, left_on='a', right_on='a_').drop(columns='a_'))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
print(pd.merge(df_ab, df_ac, on='a', how='inner'))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
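# For comparison (not part of the original output above), an outer join keeps the union of
# keys and fills the gaps with NaN; the frame below is an illustrative sketch:
# print(pd.merge(df_ab, df_ac, on='a', how='outer'))
#      a    b    c
# 0  a_1  b_1  c_1
# 1  a_2  b_2  c_2
# 2  a_3  b_3  NaN
# 3  a_4  NaN  c_4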
print( | pd.merge(df_ab, df_ac, on='a', how='left') | pandas.merge |
import pandas as pd
import numpy as np
from data import Data
import pickle
class Stats():
def __init__(self, data):
        '''Accept either a Data instance or a pandas DataFrame.'''
if isinstance(data, Data):
self.df = data.df
elif isinstance(data, pd.DataFrame):
self.df = data
self.totalsparsity = self.calc_sparsity()
self.featuresparsity = self.calc_featuresparsity()
self.constants = self.constantvalues()
self.corrfeatures = self.correlation()
self.mean = self.calc_mean()
self.nonzero = self.calc_nonzero()
self.zero = self.calc_zero()
self.min = self.calc_min()
self.max = self.calc_max()
self.stddv = self.calc_stddv()
self.q1 = self.calc_q1()
self.median = self.calc_median()
self.q3 = self.calc_q3()
def calc_sparsity(self):
'''Calculate the sparsity of the selected data'''
zeroes = 0
for column in self.df.columns:
zeroes += np.count_nonzero(self.df[column] == 0)
return zeroes / (self.df.shape[0] * self.df.shape[1])
def calc_featuresparsity(self):
'''Calculate sparsity per feature'''
df = self.df
result = pd.DataFrame()
result['sparsity'] = df.apply(lambda x: np.count_nonzero(x == 0)/len(x))
return result
def constantvalues(self):
'''Collect the variables which have a contant value'''
constant_columns = [column for column in self.df.columns if len(self.df[column].unique()) < 2]
return constant_columns
def correlation(self):
'''Collect the high correlation variables (> 0.90)'''
corr = self.df.corr(method='pearson').abs()
        upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(bool))
high_correlations = [column for column in upper.columns if any(upper[column] > 0.90)]
return high_correlations
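        # Note: corr.where(upper-triangle mask) keeps each pair only once, so a column is flagged
        # when it correlates above 0.90 (in absolute value) with any column that precedes it.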
def calc_mean(self):
df = self.df
result = pd.DataFrame()
result['mean'] = df.apply(lambda x: np.mean(x))
return result
def calc_nonzero(self):
df = self.df
result = pd.DataFrame()
result['nonzero'] = df.apply(lambda x: (np.count_nonzero(x)))
return result
def calc_zero(self):
df = self.df
result = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import pandas.rpy.common as rcom
import rpy2.robjects as robjects
from rpy2.robjects.vectors import SexpVector, ListVector, StrSexpVector
import rpy2.robjects.numpy2ri as numpy2ri
import trtools.rpy.conversion as rconv
import trtools.rpy.tools as rtools
from trtools.rpy.rmodule import get_func, RPackage
import trtools.rpy.rplot as rplot
rplot.patch_call()
def pd_py2ri(o):
"""
"""
res = None
if isinstance(o, pd.Series):
o = | pd.DataFrame(o, index=o.index) | pandas.DataFrame |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
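# For illustration: get_upcast_box(pd.Index, pd.Series([1])) resolves to Series, while two
# Index-like inputs fall through to pd.Index, and anything wrapped in a DataFrame wins outright.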
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check that pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
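# int(pd.NaT) is the iNaT sentinel (the minimum int64 value); adding 1 gives
# a valid but extremely negative value that is prone to int64 overflow below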
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range('2012-1-1', periods=3, freq='D')
v2 = pd.date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
tm.assert_series_equal(rs, xp)
assert rs.dtype == 'timedelta64[ns]'
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
# series on the rhs
result = df['A'] - df['A'].shift()
assert result.dtype == 'timedelta64[ns]'
result = df['A'] + td
assert result.dtype == 'M8[ns]'
# scalar Timestamp on rhs
maxa = df['A'].max()
assert isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
assert resultb.dtype == 'timedelta64[ns]'
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
tm.assert_series_equal(result, expected)
assert result.dtype == 'm8[ns]'
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
assert resulta.dtype == 'm8[ns]'
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df['A'])
assert resultb.dtype == 'M8[ns]'
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(df['A'], resultb)
assert resultb.dtype == 'M8[ns]'
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
nat_series_dtype_timedelta = Series([NaT, NaT],
dtype='timedelta64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
tm.assert_series_equal(timedelta_series - NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
# addition
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
# multiplication
tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(timedelta_series * np.nan,
nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series,
nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / np.nan,
nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box_with_array)
msg = ("cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation")
with pytest.raises(TypeError, match=msg):
idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range('1 day', periods=3)
expected = pd.date_range('2012-01-02', periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range('2011-12-31', periods=3, freq='-1D')
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64('NaT')
tdi = timedelta_range('1 day', periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Operations with int-like others
def test_td64arr_add_int_series_invalid(self, box):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
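# integer addition is treated as a frequency-based shift for a TimedeltaIndex,
# so the Index box (freq=None here) raises NullFrequencyError instead of TypeError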
err = TypeError if box is not pd.Index else NullFrequencyError
int_ser = Series([2, 3, 4])
with pytest.raises(err):
tdser + int_ser
with pytest.raises(err):
int_ser + tdser
with pytest.raises(err):
tdser - int_ser
with pytest.raises(err):
int_ser - tdser
def test_td64arr_add_intlike(self, box_with_array):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box_with_array)
err = TypeError
if box_with_array in [pd.Index, tm.to_array]:
err = NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array,
scalar):
box = box_with_array
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta('5m4s').to_timedelta64()])
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
if box is pd.DataFrame and names[1] == 'Venkman':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_add_sub_td64_nat(self, box):
# GH#23320 special handling for timedelta64("NaT")
tdi = pd.TimedeltaIndex([NaT, Timedelta('1s')])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
# TODO: this was taken from tests.series.test_operators; de-duplicate
def test_timedelta64_operations_with_DateOffset(self):
# GH#10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(PerformanceWarning):
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3),
timedelta(minutes=5, seconds=6),
timedelta(hours=2, minutes=5, seconds=3)])
tm.assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
tm.assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box):
# GH#18849
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box):
# GH#18824, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = None if box_with_array is pd.DataFrame else PerformanceWarning
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box_df_fail):
# GH#18849
box = box_df_fail
box2 = Series if box in [pd.Index, tm.to_array] else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox,
box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box_with_array)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps:
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# TODO: Moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize("m", [1, 3, 10])
@pytest.mark.parametrize("unit", ['D', 'h', 'm', 's', 'ms', 'us', 'ns'])
def test_timedelta64_conversions(self, m, unit):
startdate = Series(pd.date_range('2013-01-01', '2013-01-03'))
enddate = Series(pd.date_range('2013-03-01', '2013-03-03'))
ser = enddate - startdate
ser[2] = np.nan
# op
expected = Series([x / np.timedelta64(m, unit) for x in ser])
result = ser / np.timedelta64(m, unit)
tm.assert_series_equal(result, expected)
# reverse op
expected = Series([Timedelta(np.timedelta64(m, unit)) / x
for x in ser])
result = np.timedelta64(m, unit) / ser
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array):
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng * two_hours
def test_tdi_mul_int_array_zerodim(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, xbox)
result = idx * pd.Series(np.arange(5, dtype='int64'))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype='float64')
expected = TimedeltaIndex(rng5f * (rng5f + 1.0))
expected = tm.box_expected(expected, xbox)
result = idx * Series(rng5f + 1.0)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize('other', [
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)
], ids=lambda x: type(x).__name__)
def test_tdi_rmul_arraylike(self, other, box_with_array):
box = box_with_array
xbox = get_upcast_box(box, other)
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
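# element-wise multiplication by an array does not preserve freq,
# so clear it on the expected index before boxing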
expected._data.freq = None
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__, __rdiv__
def test_td64arr_div_nat_invalid(self, box_with_array):
# don't allow division by NaT (maybe could in the future)
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError,
match="'?true_divide'? cannot use operands"):
rng / pd.NaT
with pytest.raises(TypeError, match='Cannot divide NaTType by'):
pd.NaT / rng
def test_td64arr_div_td64nat(self, box_with_array):
# GH#23829
rng = timedelta_range('1 days', '10 days',)
rng = tm.box_expected(rng, box_with_array)
other = np.timedelta64('NaT')
expected = np.array([np.nan] * 10)
expected = tm.box_expected(expected, box_with_array)
result = rng / other
tm.assert_equal(result, expected)
result = other / rng
tm.assert_equal(result, expected)
def test_td64arr_div_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx / 1
tm.assert_equal(result, idx)
with pytest.raises(TypeError, match='Cannot divide'):
# GH#23829
1 / idx
def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array):
# GH#20088, GH#22163 ensure DataFrame returns correct dtype
rng = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours,
box_with_array):
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = pd.Float64Index([12, np.nan, 24], name='foo')
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_td64_ndarray(self, box_with_array):
# GH#22631
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'])
expected = pd.Float64Index([12, np.nan, 24])
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
other = np.array([2, 4, 2], dtype='m8[h]')
result = rng / other
tm.assert_equal(result, expected)
result = rng / tm.box_expected(other, box_with_array)
tm.assert_equal(result, expected)
result = rng / other.astype(object)
tm.assert_equal(result, expected)
result = rng / list(other)
tm.assert_equal(result, expected)
# reversed op
expected = 1 / expected
result = other / rng
tm.assert_equal(result, expected)
result = tm.box_expected(other, box_with_array) / rng
tm.assert_equal(result, expected)
result = other.astype(object) / rng
tm.assert_equal(result, expected)
result = list(other) / rng
tm.assert_equal(result, expected)
def test_tdarr_div_length_mismatch(self, box_with_array):
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'])
mismatched = [1, 2, 3, 4]
rng = tm.box_expected(rng, box_with_array)
for obj in [mismatched, mismatched[:2]]:
# one shorter, one longer
for other in [obj, np.array(obj), pd.Index(obj)]:
with pytest.raises(ValueError):
rng / other
with pytest.raises(ValueError):
other / rng
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = td1 // scalar_td
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = scalar_td // td1
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box_with_array,
scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx // 1
tm.assert_equal(result, idx)
pattern = ('floor_divide cannot use operands|'
'Cannot divide int by Timedelta*')
with pytest.raises(TypeError, match=pattern):
1 // idx
def test_td64arr_floordiv_tdlike_scalar(self, two_hours, box_with_array):
tdi = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Int64Index((np.arange(10) + 1) * 12, name='foo')
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi // two_hours
tm.assert_equal(result, expected)
# TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=10, seconds=7),
Timedelta('10m7s'),
Timedelta('10m7s').to_timedelta64()
], ids=lambda x: type(x).__name__)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_with_array):
# GH#19125
tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
tdi = tm.box_expected(tdi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
res = tdi.__rfloordiv__(scalar_td)
tm.assert_equal(res, expected)
expected = pd.Index([0.0, 0.0, np.nan])
expected = tm.box_expected(expected, box_with_array, transpose=False)
res = tdi // (scalar_td)
tm.assert_equal(res, expected)
# ------------------------------------------------------------------
# mod, divmod
# TODO: operations with timedelta-like arrays, numeric arrays,
# reversed ops
def test_td64arr_mod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range('1 Day', '9 days')
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(['1 Day', '2 Days', '0 Days'] * 3)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % three_days
tm.assert_equal(result, expected)
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(tdarr, three_days)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // three_days)
def test_td64arr_mod_int(self, box_with_array):
tdi = timedelta_range('1 ns', '10 ns', periods=10)
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(['1 ns', '0 ns'] * 5)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % 2
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
2 % tdarr
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(tdarr, 2)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // 2)
def test_td64arr_rmod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range('1 Day', '9 days')
tdarr = tm.box_expected(tdi, box_with_array)
expected = ['0 Days', '1 Day', '0 Days'] + ['3 Days'] * 6
expected = TimedeltaIndex(expected)
expected = tm.box_expected(expected, box_with_array)
result = three_days % tdarr
tm.assert_equal(result, expected)
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(three_days, tdarr)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], three_days // tdarr)
# ------------------------------------------------------------------
# Operations with invalid others
def test_td64arr_mul_tdscalar_invalid(self, box_with_array, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box_with_array)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = 'operate|unsupported|cannot|not supported'
with pytest.raises(TypeError, match=pattern):
td1 * scalar_td
with pytest.raises(TypeError, match=pattern):
scalar_td * td1
def test_td64arr_mul_too_short_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx * idx[:3]
with pytest.raises(ValueError):
idx * np.array([1, 2])
def test_td64arr_mul_td64arr_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx * idx
# ------------------------------------------------------------------
# Operations with numeric others
@pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)])
def test_td64arr_mul_numeric_scalar(self, box_with_array, one):
# GH#4521
# divide/multiply by integers
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
expected = Series(['-59 Days', '-59 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdser * (-one)
tm.assert_equal(result, expected)
result = (-one) * tdser
tm.assert_equal(result, expected)
expected = Series(['118 Days', '118 Days', 'NaT'],
dtype='timedelta64[ns]')
expected = tm.box_expected(expected, box_with_array)
result = tdser * (2 * one)
tm.assert_equal(result, expected)
result = (2 * one) * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize('two', [2, 2.0, np.array(2), np.array(2.0)])
def test_td64arr_div_numeric_scalar(self, box_with_array, two):
# GH#4521
# divide/multiply by integers
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
expected = Series(['29.5D', '29.5D', 'NaT'], dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdser / two
tm.assert_equal(result, expected)
with pytest.raises(TypeError, match='Cannot divide'):
two / tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
def test_td64arr_rmul_numeric_array(self, box_with_array, vector, dtype):
# GH#4521
# divide/multiply by integers
xbox = get_upcast_box(box_with_array, vector)
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
vector = vector.astype(dtype)
expected = Series(['1180 Days', '1770 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, xbox)
result = tdser * vector
tm.assert_equal(result, expected)
result = vector * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
def test_td64arr_div_numeric_array(self, box_with_array, vector, dtype):
# GH#4521
# divide/multiply by integers
xbox = get_upcast_box(box_with_array, vector)
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
vector = vector.astype(dtype)
expected = Series(['2.95D', '1D 23H 12m', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, xbox)
result = tdser / vector
tm.assert_equal(result, expected)
pattern = ('true_divide cannot use operands|'
'cannot perform __div__|'
'cannot perform __truediv__|'
'unsupported operand|'
'Cannot divide')
with pytest.raises(TypeError, match=pattern):
vector / tdser
if not isinstance(vector, pd.Index):
# Index.__rdiv__ won't try to operate elementwise, just raises
result = tdser / vector.astype(object)
if box_with_array is pd.DataFrame:
expected = [tdser.iloc[0, n] / vector[n]
for n in range(len(vector))]
else:
expected = [tdser[n] / vector[n] for n in range(len(tdser))]
expected = | tm.box_expected(expected, xbox) | pandas.util.testing.box_expected |
from bapiw.api import API
from datetime import datetime, date
import pandas as pd
import numpy as np
bapiw = API()
class DataParser:
# intervals used when calling kline data
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#enum-definitions
INTERVAL_1MIN = '1m'
INTERVAL_3MIN = '3m'
INTERVAL_5MIN = '5m'
INTERVAL_15MIN = '15m'
INTERVAL_30MIN = '30m'
INTERVAL_1HR = '1h'
INTERVAL_2HR = '2h'
INTERVAL_4HR = '4h'
INTERVAL_6HR = '6h'
INTERVAL_8HR = '8h'
INTERVAL_12HR = '12h'
INTERVAL_1DAY = '1d'
INTERVAL_3DAY = '3d'
INTERVAL_1WEEK = '1w'
INTERVAL_1MONTH = '1M'
def getSymbols(self, onlyTrading=True, includes=''):
# pulls all exchange info
exchange = bapiw.get_exchangeInfo()
# by default onlyTrading is True, so only symbols whose status is
# TRADING on Binance are returned; otherwise all symbols are included.
# Look through the exchange data, check each symbol's status for TRADING
# and add the matching symbols to a list.
symbol_list = []
if onlyTrading:
for zd in exchange['symbols']:
if zd['status'] == 'TRADING':
symbol_list.append(zd['symbol'])
else:
for zd in exchange['symbols']:
symbol_list.append(zd['symbol'])
# create a dataframe with the symbols and rename the column from 0 to symbols
symbols = pd.DataFrame(symbol_list)
symbols = symbols.rename(columns={0: 'symbols'})
        # if includes isn't empty, only list the symbols that contain that string
if includes:
# searches for the symbols that contain string 'includes' and puts them in mysymbols var
mysymbols = symbols[symbols['symbols'].str.contains(includes)]
            # put those symbols in a DataFrame, reset the index
            # and drop the old, now-inaccurate index column
mysymbols = pd.DataFrame(mysymbols['symbols'])
mysymbols = mysymbols.reset_index()
mysymbols = mysymbols.drop(columns=['index'])
symbols = mysymbols
return symbols
def getKlines(self, symbol, interval, startTime='', endTime='', limit=500, data='ohlcv'):
# pull data from api
kdata = bapiw.get_klines(symbol=symbol, interval=interval, startTime=startTime, endTime=endTime, limit=limit)
# put data into dataframe and remove columns that aren't needed
df = | pd.DataFrame.from_dict(kdata) | pandas.DataFrame.from_dict |
#%%
"""Combine article data from json file into a single dataframe.
"""
import json
import pandas as pd
combined_df = pd.DataFrame() #columns=["Index_str", "Article_text"])
df = pd.read_csv("~/repo/StatMachLearn/NewsArticleClassification/data_to_group_copy.csv", header=0)
domain_list = list(set(df['domain'].values))
domain_list.remove("wsj.com")
for domain in domain_list:
print(domain)
with open(f"backup_data/{domain.rsplit('.', 1)[0]}.json") as f:
obj_json = json.load(f)
for article_dict in obj_json:
print(article_dict.keys())
try:
#combined_df.append({"Index_str": article_dict[0], "Article_text": article_dict[1] })
#combined_df = combined_df.append(article_dict, ignore_index=True)
new_df = | pd.DataFrame.from_dict(article_dict, orient="index") | pandas.DataFrame.from_dict |
import nose
import warnings
import os
import datetime
import numpy as np
import sys
from distutils.version import LooseVersion
from pandas import compat
from pandas.compat import u, PY3
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, period_range, Index, Categorical)
from pandas.core.common import PerformanceWarning
from pandas.io.packers import to_msgpack, read_msgpack
import pandas.util.testing as tm
from pandas.util.testing import (ensure_clean,
assert_categorical_equal,
assert_frame_equal,
assert_index_equal,
assert_series_equal,
patch)
from pandas.tests.test_panel import assert_panel_equal
import pandas
from pandas import Timestamp, NaT, tslib
nan = np.nan
try:
import blosc # NOQA
except ImportError:
_BLOSC_INSTALLED = False
else:
_BLOSC_INSTALLED = True
try:
import zlib # NOQA
except ImportError:
_ZLIB_INSTALLED = False
else:
_ZLIB_INSTALLED = True
_multiprocess_can_split_ = False
def check_arbitrary(a, b):
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
assert(len(a) == len(b))
for a_, b_ in zip(a, b):
check_arbitrary(a_, b_)
elif isinstance(a, Panel):
assert_panel_equal(a, b)
elif isinstance(a, DataFrame):
assert_frame_equal(a, b)
elif isinstance(a, Series):
assert_series_equal(a, b)
elif isinstance(a, Index):
assert_index_equal(a, b)
elif isinstance(a, Categorical):
# Temp,
# Categorical.categories is changed from str to bytes in PY3
# maybe the same as GH 13591
if PY3 and b.categories.inferred_type == 'string':
pass
else:
tm.assert_categorical_equal(a, b)
elif a is NaT:
assert b is NaT
elif isinstance(a, Timestamp):
assert a == b
assert a.freq == b.freq
else:
assert(a == b)
class TestPackers(tm.TestCase):
def setUp(self):
self.path = '__%s__.msg' % tm.rands(10)
def tearDown(self):
pass
def encode_decode(self, x, compress=None, **kwargs):
with ensure_clean(self.path) as p:
to_msgpack(p, x, compress=compress, **kwargs)
return read_msgpack(p, **kwargs)
class TestAPI(TestPackers):
def test_string_io(self):
df = DataFrame(np.random.randn(10, 2))
s = df.to_msgpack(None)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(compat.BytesIO(s))
tm.assert_frame_equal(result, df)
s = to_msgpack(None, df)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
with ensure_clean(self.path) as p:
s = df.to_msgpack()
fh = open(p, 'wb')
fh.write(s)
fh.close()
result = read_msgpack(p)
tm.assert_frame_equal(result, df)
def test_iterator_with_string_io(self):
dfs = [DataFrame(np.random.randn(10, 2)) for i in range(5)]
s = to_msgpack(None, *dfs)
for i, result in enumerate(read_msgpack(s, iterator=True)):
tm.assert_frame_equal(result, dfs[i])
def test_invalid_arg(self):
# GH10369
class A(object):
def __init__(self):
self.read = 0
tm.assertRaises(ValueError, read_msgpack, path_or_buf=None)
tm.assertRaises(ValueError, read_msgpack, path_or_buf={})
tm.assertRaises(ValueError, read_msgpack, path_or_buf=A())
class TestNumpy(TestPackers):
def test_numpy_scalar_float(self):
x = np.float32(np.random.rand())
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_scalar_complex(self):
x = np.complex64(np.random.rand() + 1j * np.random.rand())
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_scalar_float(self):
x = np.random.rand()
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_scalar_complex(self):
x = np.random.rand() + 1j * np.random.rand()
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_numpy_float(self):
x = [np.float32(np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_numpy_float_complex(self):
if not hasattr(np, 'complex128'):
raise nose.SkipTest('numpy cant handle complex128')
x = [np.float32(np.random.rand()) for i in range(5)] + \
[np.complex128(np.random.rand() + 1j * np.random.rand())
for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_float(self):
x = [np.random.rand() for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_float_complex(self):
x = [np.random.rand() for i in range(5)] + \
[(np.random.rand() + 1j * np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_dict_float(self):
x = {'foo': 1.0, 'bar': 2.0}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_complex(self):
x = {'foo': 1.0 + 1.0j, 'bar': 2.0 + 2.0j}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_dict_numpy_float(self):
x = {'foo': np.float32(1.0), 'bar': np.float32(2.0)}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_numpy_complex(self):
x = {'foo': np.complex128(1.0 + 1.0j),
'bar': np.complex128(2.0 + 2.0j)}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_numpy_array_float(self):
# run multiple times
for n in range(10):
x = np.random.rand(10)
for dtype in ['float32', 'float64']:
x = x.astype(dtype)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_array_complex(self):
x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128)
x_rec = self.encode_decode(x)
self.assertTrue(all(map(lambda x, y: x == y, x, x_rec)) and
x.dtype == x_rec.dtype)
def test_list_mixed(self):
x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo')]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
class TestBasic(TestPackers):
def test_timestamp(self):
for i in [Timestamp(
'20130101'), Timestamp('20130101', tz='US/Eastern'),
Timestamp('201301010501')]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_nat(self):
nat_rec = self.encode_decode(NaT)
self.assertIs(NaT, nat_rec)
def test_datetimes(self):
# fails under 2.6/win32 (np.datetime64 seems broken)
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('2.6 with np.datetime64 is broken')
for i in [datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 1, 5, 1),
datetime.date(2013, 1, 1),
np.datetime64(datetime.datetime(2013, 1, 5, 2, 15))]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_timedeltas(self):
for i in [datetime.timedelta(days=1),
datetime.timedelta(days=1, seconds=10),
np.timedelta64(1000000)]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
class TestIndex(TestPackers):
def setUp(self):
super(TestIndex, self).setUp()
self.d = {
'string': tm.makeStringIndex(100),
'date': tm.makeDateIndex(100),
'int': tm.makeIntIndex(100),
'rng': tm.makeRangeIndex(100),
'float': tm.makeFloatIndex(100),
'empty': Index([]),
'tuple': Index(zip(['foo', 'bar', 'baz'], [1, 2, 3])),
'period': Index(period_range('2012-1-1', freq='M', periods=3)),
'date2': Index(date_range('2013-01-1', periods=10)),
'bdate': Index( | bdate_range('2013-01-02', periods=10) | pandas.bdate_range |
import copy
import pandas as pd
from bokeh.plotting import figure, show, output_notebook
from bokeh.models import Legend, Span
# from bokeh.models import HoverTool
from ..utils import in_ipynb
from .plotobj import BasePlot
from .plotutils import get_color
_INITED = False
class BokehPlot(BasePlot):
def __init__(self, size=None, theme=None):
global _INITED
if not _INITED:
if in_ipynb():
output_notebook(hide_banner=True)
size = size or (800, 500)
self.width = size[0]
self.height = size[1]
self.figure = figure(toolbar_location="below",
toolbar_sticky=False,
x_axis_type='datetime',
plot_width=self.width,
plot_height=self.height) # TODO remove
self.legend = []
def show(self, title='', xlabel='', ylabel='', xaxis=True, yaxis=True, xticks=True, yticks=True, legend=True, grid=True, **kwargs):
# self.figure.add_tools(*[HoverTool(
# tooltips=[('x', '@x{%F}'), ('y', '@y')],
# formatters={'x': 'datetime'},
# mode='vline'
# ) for _ in data])
self.figure.outline_line_color = None
# vline = Span(location=0, dimension='height', line_color='red', line_width=3)
hline = Span(location=0, dimension='width', line_color='black', line_width=1)
self.figure.renderers.append(hline)
if xlabel:
self.figure.xaxis.axis_label = kwargs.get('xlabel')
if ylabel:
self.figure.yaxis.axis_label = kwargs.get('ylabel')
if title:
self.figure.title.text = kwargs.get('title')
if legend:
self.figure.legend.location = (self.width + 10, self.height + 10)
legend = Legend(items=self.legend, location=(10, 100))
legend.items = self.legend
legend.click_policy = "mute"
self.figure.add_layout(legend, 'right')
else:
self.figure.legend.location = None
if not grid:
self.figure.xgrid.grid_line_color = None
self.figure.ygrid.grid_line_color = None
# FIXME
if not yaxis:
for ax in self.figure.yaxis:
ax.axis_line_color = 'white'
if not xaxis:
for ax in self.figure.xaxis:
ax.axis_line_color = 'white'
# Turn off labels:
# self.figure.xaxis.major_label_text_font_size = '0pt'
show(self.figure)
return self.figure
def area(self, data, color=None, y_axis='left', stacked=False, **kwargs):
data2 = data.append(data.iloc[-1] * 0)
data2 = data2.append(data2.iloc[0] * 0)
data2 = data2.sort_index()
x, y = copy.deepcopy(data2.iloc[0]), copy.deepcopy(data2.iloc[1])
data2.iloc[0], data2.iloc[1] = y, x
for i, col in enumerate(data):
c = get_color(i, col, color)
fig = self.figure.patch(x=data2.index, y=data2[col].values, legend=col, fill_alpha=.2, color=c, **kwargs)
self.legend.append((col, [fig]))
# for stacked: https://bokeh.pydata.org/en/latest/docs/gallery/brewer.html
# p.patches([x2] * areas.shape[1], [areas[c].values for c in areas], color=colors, alpha=0.8, line_color=None)
def _stacked(df):
df_top = df.cumsum(axis=1)
df_bottom = df_top.shift(axis=1).fillna({'y0': 0})[::-1]
df_stack = | pd.concat([df_bottom, df_top], ignore_index=True) | pandas.concat |
import itertools
import pathlib
import numpy as np
import pandas as pd
import os
from statistics import median_low
import click
import re
# Unimod parsing
import xml.etree.cElementTree as ET
from xml.etree.cElementTree import iterparse
# mzXML parsing
import pyopenms as po
class pepxml:
def __init__(self, pepxml_file, unimod, base_name, exclude_range):
self.pepxml_file = pepxml_file
self.base_name = base_name
self.psms = self.parse_pepxml()
self.exclude_range = exclude_range
self.match_unimod(unimod)
def get(self):
return(self.psms)
def match_unimod(self, unimod):
def match_modifications(um, peptide):
monomeric_masses = {"A": 71.03711, "R": 156.10111, "N": 114.04293, "D": 115.02694, "C": 103.00919, "E": 129.04259, "Q": 128.05858, "G": 57.02146, "H": 137.05891, "I": 113.08406, "L": 113.08406, "K": 128.09496, "M": 131.04049, "F": 147.06841, "P": 97.05276, "S": 87.03203, "T": 101.04768, "W": 186.07931, "Y": 163.06333, "V": 99.06841}
modified_peptide = peptide['peptide_sequence']
# parse terminal modifications
nterm_modification = ""
            if peptide['nterm_modification'] != "":
nterm_modification = peptide['nterm_modification'] - 1.0078
cterm_modification = ""
            if peptide['cterm_modification'] != "":
cterm_modification = peptide['cterm_modification']
# parse closed modifications
modifications = {}
if "M|" in peptide['modifications']:
for modification in peptide['modifications'].split('|')[1:]:
site, mass = modification.split('$')
delta_mass = float(mass) - monomeric_masses[peptide['peptide_sequence'][int(site)-1]]
modifications[int(site)] = delta_mass
massdiff = float(peptide['massdiff'])
if massdiff < self.exclude_range[0] or massdiff > self.exclude_range[1]:
# parse open modifications
oms_sequence = peptide['peptide_sequence']
for site in modifications.keys():
oms_sequence = oms_sequence[:site-1] + "_" + oms_sequence[site:]
oms_modifications, nterm_modification, cterm_modification = um.get_oms_id(oms_sequence, peptide['massdiff'], nterm_modification, cterm_modification)
modifications = {**modifications, **oms_modifications}
peptide_sequence = peptide['peptide_sequence']
peptide_length = len(peptide_sequence)
for site in sorted(modifications, reverse=True):
positions = ('Anywhere', 'Any N-term', 'Protein N-term') if site == 1 else \
('Anywhere', 'Any C-term', 'Protein C-term') if site == peptide_length else \
'Anywhere'
record_id0 = um.get_id(peptide_sequence[site - 1], positions, modifications[site])
if isinstance(record_id0, tuple):
record_id, position = record_id0
else:
record_id = record_id0
is_N_term = isinstance(record_id0, tuple) and position in ('Any N-term', 'Protein N-term')
if record_id == -1:
raise click.ClickException("Error: Could not annotate site %s (%s) from peptide %s with delta mass %s." % (site, peptide['peptide_sequence'][site-1], peptide['peptide_sequence'], modifications[site]))
modified_peptide = "(UniMod:" + str(record_id) + ")" + modified_peptide \
if is_N_term else \
modified_peptide[:site] + "(UniMod:" + str(record_id) + ")" + modified_peptide[site:]
            if nterm_modification != "":
record_id_nterm = um.get_id("N-term", 'Any N-term', nterm_modification)
if record_id_nterm == -1:
record_id_nterm = um.get_id("N-term", 'Protein N-term', nterm_modification)
if record_id_nterm == -1:
raise click.ClickException("Error: Could not annotate N-terminus from peptide %s with delta mass %s." % (peptide['peptide_sequence'], nterm_modification))
modified_peptide = ".(UniMod:" + str(record_id_nterm) + ")" + modified_peptide
            if cterm_modification != "":
record_id_cterm = um.get_id("C-term", 'Any C-term', cterm_modification)
if record_id_cterm == -1:
record_id_cterm = um.get_id("C-term", 'Protein C-term', cterm_modification)
if record_id_cterm == -1:
raise click.ClickException("Error: Could not annotate C-terminus from peptide %s with delta mass %s." % (peptide['peptide_sequence'], cterm_modification))
modified_peptide = modified_peptide + ".(UniMod:" + str(record_id_cterm) + ")"
return modified_peptide
if self.psms.shape[0] > 0:
self.psms['modified_peptide'] = self.psms[['peptide_sequence','modifications','nterm_modification','cterm_modification','massdiff']].apply(lambda x: match_modifications(unimod, x), axis=1)
def parse_pepxml(self):
peptides = []
namespaces = {'pepxml_ns': "http://regis-web.systemsbiology.net/pepXML"}
ET.register_namespace('', "http://regis-web.systemsbiology.net/pepXML")
context = iterparse(self.pepxml_file, events=("end",))
for event, elem in context:
if elem.tag == "{http://regis-web.systemsbiology.net/pepXML}msms_run_summary":
base_name = os.path.basename(elem.attrib['base_name'])
# only proceed if base_name matches
if base_name == self.base_name:
# find decoy prefix
decoy_prefix = ""
for search_summary in elem.findall('.//pepxml_ns:search_summary', namespaces):
for parameter in search_summary.findall('.//pepxml_ns:parameter', namespaces):
if parameter.attrib['name'] == 'decoy_prefix':
decoy_prefix = parameter.attrib['value']
# go through all spectrum queries
for spectrum_query in elem.findall('.//pepxml_ns:spectrum_query', namespaces):
index = spectrum_query.attrib['index']
start_scan = spectrum_query.attrib['start_scan']
end_scan = spectrum_query.attrib['end_scan']
assumed_charge = spectrum_query.attrib['assumed_charge']
retention_time_sec = spectrum_query.attrib['retention_time_sec']
ion_mobility = np.nan
if 'ion_mobility' in spectrum_query.attrib:
ion_mobility = spectrum_query.attrib['ion_mobility']
for search_result in spectrum_query.findall(".//pepxml_ns:search_result", namespaces):
for search_hit in search_result.findall(".//pepxml_ns:search_hit", namespaces):
hit_rank = search_hit.attrib['hit_rank']
massdiff = search_hit.attrib['massdiff']
# parse peptide and protein information
peptide = search_hit.attrib['peptide']
unprocessed_proteins = [search_hit.attrib['protein']]
for alternative_protein in search_hit.findall('.//pepxml_ns:alternative_protein', namespaces):
unprocessed_proteins.append(alternative_protein.attrib['protein'])
# remove decoy results from mixed target/decoy hits
has_targets = False
has_decoys = False
for prot in unprocessed_proteins:
if decoy_prefix in prot:
has_decoys = True
else:
has_targets = True
processed_proteins = []
for prot in unprocessed_proteins:
if has_targets and has_decoys:
if decoy_prefix not in prot:
processed_proteins.append(prot)
else:
processed_proteins.append(prot)
num_tot_proteins = len(processed_proteins)
is_decoy = False
if has_decoys and not has_targets:
is_decoy = True
proteins = {}
for prot in processed_proteins:
# Remove UniProt prefixes if necessary
if decoy_prefix + "sp|" in prot:
proteins[decoy_prefix + prot.split("|")[1]] = ""
elif "sp|" in prot:
proteins[prot.split("|")[1]] = prot.split("|")[2].split(" ")[0].split("_")[0]
else:
proteins[prot] = prot
protein = ""
gene = ""
for key in sorted(proteins):
if protein == "":
protein = key
else:
protein = protein + ";" + key
if gene == "":
gene = proteins[key]
else:
gene = gene + ";" + proteins[key]
# parse PTM information
modifications = "M"
nterm_modification = ""
cterm_modification = ""
for modification_info in search_hit.findall('.//pepxml_ns:modification_info', namespaces):
if 'mod_nterm_mass' in modification_info.attrib:
nterm_modification = float(modification_info.attrib['mod_nterm_mass'])
if 'mod_cterm_mass' in modification_info.attrib:
cterm_modification = float(modification_info.attrib['mod_cterm_mass'])
for mod_aminoacid_mass in modification_info.findall('.//pepxml_ns:mod_aminoacid_mass', namespaces):
modifications = modifications + "|" + mod_aminoacid_mass.attrib['position'] + "$" + mod_aminoacid_mass.attrib['mass']
# parse search engine score information
scores = {}
for search_score in search_hit.findall('.//pepxml_ns:search_score', namespaces):
scores["var_" + search_score.attrib['name']] = float(search_score.attrib['value'])
# parse PeptideProphet or iProphet results if available
for analysis_result in search_hit.findall('.//pepxml_ns:analysis_result', namespaces):
if analysis_result.attrib['analysis'] == 'interprophet':
for interprophet_result in analysis_result.findall('.//pepxml_ns:interprophet_result', namespaces):
scores["pep"] = 1.0 - float(interprophet_result.attrib['probability'])
prev_pep = scores["pep"]
elif analysis_result.attrib['analysis'] == 'peptideprophet':
for peptideprophet_result in analysis_result.findall('.//pepxml_ns:peptideprophet_result', namespaces):
scores["pep"] = 1.0 - float(peptideprophet_result.attrib['probability'])
prev_pep = scores["pep"]
if "pep" not in scores:
# If 2 search hits have the same rank only the first one has the analysis_result explicitly written out.
scores["pep"] = prev_pep
peptides.append({**{'run_id': base_name, 'scan_id': int(start_scan), 'hit_rank': int(hit_rank), 'massdiff': float(massdiff), 'precursor_charge': int(assumed_charge), 'retention_time': float(retention_time_sec), 'ion_mobility': float(ion_mobility), 'peptide_sequence': peptide, 'modifications': modifications, 'nterm_modification': nterm_modification, 'cterm_modification': cterm_modification, 'protein_id': protein, 'gene_id': gene, 'num_tot_proteins': num_tot_proteins, 'decoy': is_decoy}, **scores})
elem.clear()
df = pd.DataFrame(peptides)
return(df)
class idxml:
def __init__(self, idxml_file, base_name):
self.idxml_file = idxml_file
self.base_name = base_name
self.psms = self.parse_idxml()
#self.exclude_range = exclude_range
#self.match_unimod(unimod)
def get(self):
return(self.psms)
def parse_idxml(self):
peptides = []
proteins = []
scores = {}
parsed_peptides = []
po.IdXMLFile().load(self.idxml_file, proteins, peptides)
for p in peptides:
#search engine scores
scores["var_MS:1002252_Comet:XCorr"] = float(p.getHits()[0].getMetaValue('MS:1002252'))
scores["var_MS:1002253_Comet:DeltCn"] = float(p.getHits()[0].getMetaValue('MS:1002253'))
#percolator probability
scores["q_value"] = float(p.getHits()[0].getMetaValue('MS:1001491'))
scores["pep"] = float(p.getHits()[0].getMetaValue('MS:1001491'))
parsed_peptides.append({**{'run_id': self.base_name,
'scan_id': int(str(p.getMetaValue("spectrum_reference")).split('scan=')[-1].strip("'")),
'hit_rank': int(p.getHits()[0].getRank()),
'massdiff': float(0),
'precursor_charge': int(p.getHits()[0].getCharge()),
'retention_time': float(p.getRT()),
'modified_peptide': p.getHits()[0].getSequence().toUniModString().decode("utf-8"),
'peptide_sequence': p.getHits()[0].getSequence().toUnmodifiedString().decode("utf-8"),
'modifications': '-',
'nterm_modification': '-',
'cterm_modification': '-',
'protein_id': ','.join([prot.getProteinAccession().decode("utf-8") for prot in p.getHits()[0].getPeptideEvidences()]),
'gene_id': '-',
'num_tot_proteins': len([prot.getProteinAccession() for prot in p.getHits()[0].getPeptideEvidences()]),
'decoy': p.getHits()[0].getMetaValue('target_decoy').decode("utf-8")=='decoy'}, **scores})
df = pd.DataFrame(parsed_peptides)
return (df)
class unimod:
def __init__(self, unimod_file, max_delta):
self.unimod_file = unimod_file
self.max_delta = max_delta
self.ptms = self.parse_unimod()
def parse_unimod(self):
namespaces = {'umod': "http://www.unimod.org/xmlns/schema/unimod_2"}
ET.register_namespace('', "http://www.unimod.org/xmlns/schema/unimod_2")
tree = ET.parse(self.unimod_file)
root = tree.getroot()
ptms = {}
sites = ['A','R','N','D','C','E','Q','G','H','O','I','L','K','M','F','P','U','S','T','W','Y','V','N-term','C-term']
positions = ['Anywhere','Any N-term','Any C-term','Protein N-term','Protein C-term']
for site in sites:
ptms[site] = {}
for position in positions:
for site in sites:
ptms[site][position] = {}
for modifications in root.findall('.//umod:modifications', namespaces):
for modification in modifications.findall('.//umod:mod', namespaces):
for specificity in modification.findall('.//umod:specificity', namespaces):
ptms[specificity.attrib['site']][specificity.attrib['position']][int(modification.attrib['record_id'])] = float(modification.findall('.//umod:delta', namespaces)[0].attrib['mono_mass'])
return ptms
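    # The nested dict built above is keyed as
    #   ptms[site][position][record_id] = monoisotopic delta mass,
    # so, for instance, ptms['M']['Anywhere'][35] would hold the mass shift of
    # UniMod record 35 (oxidation) on methionine, as read from unimod.xml.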
def get_id(self, site, position, delta_mass):
candidates = {}
min_id = -1
ptms_site = self.ptms[site]
search_multiple_positions = isinstance(position, (list, tuple))
kvs = itertools.chain.from_iterable((((k, p), v) for k,v in ptms_site[p].items()) for p in position) \
if search_multiple_positions else \
ptms_site[position].items()
for key, value in kvs:
delta_mod = abs(value - float(delta_mass))
if delta_mod < self.max_delta:
if key in candidates.keys():
if delta_mod < candidates[key]:
candidates[key] = delta_mod
else:
candidates[key] = delta_mod
if len(candidates) > 0:
min_id = min(candidates, key=candidates.get)
return(min_id)
def get_oms_id(self, sequence, massdiff, nterm_modification, cterm_modification):
record_ids = {}
for site, aa in enumerate(sequence):
if aa != "_":
record_id_site = self.get_id(aa, 'Anywhere', massdiff)
if record_id_site != -1:
record_ids[site+1] = record_id_site
record_id_nterm = -1
if nterm_modification == "":
record_id_nterm = self.get_id("N-term", 'Any N-term', massdiff)
if record_id_nterm == -1:
record_id_nterm = self.get_id("N-term", 'Protein N-term', massdiff)
record_id_cterm = -1
if cterm_modification == "":
record_id_cterm = self.get_id("C-term", 'Any C-term', massdiff)
if record_id_cterm == -1:
record_id_cterm = self.get_id("C-term", 'Protein C-term', massdiff)
        # prefer residue-level modifications over N-term, then over C-term modifications
aamod = {}
if len(record_ids) > 0:
aasite = median_low(list(record_ids.keys()))
aamod[aasite] = massdiff
elif record_id_nterm != -1:
nterm_modification = massdiff
elif record_id_cterm != -1:
cterm_modification = massdiff
return aamod, nterm_modification, cterm_modification
def read_mzml_or_mzxml_impl(path, psms, theoretical, max_delta_ppm, filetype):
assert filetype in ('mzml', 'mzxml')
fh = po.MzMLFile() if filetype=='mzml' else po.MzXMLFile()
fh.setLogType(po.LogType.CMD)
input_map = po.MSExperiment()
fh.load(path, input_map)
peaks_list = []
for ix, psm in psms.iterrows():
scan_id = psm['scan_id']
ionseries = theoretical[psm['modified_peptide']][psm['precursor_charge']]
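        # pepXML scan numbers are 1-based while MSExperiment spectra are indexed
        # from 0, hence the scan_id - 1 below (this assumes a direct
        # scan-number-to-index mapping in the input file).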
spectrum = input_map.getSpectrum(scan_id - 1)
fragments = []
product_mzs = []
intensities = []
for peak in spectrum:
fragment, product_mz = annotate_mass(peak.getMZ(), ionseries, max_delta_ppm)
if fragment is not None:
fragments.append(fragment)
product_mzs.append(product_mz)
intensities.append(peak.getIntensity())
peaks = pd.DataFrame({'fragment': fragments, 'product_mz': product_mzs, 'intensity': intensities})
peaks['scan_id'] = scan_id
peaks['precursor_mz'] = po.AASequence.fromString(po.String(psm['modified_peptide'])).getMonoWeight(po.Residue.ResidueType.Full, psm['precursor_charge']) / psm['precursor_charge'];
peaks['modified_peptide'] = psm['modified_peptide']
peaks['precursor_charge'] = psm['precursor_charge']
# Baseline normalization to highest annotated peak
max_intensity = np.max(peaks['intensity'])
if max_intensity > 0:
peaks['intensity'] = peaks['intensity'] * (10000 / max_intensity)
peaks_list.append(peaks)
if len(peaks_list) > 0:
transitions = | pd.concat(peaks_list) | pandas.concat |
import inspect
import os
import sys
import time
import unittest
import warnings
from concurrent.futures.process import ProcessPoolExecutor
from contextlib import contextmanager
from glob import glob
from runpy import run_path
from tempfile import NamedTemporaryFile, gettempdir
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pandas as pd
from backtesting import Backtest, Strategy
from backtesting.lib import (
OHLCV_AGG,
barssince,
cross,
crossover,
quantile,
SignalStrategy,
TrailingStrategy,
resample_apply,
plot_heatmaps,
random_ohlc_data,
)
from backtesting.test import GOOG, EURUSD, SMA
from backtesting._util import _Indicator, _as_str, _Array, try_
SHORT_DATA = GOOG.iloc[:20] # Short data for fast tests with no indicator lag
@contextmanager
def _tempfile():
with NamedTemporaryFile(suffix='.html') as f:
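        # On Windows an open NamedTemporaryFile keeps the file locked, so it is
        # closed here to let other writers (e.g. the plot export) reuse the path.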
if sys.platform.startswith('win'):
f.close()
yield f.name
@contextmanager
def chdir(path):
cwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(cwd)
class SmaCross(Strategy):
# NOTE: These values are also used on the website!
fast = 10
slow = 30
def init(self):
self.sma1 = self.I(SMA, self.data.Close, self.fast)
self.sma2 = self.I(SMA, self.data.Close, self.slow)
def next(self):
if crossover(self.sma1, self.sma2):
self.position.close()
self.buy()
elif crossover(self.sma2, self.sma1):
self.position.close()
self.sell()
class TestBacktest(TestCase):
def test_run(self):
bt = Backtest(EURUSD, SmaCross)
bt.run()
def test_run_invalid_param(self):
bt = Backtest(GOOG, SmaCross)
self.assertRaises(AttributeError, bt.run, foo=3)
def test_run_speed(self):
bt = Backtest(GOOG, SmaCross)
start = time.process_time()
bt.run()
end = time.process_time()
self.assertLess(end - start, .3)
def test_data_missing_columns(self):
df = GOOG.copy(deep=False)
del df['Open']
with self.assertRaises(ValueError):
Backtest(df, SmaCross).run()
def test_data_nan_columns(self):
df = GOOG.copy()
df['Open'] = np.nan
with self.assertRaises(ValueError):
Backtest(df, SmaCross).run()
def test_data_extra_columns(self):
df = GOOG.copy(deep=False)
df['P/E'] = np.arange(len(df))
df['MCap'] = np.arange(len(df))
class S(Strategy):
def init(self):
assert len(self.data.MCap) == len(self.data.Close)
assert len(self.data['P/E']) == len(self.data.Close)
def next(self):
assert len(self.data.MCap) == len(self.data.Close)
assert len(self.data['P/E']) == len(self.data.Close)
Backtest(df, S).run()
def test_data_invalid(self):
with self.assertRaises(TypeError):
Backtest(GOOG.index, SmaCross).run()
with self.assertRaises(ValueError):
Backtest(GOOG.iloc[:0], SmaCross).run()
def test_assertions(self):
class Assertive(Strategy):
def init(self):
self.sma = self.I(SMA, self.data.Close, 10)
self.remains_indicator = np.r_[2] * np.cumsum(self.sma * 5 + 1) * np.r_[2]
self.transpose_invalid = self.I(lambda: np.column_stack((self.data.Open,
self.data.Close)))
resampled = resample_apply('W', SMA, self.data.Close, 3)
resampled_ind = resample_apply('W', SMA, self.sma, 3)
assert np.unique(resampled[-5:]).size == 1
assert np.unique(resampled[-6:]).size == 2
assert resampled in self._indicators, "Strategy.I not called"
assert resampled_ind in self._indicators, "Strategy.I not called"
assert 1 == try_(lambda: self.data.X, 1, AttributeError)
assert 1 == try_(lambda: self.data['X'], 1, KeyError)
assert self.data.pip == .01
assert float(self.data.Close) == self.data.Close[-1]
def next(self, FIVE_DAYS=pd.Timedelta('3 days')):
assert self.equity >= 0
assert isinstance(self.sma, _Indicator)
assert isinstance(self.remains_indicator, _Indicator)
assert self.remains_indicator.name
assert isinstance(self.remains_indicator._opts, dict)
assert not np.isnan(self.data.Open[-1])
assert not np.isnan(self.data.High[-1])
assert not np.isnan(self.data.Low[-1])
assert not np.isnan(self.data.Close[-1])
assert not np.isnan(self.data.Volume[-1])
assert not np.isnan(self.sma[-1])
assert self.data.index[-1]
self.position
self.position.size
self.position.pl
self.position.pl_pct
self.position.is_long
if crossover(self.sma, self.data.Close):
self.orders.cancel() # cancels only non-contingent
price = self.data.Close[-1]
sl, tp = 1.05 * price, .9 * price
n_orders = len(self.orders)
self.sell(size=.21, limit=price, stop=price, sl=sl, tp=tp)
assert len(self.orders) == n_orders + 1
order = self.orders[-1]
assert order.limit == price
assert order.stop == price
assert order.size == -.21
assert order.sl == sl
assert order.tp == tp
assert not order.is_contingent
elif self.position:
assert not self.position.is_long
assert self.position.is_short
assert self.position.pl
assert self.position.pl_pct
assert self.position.size < 0
trade = self.trades[0]
if self.data.index[-1] - self.data.index[trade.entry_bar] > FIVE_DAYS:
assert not trade.is_long
assert trade.is_short
assert trade.size < 0
assert trade.entry_bar > 0
assert isinstance(trade.entry_time, pd.Timestamp)
assert trade.exit_bar is None
assert trade.exit_time is None
assert trade.entry_price > 0
assert trade.exit_price is None
assert trade.pl / 1
assert trade.pl_pct / 1
assert trade.value > 0
assert trade.sl
assert trade.tp
# Close multiple times
self.position.close(.5)
self.position.close(.5)
self.position.close(.5)
self.position.close()
self.position.close()
bt = Backtest(GOOG, Assertive)
with self.assertWarns(UserWarning):
stats = bt.run()
self.assertEqual(stats['# Trades'], 145)
def test_broker_params(self):
bt = Backtest(GOOG.iloc[:100], SmaCross,
cash=1000, commission=.01, margin=.1, trade_on_close=True)
bt.run()
def test_dont_overwrite_data(self):
df = EURUSD.copy()
bt = Backtest(df, SmaCross)
bt.run()
bt.optimize(fast=4, slow=[6, 8])
bt.plot(plot_drawdown=True, open_browser=False)
self.assertTrue(df.equals(EURUSD))
def test_strategy_abstract(self):
class MyStrategy(Strategy):
pass
self.assertRaises(TypeError, MyStrategy, None, None)
def test_strategy_str(self):
bt = Backtest(GOOG.iloc[:100], SmaCross)
self.assertEqual(str(bt.run()._strategy), SmaCross.__name__)
self.assertEqual(str(bt.run(fast=11)._strategy), SmaCross.__name__ + '(fast=11)')
def test_compute_drawdown(self):
dd = pd.Series([0, 1, 7, 0, 4, 0, 0])
durations, peaks = Backtest._compute_drawdown_duration_peaks(dd)
np.testing.assert_array_equal(durations, pd.Series([3, 2], index=[3, 5]).reindex(dd.index))
np.testing.assert_array_equal(peaks, pd.Series([7, 4], index=[3, 5]).reindex(dd.index))
def test_compute_stats(self):
stats = Backtest(GOOG, SmaCross).run()
expected = pd.Series({
# NOTE: These values are also used on the website!
'# Trades': 66,
'Avg. Drawdown Duration': pd.Timedelta('41 days 00:00:00'),
'Avg. Drawdown [%]': -5.925851581948801,
'Avg. Trade Duration': pd.Timedelta('46 days 00:00:00'),
'Avg. Trade [%]': 2.531715975158555,
'Best Trade [%]': 53.59595229490424,
'Buy & Hold Return [%]': 703.4582419772772,
'Calmar Ratio': 0.4414380935608377,
'Duration': pd.Timedelta('3116 days 00:00:00'),
'End': | pd.Timestamp('2013-03-01 00:00:00') | pandas.Timestamp |
import glob
import os
import random
import soundfile as sf
import torch
import yaml
import json
import argparse
import pandas as pd
from tqdm import tqdm
from pprint import pprint
from asteroid.metrics import get_metrics
from model import load_best_model
from local.preprocess_dns import make_wav_id_dict
parser = argparse.ArgumentParser()
parser.add_argument(
"--test_dir", type=str, required=True, help="Test directory including wav files"
)
parser.add_argument(
"--use_gpu", type=int, default=0, help="Whether to use the GPU for model execution"
)
parser.add_argument("--exp_dir", default="exp/tmp", help="Experiment root")
parser.add_argument(
"--n_save_ex", type=int, default=50, help="Number of audio examples to save, -1 means all"
)
ALL_METRICS = ["si_sdr", "sdr", "sir", "sar", "stoi"]
COMPUTE_METRICS = ALL_METRICS
def main(conf):
# Get best trained model
model = load_best_model(conf["train_conf"], conf["exp_dir"])
if conf["use_gpu"]:
model = model.cuda()
# Evaluate performances separately w/ and w/o reverb
for subdir in ["with_reverb", "no_reverb"]:
dict_list = get_wavs_dict_list(os.path.join(conf["test_dir"], subdir))
save_dir = os.path.join(conf["exp_dir"], subdir + "examples/")
os.makedirs(save_dir, exist_ok=True)
all_metrics_df = evaluate(dict_list, model, conf=conf, save_dir=save_dir)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics_{}.csv".format(subdir)))
# Print and save summary metrics
final_results = {}
for metric_name in COMPUTE_METRICS:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics {} :".format(subdir))
pprint(final_results)
filename = os.path.join(conf["exp_dir"], "final_metrics_{}.json".format(subdir))
with open(filename, "w") as f:
json.dump(final_results, f, indent=0)
def get_wavs_dict_list(test_dir):
"""Creates a list of example pair dictionaries.
Args:
test_dir (str): Directory where clean/ and noisy/ subdirectories can
be found.
Returns:
List[dict] : list of noisy/clean pair dictionaries.
Each dict looks like :
{'clean': clean_path,
'noisy': noisy_path,
'id': 3}
"""
# Find all clean files and make an {id: filepath} dictionary
clean_wavs = glob.glob(os.path.join(test_dir, "clean/*.wav"))
clean_dic = make_wav_id_dict(clean_wavs)
# Same for noisy files
noisy_wavs = glob.glob(os.path.join(test_dir, "noisy/*.wav"))
noisy_dic = make_wav_id_dict(noisy_wavs)
assert clean_dic.keys() == noisy_dic.keys()
# Combine both dictionaries
dict_list = [dict(clean=clean_dic[k], noisy=noisy_dic[k], id=k) for k in clean_dic.keys()]
return dict_list
def evaluate(dict_list, model, conf, save_dir=None):
model_device = next(model.parameters()).device
# Randomly choose the indexes of sentences to save.
if save_dir is None:
conf["n_save_ex"] = 0
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(dict_list)
save_idx = random.sample(range(len(dict_list)), conf["n_save_ex"])
series_list = []
for idx, wav_dic in enumerate(tqdm(dict_list)):
# Forward the network on the mixture.
noisy_np, clean_np, fs = load_wav_dic(wav_dic)
with torch.no_grad():
net_input = torch.tensor(noisy_np)[None, None].to(model_device)
est_clean_np = model.denoise(net_input).squeeze().cpu().data.numpy()
utt_metrics = get_metrics(
mix=noisy_np,
clean=clean_np,
estimate=est_clean_np,
sample_rate=fs,
metrics_list=COMPUTE_METRICS,
)
utt_metrics["noisy_path"] = wav_dic["noisy"]
utt_metrics["clean_path"] = wav_dic["clean"]
series_list.append( | pd.Series(utt_metrics) | pandas.Series |
import pathlib
import yaml
import pandas as pd
from clumper import Clumper
from parse import compile as parse_compile
def nlu_path_to_dataframe(path):
"""
Converts a single nlu file with intents into a dataframe.
Usage:
```python
from taipo.common import nlu_path_to_dataframe
df = nlu_path_to_dataframe("path/to/nlu/nlu.yml")
```
"""
res = (
Clumper.read_yaml(path)
.explode("nlu")
.keep(lambda d: "intent" in d["nlu"].keys())
.mutate(
examples=lambda d: d["nlu"]["examples"].split("\n"),
intent=lambda d: d["nlu"]["intent"],
)
.drop("nlu", "version")
.explode(text="examples")
.mutate(text=lambda d: d["text"][2:])
.keep(lambda d: d["text"] != "")
.collect()
)
return | pd.DataFrame(res) | pandas.DataFrame |
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.dates as date
import seaborn as sns
from scipy import stats
sns.set_context('talk')
data_crime_raw = pd.read_csv('.\\NYPD_Complaint_Data_Historic.csv',
usecols=['CMPLNT_FR_DT', 'OFNS_DESC', 'LAW_CAT_CD', 'Latitude', 'Longitude', 'BORO_NM'],
dtype={'OFNS_DESC':'category', 'LAW_CAT_CD':'category', 'BORO_NM':'category',
'Latitude':float, 'Longitude':float})
data_crime_raw['CMPLNT_FR_DT'] = | pd.to_datetime(data_crime_raw['CMPLNT_FR_DT'], format='%m/%d/%Y', errors='coerce') | pandas.to_datetime |
from time import time
import pandas as pd
from numpy import arange
results_df = pd.read_csv('../data/botbrnlys-rand.csv')
def extract_best_vals_index(results_df, df, classifier, hp):
final_df = pd.DataFrame()
temp_df = results_df[results_df.model == classifier]
temp_df_f = temp_df[temp_df.hp.round(3) == hp]
if len(temp_df_f) < 1:
return final_df
for i in temp_df_f.index:
new_df = df[df.cols == results_df.cols[i]]
final_df = final_df.append(
new_df[new_df.df_len == results_df.df_len[i]])
final_df.reset_index(inplace=True)
final_df.drop(columns='index', inplace=True)
print(len(final_df))
return final_df
def ein_best_vals(results_df, classifier, lor=0, ini_hyp=0, fin_hyp=0, incr=0):
final_df = pd.DataFrame()
st = time()
if ini_hyp != 0:
StP = fin_hyp + incr
lor = arange(ini_hyp, StP, incr)
elif lor == 0 and ini_hyp == 0 and fin_hyp == 0:
cl_df = | pd.read_csv('../data/' + classifier + '0_results-nb.csv') | pandas.read_csv |
""" Script that contains the functions to perform sensitivity analysis. """
import itertools
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
##### GLOBAL VARIABLES #####
legs = ['LF', 'LM', 'LH', 'RF', 'RM', 'RH']
joints = [
'Coxa',
'Coxa_yaw',
'Coxa_roll',
'Femur',
'Femur_roll',
'Tibia',
'Tarsus1']
##### CALCULATE STATISTICS #####
def calculate_forces(leg, k_value, *args):
""" Computes the ground reaction force on one single leg.
Parameters
----------
leg: <string>
        Name of the leg, e.g., 'LF', 'RM'.
k_value: <string>
Value of the gain, e.g. 'kp1.0_kv0.9'.
args:
Dictionary containing the measured forces.
Returns
-------
force_vector: <np.array>
Array containing GRF forces in x, y, z.
force_norm: <np.array>
Array containing the norm of the GRF forces.
"""
force = {'x': 0, 'y': 0, 'z': 0}
for key in args[0][k_value].keys():
for ax in ['x', 'y', 'z']:
if leg in key and ax in key:
force[ax] += sum(f[k_value][key]
for f in args if f is not None)
force_vector = np.vstack([force[ax] for ax in force.keys()])
force_norm = np.linalg.norm(force_vector, axis=0)
return force_vector, force_norm
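# Minimal usage sketch (hypothetical input): assuming grf_dict['kp1.0_kv0.9']
# maps sensor names such as 'LF_x'/'LF_y'/'LF_z' to force traces,
#   vec, norm = calculate_forces('LF', 'kp1.0_kv0.9', grf_dict)
# returns a (3, n_samples) stack of axis forces and its per-sample norm.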
def calculate_stack_array(
*args,
force_calc,
leg=None,
constant='kv0.9',
scaling_factor=1
):
""" Concatenates and scales physical quantities.
Parameters
----------
args:
arrays to be concatenated.
    force_calc: <bool>
        If True, compute the ground reaction force norm via calculate_forces;
        otherwise stack the raw data stored under the given leg/joint key.
    leg: <string>
        Name of the leg (or joint key), e.g., 'LF', 'RM'.
constant: <string>
Value of the constant gain, i.e. 'kv0.9'.
scaling_factor: <float>
Scales the force and torque measurements, used for unit changes.
Returns
-------
stack_array: <np.array>
array of values that have the same constant gain (kp or kv).
"""
first_iteration = True
for k_value in args[0].keys():
if constant in k_value:
if force_calc:
_, data_stack = calculate_forces(leg, k_value, *args)
else:
data_stack = np.array(args[0][k_value][leg])
if first_iteration:
stack_array = data_stack
first_iteration = False
else:
stack_array = np.vstack(
(stack_array, data_stack)
)
return stack_array * scaling_factor
def calculate_statistics_joints(
*args,
scaling_factor=1,
constant='kv0.9',
force_calculation=False,
joints=[
'Coxa',
'Coxa_yaw',
'Coxa_roll',
'Femur',
'Femur_roll',
'Tibia',
'Tarsus1']):
""" Calculates statistical properties of joint physical quantities.
Parameters
----------
scaling_factor: <int>
Scales the force and torque measurements, used for unit changes.
constant: <str>
Used for fixing one of two independent variables. E.g. 'kv0.9'.
force_calculation: <bool>
        True -> compute ground reaction forces; False -> stack joint torques, angles, or velocities.
joints: <list>
If GRF then ['LF', 'LM', 'LH', 'RF', 'RM', 'RH'].
Returns
-------
stat_joints: <dict>
        Dictionary containing the mean, standard deviation, and standard error of the given data.
"""
stat_joints = {}
for leg, joint in itertools.product(legs, joints):
name = leg if force_calculation else 'joint_' + leg + joint
stack_array = calculate_stack_array(
*args,
force_calc=force_calculation,
leg=name,
constant=constant,
scaling_factor=scaling_factor)
stat_joints[name] = calculate_stats(stack_array)
return stat_joints
def calculate_stats(data):
""" Calculates, std, mean, and stderror of a given data.
Parameters
----------
data: <np.array>
Physical quantities of different gain values.
Returns
-------
stat_dict: <dict>
        Dictionary containing the mean, standard deviation, and standard error of the given data.
"""
stat_dict = {}
stat_dict['mu'] = np.mean(data, axis=0)
stat_dict['stderr'] = np.std(data, ddof=1, axis=0) / np.sqrt(data.shape[0])
stat_dict['std'] = np.std(data, axis=0)
return stat_dict
def calculate_mse_joints(
joint_data,
ground_truth,
starting_time,
time_step,
):
""" Calculates MSE between the ground truth and given data.
Parameters
----------
joint_data: <dict>
Dictionary containing the joint information (angle or velocity).
ground_truth: <dict>
Dictionary containing the ground truth angle or velocity data.
    starting_time: <float>
        Time at which the comparison starts; divided by time_step to obtain the start index.
    time_step: <float>
        Duration of one simulation step, used to convert starting_time into an index.
Returns
-------
error_df: <pd.DataFrame>
Mean squared error between the baseline and the simulation values.
error_dict: <dictionary>
Mean squared error between the baseline and the simulation values.
"""
import matplotlib.pyplot as plt
leg_mse = []
error_dict = {leg: {} for leg in legs}
beg = int(np.round(starting_time / time_step))
for gain_name in joint_data.keys():
for leg in legs:
mse = 0
for joint in joints:
key_name = 'joint_' + leg + joint
joint_baseline = ground_truth[key_name]
joint_comparison = joint_data[gain_name][key_name][beg:]
assert len(joint_baseline) == len(joint_comparison), "Two arrays should be of the same length {} and {}".format(
len(joint_baseline), len(joint_comparison))
mse += mean_squared_error(joint_baseline, joint_comparison)
error_dict[leg][gain_name] = mse / len(joints)
leg_mse.append([leg,
float(gain_name[2:5]),
float(gain_name[-3:]),
mse / len(joints)])
error_df = | pd.DataFrame(leg_mse, columns=['Leg', 'Kp', 'Kv', 'MSE']) | pandas.DataFrame |
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for USEquityPricingLoader and related classes.
"""
from parameterized import parameterized
import sys
import numpy as np
from numpy.testing import (
assert_allclose,
assert_array_equal,
)
import pandas as pd
from pandas.testing import assert_frame_equal
from toolz.curried.operator import getitem
from zipline.lib.adjustment import Float64Multiply
from zipline.pipeline.domain import US_EQUITIES
from zipline.pipeline.loaders.synthetic import (
NullAdjustmentReader,
make_bar_data,
expected_bar_values_2d,
)
from zipline.pipeline.loaders.equity_pricing_loader import (
USEquityPricingLoader,
)
from zipline.errors import WindowLengthTooLong
from zipline.pipeline.data import USEquityPricing
from zipline.testing import (
seconds_to_timestamp,
str_to_seconds,
MockDailyBarReader,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
ZiplineTestCase,
)
import pytest
# Test calendar ranges over the month of June 2015
# June 2015
# Mo Tu We Th Fr Sa Su
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
TEST_CALENDAR_START = pd.Timestamp("2015-06-01", tz="UTC")
TEST_CALENDAR_STOP = pd.Timestamp("2015-06-30", tz="UTC")
TEST_QUERY_START = pd.Timestamp("2015-06-10", tz="UTC")
TEST_QUERY_STOP = pd.Timestamp("2015-06-19", tz="UTC")
# One asset for each of the cases enumerated in load_raw_arrays_from_bcolz.
EQUITY_INFO = pd.DataFrame(
[
# 1) The equity's trades start and end before query.
{"start_date": "2015-06-01", "end_date": "2015-06-05"},
# 2) The equity's trades start and end after query.
{"start_date": "2015-06-22", "end_date": "2015-06-30"},
# 3) The equity's data covers all dates in range.
{"start_date": "2015-06-02", "end_date": "2015-06-30"},
# 4) The equity's trades start before the query start, but stop
# before the query end.
{"start_date": "2015-06-01", "end_date": "2015-06-15"},
# 5) The equity's trades start and end during the query.
{"start_date": "2015-06-12", "end_date": "2015-06-18"},
# 6) The equity's trades start during the query, but extend through
# the whole query.
{"start_date": "2015-06-15", "end_date": "2015-06-25"},
],
index=np.arange(1, 7),
columns=["start_date", "end_date"],
).astype(np.datetime64)
EQUITY_INFO["symbol"] = [chr(ord("A") + n) for n in range(len(EQUITY_INFO))]
EQUITY_INFO["exchange"] = "TEST"
TEST_QUERY_SIDS = EQUITY_INFO.index
# ADJUSTMENTS use the following scheme to indicate information about the value
# upon inspection.
#
# 1s place is the equity
#
# 0.1s place is the action type, with:
#
# splits, 1
# mergers, 2
# dividends, 3
#
# 0.001s is the date
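# For example, a ratio of 3.112 decodes as equity 3, action 1 (split),
# day 12 of the month (2015-06-12); compare the sid-3 SPLITS row below.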
SPLITS = pd.DataFrame(
[
# Before query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-03"),
"ratio": 1.103,
"sid": 1,
},
# First day of query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-10"),
"ratio": 3.110,
"sid": 3,
},
# Third day of query range, should have last_row of 2
{
"effective_date": str_to_seconds("2015-06-12"),
"ratio": 3.112,
"sid": 3,
},
# After query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-21"),
"ratio": 6.121,
"sid": 6,
},
# Another action in query range, should have last_row of 1
{
"effective_date": str_to_seconds("2015-06-11"),
"ratio": 3.111,
"sid": 3,
},
# Last day of range. Should have last_row of 7
{
"effective_date": str_to_seconds("2015-06-19"),
"ratio": 3.119,
"sid": 3,
},
],
columns=["effective_date", "ratio", "sid"],
)
MERGERS = pd.DataFrame(
[
# Before query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-03"),
"ratio": 1.203,
"sid": 1,
},
# First day of query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-10"),
"ratio": 3.210,
"sid": 3,
},
# Third day of query range, should have last_row of 2
{
"effective_date": str_to_seconds("2015-06-12"),
"ratio": 3.212,
"sid": 3,
},
# After query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-25"),
"ratio": 6.225,
"sid": 6,
},
# Another action in query range, should have last_row of 2
{
"effective_date": str_to_seconds("2015-06-12"),
"ratio": 4.212,
"sid": 4,
},
# Last day of range. Should have last_row of 7
{
"effective_date": str_to_seconds("2015-06-19"),
"ratio": 3.219,
"sid": 3,
},
],
columns=["effective_date", "ratio", "sid"],
)
DIVIDENDS = pd.DataFrame(
[
# Before query range, should be excluded.
{
"declared_date": pd.Timestamp("2015-05-01", tz="UTC").to_datetime64(),
"ex_date": pd.Timestamp("2015-06-01", tz="UTC").to_datetime64(),
"record_date": pd.Timestamp("2015-06-03", tz="UTC").to_datetime64(),
"pay_date": pd.Timestamp("2015-06-05", tz="UTC").to_datetime64(),
"amount": 90.0,
"sid": 1,
},
# First day of query range, should be excluded.
{
"declared_date": pd.Timestamp("2015-06-01", tz="UTC").to_datetime64(),
"ex_date": pd.Timestamp("2015-06-10", tz="UTC").to_datetime64(),
"record_date": pd.Timestamp("2015-06-15", tz="UTC").to_datetime64(),
"pay_date": pd.Timestamp("2015-06-17", tz="UTC").to_datetime64(),
"amount": 80.0,
"sid": 3,
},
# Third day of query range, should have last_row of 2
{
"declared_date": pd.Timestamp("2015-06-01", tz="UTC").to_datetime64(),
"ex_date": pd.Timestamp("2015-06-12", tz="UTC").to_datetime64(),
"record_date": pd.Timestamp("2015-06-15", tz="UTC").to_datetime64(),
"pay_date": pd.Timestamp("2015-06-17", tz="UTC").to_datetime64(),
"amount": 70.0,
"sid": 3,
},
# After query range, should be excluded.
{
"declared_date": pd.Timestamp("2015-06-01", tz="UTC").to_datetime64(),
"ex_date": pd.Timestamp("2015-06-25", tz="UTC").to_datetime64(),
"record_date": pd.Timestamp("2015-06-28", tz="UTC").to_datetime64(),
"pay_date": pd.Timestamp("2015-06-30", tz="UTC").to_datetime64(),
"amount": 60.0,
"sid": 6,
},
# Another action in query range, should have last_row of 3
{
"declared_date": pd.Timestamp("2015-06-01", tz="UTC").to_datetime64(),
"ex_date": pd.Timestamp("2015-06-15", tz="UTC").to_datetime64(),
"record_date": pd.Timestamp("2015-06-18", tz="UTC").to_datetime64(),
"pay_date": pd.Timestamp("2015-06-20", tz="UTC").to_datetime64(),
"amount": 50.0,
"sid": 3,
},
# Last day of range. Should have last_row of 7
{
"declared_date": pd.Timestamp("2015-06-01", tz="UTC").to_datetime64(),
"ex_date": pd.Timestamp("2015-06-19", tz="UTC").to_datetime64(),
"record_date": pd.Timestamp("2015-06-22", tz="UTC").to_datetime64(),
"pay_date": pd.Timestamp("2015-06-30", tz="UTC").to_datetime64(),
"amount": 40.0,
"sid": 3,
},
],
columns=[
"declared_date",
"ex_date",
"record_date",
"pay_date",
"amount",
"sid",
],
)
DIVIDENDS_EXPECTED = pd.DataFrame(
[
# Before query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-01"),
"ratio": 0.1,
"sid": 1,
},
# First day of query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-10"),
"ratio": 0.20,
"sid": 3,
},
# Third day of query range, should have last_row of 2
{
"effective_date": str_to_seconds("2015-06-12"),
"ratio": 0.30,
"sid": 3,
},
# After query range, should be excluded.
{
"effective_date": str_to_seconds("2015-06-25"),
"ratio": 0.40,
"sid": 6,
},
# Another action in query range, should have last_row of 3
{
"effective_date": str_to_seconds("2015-06-15"),
"ratio": 0.50,
"sid": 3,
},
# Last day of range. Should have last_row of 7
{
"effective_date": str_to_seconds("2015-06-19"),
"ratio": 0.60,
"sid": 3,
},
],
columns=["effective_date", "ratio", "sid"],
)
class USEquityPricingLoaderTestCase(WithAdjustmentReader, ZiplineTestCase):
START_DATE = TEST_CALENDAR_START
END_DATE = TEST_CALENDAR_STOP
asset_ids = 1, 2, 3
@classmethod
def make_equity_info(cls):
return EQUITY_INFO
@classmethod
def make_splits_data(cls):
return SPLITS
@classmethod
def make_mergers_data(cls):
return MERGERS
@classmethod
def make_dividends_data(cls):
return DIVIDENDS
@classmethod
def make_adjustment_writer_equity_daily_bar_reader(cls):
return MockDailyBarReader(
dates=cls.calendar_days_between(cls.START_DATE, cls.END_DATE),
)
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
return make_bar_data(
EQUITY_INFO,
cls.equity_daily_bar_days,
)
@classmethod
def init_class_fixtures(cls):
super(USEquityPricingLoaderTestCase, cls).init_class_fixtures()
cls.sids = TEST_QUERY_SIDS
cls.asset_info = EQUITY_INFO
def test_input_sanity(self):
# Ensure that the input data doesn't contain adjustments during periods
# where the corresponding asset didn't exist.
for table in SPLITS, MERGERS:
for eff_date_secs, _, sid in table.itertuples(index=False):
eff_date = pd.Timestamp(eff_date_secs, unit="s")
asset_start, asset_end = EQUITY_INFO.loc[
sid, ["start_date", "end_date"]
]
assert eff_date >= asset_start
assert eff_date <= asset_end
@classmethod
def calendar_days_between(cls, start_date, end_date, shift=0):
slice_ = cls.equity_daily_bar_days.slice_indexer(start_date, end_date)
start = slice_.start + shift
stop = slice_.stop + shift
if start < 0:
raise KeyError(start_date, shift)
return cls.equity_daily_bar_days[start:stop]
def expected_adjustments(self, start_date, end_date, tables, adjustment_type):
price_adjustments = {}
volume_adjustments = {}
should_include_price_adjustments = (
adjustment_type == "all" or adjustment_type == "price"
)
should_include_volume_adjustments = (
adjustment_type == "all" or adjustment_type == "volume"
)
query_days = self.calendar_days_between(start_date, end_date)
start_loc = query_days.get_loc(start_date)
for table in tables:
for eff_date_secs, ratio, sid in table.itertuples(index=False):
eff_date = pd.Timestamp(eff_date_secs, unit="s", tz="UTC")
# Ignore adjustments outside the query bounds.
if not (start_date <= eff_date <= end_date):
continue
eff_date_loc = query_days.get_loc(eff_date)
delta = eff_date_loc - start_loc
# Pricing adjustments should be applied on the date
# corresponding to the effective date of the input data. They
# should affect all rows **before** the effective date.
if should_include_price_adjustments:
price_adjustments.setdefault(delta, []).append(
Float64Multiply(
first_row=0,
last_row=delta,
first_col=sid - 1,
last_col=sid - 1,
value=ratio,
)
)
# Volume is *inversely* affected by *splits only*.
if table is SPLITS and should_include_volume_adjustments:
volume_adjustments.setdefault(delta, []).append(
Float64Multiply(
first_row=0,
last_row=delta,
first_col=sid - 1,
last_col=sid - 1,
value=1.0 / ratio,
)
)
output = {}
if should_include_price_adjustments:
output["price_adjustments"] = price_adjustments
if should_include_volume_adjustments:
output["volume_adjustments"] = volume_adjustments
return output
@parameterized.expand(
[
([SPLITS, MERGERS, DIVIDENDS_EXPECTED], "all"),
([SPLITS, MERGERS, DIVIDENDS_EXPECTED], "price"),
([SPLITS, MERGERS, DIVIDENDS_EXPECTED], "volume"),
([SPLITS, MERGERS, None], "all"),
([SPLITS, MERGERS, None], "price"),
]
)
def test_load_adjustments(self, tables, adjustment_type):
query_days = self.calendar_days_between(
TEST_QUERY_START,
TEST_QUERY_STOP,
)
adjustments = self.adjustment_reader.load_adjustments(
query_days,
self.sids,
should_include_splits=tables[0] is not None,
should_include_mergers=tables[1] is not None,
should_include_dividends=tables[2] is not None,
adjustment_type=adjustment_type,
)
expected_adjustments = self.expected_adjustments(
TEST_QUERY_START,
TEST_QUERY_STOP,
[table for table in tables if table is not None],
adjustment_type,
)
if adjustment_type == "all" or adjustment_type == "price":
expected_price_adjustments = expected_adjustments["price_adjustments"]
for key in expected_price_adjustments:
price_adjustment = adjustments["price"][key]
for j, adj in enumerate(price_adjustment):
expected = expected_price_adjustments[key][j]
assert adj.first_row == expected.first_row
assert adj.last_row == expected.last_row
assert adj.first_col == expected.first_col
assert adj.last_col == expected.last_col
assert_allclose(adj.value, expected.value)
if adjustment_type == "all" or adjustment_type == "volume":
expected_volume_adjustments = expected_adjustments["volume_adjustments"]
for key in expected_volume_adjustments:
volume_adjustment = adjustments["volume"][key]
for j, adj in enumerate(volume_adjustment):
expected = expected_volume_adjustments[key][j]
assert adj.first_row == expected.first_row
assert adj.last_row == expected.last_row
assert adj.first_col == expected.first_col
assert adj.last_col == expected.last_col
assert_allclose(adj.value, expected.value)
@parameterized.expand([(True,), (False,)])
@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows")
def test_load_adjustments_to_df(self, convert_dts):
reader = self.adjustment_reader
adjustment_dfs = reader.unpack_db_to_component_dfs(convert_dates=convert_dts)
name_and_raw = (
("splits", SPLITS),
("mergers", MERGERS),
("dividends", DIVIDENDS_EXPECTED),
)
def create_expected_table(df, name):
expected_df = df.copy()
if convert_dts:
for colname in reader._datetime_int_cols[name]:
expected_df[colname] = (
expected_df[colname]
.astype("datetime64[s]")
.dt.tz_localize("UTC")
)
return expected_df
def create_expected_div_table(df, name):
expected_df = df.copy()
for colname in reader._datetime_int_cols[name]:
if not convert_dts:
# todo: fix nanosecond hack
expected_df[colname] = (
expected_df[colname]
.astype("datetime64[s]")
.view(int)
.div(1000000000)
.astype(int)
)
else:
expected_df[colname] = (
expected_df[colname]
.astype("datetime64[s]")
.dt.tz_localize("UTC")
)
return expected_df
for action_name, raw_tbl in name_and_raw:
# todo: fix missing dividend value
if action_name == "dividends":
continue
exp = create_expected_table(raw_tbl, action_name)
assert_frame_equal(adjustment_dfs[action_name], exp)
# DIVIDENDS is in the opposite form from the rest of the dataframes, so
# needs to be converted separately.
div_name = "dividend_payouts"
exp = create_expected_div_table(DIVIDENDS, div_name)
assert_frame_equal(adjustment_dfs[div_name].loc[:, exp.columns], exp)
def test_read_no_adjustments(self):
adjustment_reader = NullAdjustmentReader()
columns = [USEquityPricing.close, USEquityPricing.volume]
query_days = self.calendar_days_between(TEST_QUERY_START, TEST_QUERY_STOP)
# Our expected results for each day are based on values from the
# previous day.
shifted_query_days = self.calendar_days_between(
TEST_QUERY_START,
TEST_QUERY_STOP,
shift=-1,
)
adjustments = adjustment_reader.load_pricing_adjustments(
[c.name for c in columns],
query_days,
self.sids,
)
assert adjustments == [{}, {}]
pricing_loader = USEquityPricingLoader.without_fx(
self.bcolz_equity_daily_bar_reader,
adjustment_reader,
)
results = pricing_loader.load_adjusted_array(
domain=US_EQUITIES,
columns=columns,
dates=query_days,
sids=self.sids,
mask=np.ones((len(query_days), len(self.sids)), dtype=bool),
)
closes, volumes = map(getitem(results), columns)
expected_baseline_closes = expected_bar_values_2d(
shifted_query_days,
self.sids,
self.asset_info,
"close",
)
expected_baseline_volumes = expected_bar_values_2d(
shifted_query_days,
self.sids,
self.asset_info,
"volume",
)
# AdjustedArrays should yield the same data as the expected baseline.
for windowlen in range(1, len(query_days) + 1):
for offset, window in enumerate(closes.traverse(windowlen)):
assert_array_equal(
expected_baseline_closes[offset : offset + windowlen],
window,
)
for offset, window in enumerate(volumes.traverse(windowlen)):
assert_array_equal(
expected_baseline_volumes[offset : offset + windowlen],
window,
)
# Verify that we checked up to the longest possible window.
with pytest.raises(WindowLengthTooLong):
closes.traverse(windowlen + 1)
with pytest.raises(WindowLengthTooLong):
volumes.traverse(windowlen + 1)
def apply_adjustments(self, dates, assets, baseline_values, adjustments):
min_date, max_date = dates[[0, -1]]
# HACK: Simulate the coercion to float64 we do in adjusted_array. This
# should be removed when AdjustedArray properly supports
# non-floating-point types.
orig_dtype = baseline_values.dtype
values = baseline_values.astype(np.float64).copy()
for eff_date_secs, ratio, sid in adjustments.itertuples(index=False):
eff_date = seconds_to_timestamp(eff_date_secs)
# Don't apply adjustments that aren't in the current date range.
if eff_date not in dates:
continue
eff_date_loc = dates.get_loc(eff_date)
asset_col = assets.get_loc(sid)
# Apply ratio multiplicatively to the asset column on all rows less
# than or equal adjustment effective date.
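            # For example, a 2-for-1 split recorded with ratio 0.5 and effective on
            # row r halves rows 0..r (inclusive) of that asset's column, mirroring
            # the Float64Multiply adjustments built in expected_adjustments above.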
values[: eff_date_loc + 1, asset_col] *= ratio
return values.astype(orig_dtype)
def test_read_with_adjustments(self):
columns = [USEquityPricing.high, USEquityPricing.volume]
query_days = self.calendar_days_between(TEST_QUERY_START, TEST_QUERY_STOP)
# Our expected results for each day are based on values from the
# previous day.
shifted_query_days = self.calendar_days_between(
TEST_QUERY_START,
TEST_QUERY_STOP,
shift=-1,
)
pricing_loader = USEquityPricingLoader.without_fx(
self.bcolz_equity_daily_bar_reader,
self.adjustment_reader,
)
results = pricing_loader.load_adjusted_array(
domain=US_EQUITIES,
columns=columns,
dates=query_days,
sids=pd.Int64Index(np.arange(1, 7)),
mask=np.ones((len(query_days), 6), dtype=bool),
)
highs, volumes = map(getitem(results), columns)
expected_baseline_highs = expected_bar_values_2d(
shifted_query_days,
self.sids,
self.asset_info,
"high",
)
expected_baseline_volumes = expected_bar_values_2d(
shifted_query_days,
self.sids,
self.asset_info,
"volume",
)
# At each point in time, the AdjustedArrays should yield the baseline
# with all adjustments up to that date applied.
for windowlen in range(1, len(query_days) + 1):
for offset, window in enumerate(highs.traverse(windowlen)):
baseline = expected_baseline_highs[offset : offset + windowlen]
baseline_dates = query_days[offset : offset + windowlen]
expected_adjusted_highs = self.apply_adjustments(
baseline_dates,
self.sids,
baseline,
# Apply all adjustments.
| pd.concat([SPLITS, MERGERS, DIVIDENDS_EXPECTED], ignore_index=True) | pandas.concat |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 18})
from textwrap import wrap
def plot_metrics_bar(result_dirs, plot_dir, metrics="Acc", prefix="pancreas",
groupby="features"):
'''Plot Acc, ARI, macroF1 for different methods
    x-axis shows the different feature selections
    y-axis shows the chosen evaluation metric
@result_dirs: given a list of result_dir
@metrics: one of evaluation metrics Acc/ARI/macroF1
@groupby:
        - features: group by features and compare between different feature selections
- methods: group by methods and compare between methods
'''
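    # Illustrative call (hypothetical directory names, not from this script):
    #   plot_metrics_bar(["result_pancreas_seurat", "result_pancreas_FEAST"],
    #                    plot_dir="plots", metrics="ARI", prefix="pancreas_", groupby="methods")
    # With groupby="methods" the x-axis shows classifiers and each result_dir becomes one bar;
    # with groupby="features" the axes are swapped.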
res_dict = {}
for result_dir in result_dirs:
basename = os.path.basename(result_dir)
res_dict[basename] = {}
files = [x for x in os.listdir(result_dir) if x.endswith("_metrics.txt")]
methods = [x.replace("_metrics.txt", "") for x in files] ## get all methods
for method in methods:
if method in ['MLP_GO', 'MLP_CP', 'MLP_focal', 'DFN']:
continue
res_dict[basename][method] = {}
with open(result_dir+os.sep+method+"_metrics.txt", 'r') as fopen:
for line in fopen:
metric, value = line.split(":")
if metric == metrics:
res_dict[basename][method] = float(value)
df = pd.DataFrame.from_dict(res_dict)
df = df.reindex(sorted(df.columns), axis=1)
    #df.columns = ['\n'.join(wrap(x, 14)) for x in df.columns] ## cut into shorter lines
## line plot
#df.T.plot.line(rot=45)
## === for all methods
#level=["scmap", "CHETAH", "RF", "SVM_linear", "SVM_RBF", "MLP", "MLP_GO", "MLP_CP", "DFN", "GEDFN", "ItClust"]
#df = df.reindex(level)
fig = plt.figure(figsize=(10, 6), dpi=300)
ax = plt.gca()
## bar plot
if "features" == groupby:
if metrics in ["Acc", "ARI", "macroF1"]:
df.T.plot(ax=ax, kind="bar", rot=45)
else:
df.T.plot(ax=ax, kind="bar", rot=45, logy=True)
plt.xlabel("Feature Selection")
elif "methods" == groupby:
if metrics in ["Acc", "ARI", "macroF1"]:
df.plot(ax=ax, kind="bar", rot=90)
else:
df.plot(ax=ax, kind="bar", rot=90, logy=True)
plt.xlabel("Classifiers")
## set y axis range
global_min = np.nanmin(df.to_numpy())
global_max = np.nanmax(df.to_numpy())
if "Acc" == metrics or "macroF1" == metrics:
min_y_axis = max(global_min*0.95, 0)
max_y_axis = min(global_max*1.05, 1)
plt.ylim([min_y_axis, max_y_axis])
elif "ARI" == metrics:
min_y_axis = max(global_min*0.95, -1)
max_y_axis = min(global_max*1.05, 1)
plt.ylim([min_y_axis, max_y_axis])
plt.legend(loc="center left", bbox_to_anchor=(1.0, 0.5))
plt.ylabel(metrics)
plt.tight_layout()
plt.savefig(plot_dir+os.sep+prefix+metrics+'.png')
def extract_sub_prediction(result_dirs, datatype="mousebrain"):
    ''' Extract prediction information from mousebrain sub-cell types
    @datatype: mousebrain/humanPBMC -> used in the two-stage analysis
               mousebrain_sub/humanPBMC_sub -> used for direct prediction with sub cell types
    '''
if "mousebrain" == datatype:
major_celltype_col = "mouse_celltypes"
sub_celltype_col = "cell.type"
pred_celltype_col = "pred_sub_celltypes"
elif "mousebrain_sub" == datatype:
major_celltype_col = "mouse_celltypes"
sub_celltype_col = "cell.type"
pred_celltype_col = "pred_celltypes"
elif "humanPBMC" == datatype:
major_celltype_col = "cell.type"
sub_celltype_col = "subtypes"
pred_celltype_col = "pred_sub_celltypes"
elif "humanPBMC_sub" == datatype:
major_celltype_col = "majortypes"
sub_celltype_col = "cell.type"
pred_celltype_col = "pred_celltypes"
import json
from sklearn import metrics
for result_dir in result_dirs:
basename = os.path.basename(result_dir)
basename = basename.replace("result_"+prefix+'_', '')
suffix="_predicted_obs.csv"
files = [x for x in os.listdir(result_dir) if x.endswith(suffix)]
methods = [x.replace(suffix, "") for x in files] ## get all methods
for method in methods:
json_list = []
df = | pd.read_csv(result_dir+os.sep+method+suffix) | pandas.read_csv |
from operator import eq, ge
from functools import partial
import pandas as pd
from microsetta_public_api.resources import resources
ops = {
'equal': eq,
'greater_or_equal': ge,
}
conditions = {
"AND": partial(pd.DataFrame.all, axis=1),
"OR": partial(pd.DataFrame.any, axis=1)
}
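# Each rule evaluates to a boolean Series over samples; _process_query concatenates a
# group's rule results column-wise and reduces them row-wise with DataFrame.all ("AND")
# or DataFrame.any ("OR"), yielding one boolean mask per sample.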
def _is_rule(node):
rule_fields = ["id", "operator", "value"]
for field in rule_fields:
if field not in node:
return False
op = node["operator"]
if op not in ops:
raise ValueError(f"Only operators in {ops} are supported. "
f"Got {op}")
return True
class MetadataRepo:
def __init__(self, metadata=None):
if metadata is not None:
self._metadata = metadata
else:
self._metadata = resources.get('metadata', pd.DataFrame())
@property
def metadata(self):
return self._metadata
@property
def categories(self):
return list(self._metadata.columns)
@property
def samples(self):
return list(self._metadata.index)
def category_values(self, category, exclude_na=True):
"""
Parameters
----------
category : str
Metadata category to return the values of
exclude_na : bool
If True, not a number (na) values will be dropped from the
category values
Returns
-------
list
Contains the unique values in the metadata category
Raises
------
ValueError
If `category` is not an existing category in the metadata
"""
if category not in self._metadata.columns:
raise ValueError(f'No category with name `{category}`')
category_values = self._metadata[category].unique()
if exclude_na:
category_values = category_values[~pd.isnull(category_values)]
return list(category_values)
def has_category(self, category):
if isinstance(category, str):
return category in self._metadata.columns
else:
cols = set(self._metadata.columns)
return [cat in cols for cat in category]
def has_sample_id(self, sample_id):
if isinstance(sample_id, str):
return sample_id in self._metadata.index
else:
index = set(self._metadata.index)
return [id_ in index for id_ in sample_id]
def get_metadata(self, categories, sample_ids=None, fillna=None):
md = self._metadata[categories]
if sample_ids is not None:
md = md.reindex(sample_ids, fill_value=None)
md = md.astype('object')
md[pd.isna(md)] = fillna
return md
def sample_id_matches(self, query):
"""
Parameters
----------
query : dict
Expects a jquerybuilder formatted query
Returns
-------
list
The sample IDs that match the given `query`
"""
slice_ = self._process_query(query)
return list(self._metadata.index[slice_])
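    # Example of the jquerybuilder-style query accepted by sample_id_matches and
    # _process_query (illustrative category names and values, not from this module):
    # {
    #     "condition": "AND",
    #     "rules": [
    #         {"id": "age_years", "operator": "greater_or_equal", "value": 50},
    #         {"id": "bmi_cat", "operator": "equal", "value": "Normal"},
    #     ],
    # }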
def _process_query(self, query):
group_fields = ["condition", "rules"]
if _is_rule(query):
category, op, value = query['id'], query['operator'], \
query['value']
return ops[op](self._metadata[category], value)
else:
for field in group_fields:
if field not in query:
raise ValueError(f"query=`{query}` does not appear to be "
f"a rule or a group.")
if query['condition'] not in conditions:
raise ValueError(f"Only conditions in {conditions} are "
f"supported. Got {query['condition']}.")
else:
condition = conditions[query['condition']]
return condition(self._safe_concat([self._process_query(rule) for
rule in query['rules']],
axis=1))
def _safe_concat(self, list_of_df, **concat_kwargs):
if len(list_of_df) > 0:
return pd.concat(list_of_df, **concat_kwargs)
return pd.DataFrame( | pd.Series(True, index=self._metadata.index) | pandas.Series |
import numpy as np
import pandas as pd
from dateutil.parser import parse
import tldextract
def replace_basic_columns(dataframe):
pd.set_option('mode.chained_assignment', None)
dataframe["title"] = np.nan
dataframe["description"] = np.nan
dataframe["image"] = np.nan
for index in dataframe.index:
if(pd.isnull(dataframe['meta_title'][index])):
dataframe['title'][index] = dataframe['cse_title'][index]
else:
dataframe['title'][index] = dataframe['meta_title'][index]
if(pd.isnull(dataframe['meta_description'][index])):
dataframe['description'][index] = dataframe['cse_description'][index]
else:
dataframe['description'][index] = dataframe['meta_description'][index]
if(pd.isnull(dataframe['meta_site_name'][index])):
dataframe['meta_site_name'][index] = tldextract.extract(
dataframe['url'][index]).domain.capitalize()
if( | pd.isnull(dataframe['meta_image'][index]) | pandas.isnull |
import functools
import warnings
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import os
from scipy.stats import poisson
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
from tensorflow_probability import bijectors as tfb
from cplvm import CPLVM
import socket
from os.path import join as pjoin
if socket.gethostname() == "andyjones":
DATA_DIR = "../../data/mix_seq/data/nutlin/"
else:
DATA_DIR = "../data/mix_seq/"
import matplotlib
font = {"size": 30}
matplotlib.rc("font", **font)
matplotlib.rcParams["text.usetex"] = True
NUM_SETS_TO_PLOT = 8
if __name__ == "__main__":
# Load gene sets
gene_sets = pd.read_csv(
"../perturbseq_experiments/hallmark_genesets.csv", index_col=0
)
gene_sets_unique = gene_sets.gene_set.values
gene_sets_for_plot = np.array(
[" ".join(x.split("_")[1:]) for x in gene_sets_unique]
)
control_bfs = []
latent_dim_shared = 2
latent_dim_target = 2
X_fname = pjoin(DATA_DIR, "data/nutlin/dmso_expt1.csv")
Y_fname = pjoin(DATA_DIR, "data/nutlin/nutlin_expt1.csv")
gene_fname = pjoin(DATA_DIR, "data/nutlin/gene_symbols.csv")
# Read in data
X = pd.read_csv(X_fname, index_col=0)
Y = pd.read_csv(Y_fname, index_col=0)
gene_names = | pd.read_csv(gene_fname, index_col=0) | pandas.read_csv |
import os
import joblib
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from Fuzzy_clustering.version2.common_utils.logging import create_logger
from Fuzzy_clustering.version2.dataset_manager.common_utils import check_empty_nwp
from Fuzzy_clustering.version2.dataset_manager.common_utils import rescale_mean
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_2d_dense
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_3d
class DatasetCreatorDense:
def __init__(self, projects_group, projects, data, path_nwp, nwp_model, nwp_resolution, data_variables, njobs=1,
test=False, dates=None):
self.projects = projects
self.is_for_test = test
self.projects_group = projects_group
self.data = data
self.path_nwp = path_nwp
self.nwp_model = nwp_model
self.nwp_resolution = nwp_resolution
self.compress = True if self.nwp_resolution == 0.05 else False
self.n_jobs = njobs
self.variables = data_variables
self.logger = create_logger(logger_name=__name__, abs_path=self.path_nwp,
logger_path=f'log_{self.projects_group}.log', write_type='a')
if not self.data is None:
self.dates = self.check_dates()
elif not dates is None:
self.dates = dates
def check_dates(self):
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
self.logger.info('Dates are checked. Number of time samples is %s', str(len(dates)))
return pd.DatetimeIndex(dates)
def correct_nwps(self, nwp, variables):
if nwp['lat'].shape[0] == 0:
area_group = self.projects[0]['static_data']['area_group']
resolution = self.projects[0]['static_data']['NWP_resolution']
nwp['lat'] = np.arange(area_group[0][0], area_group[1][0] + resolution / 2,
resolution).reshape(-1, 1)
nwp['long'] = np.arange(area_group[0][1], area_group[1][1] + resolution / 2,
resolution).reshape(-1, 1).T
for var in nwp.keys():
if not var in {'lat', 'long'}:
if nwp['lat'].shape[0] != nwp[var].shape[0]:
nwp[var] = nwp[var].T
if 'WS' in variables and not 'WS' in nwp.keys():
if 'Uwind' in nwp.keys() and 'Vwind' in nwp.keys():
if nwp['Uwind'].shape[0] > 0 and nwp['Vwind'].shape[0] > 0:
r2d = 45.0 / np.arctan(1.0)
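                    # 45.0 / arctan(1.0) == 180 / pi, i.e. the radians-to-degrees factor
                    # used to express wind direction in degrees below.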
wspeed = np.sqrt(np.square(nwp['Uwind']) + np.square(nwp['Vwind']))
wdir = np.arctan2(nwp['Uwind'], nwp['Vwind']) * r2d + 180
nwp['WS'] = wspeed
nwp['WD'] = wdir
if 'Temp' in nwp.keys():
nwp['Temperature'] = nwp['Temp']
del nwp['Temp']
return nwp
def stack_by_sample(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables, predictions):
timestep = 60
x = dict()
y = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
preds = predictions[project['_id']]
hor = preds.columns[-1] + timestep
p_dates = [t + pd.DateOffset(minutes=hor)]
preds = preds.loc[t].to_frame().T
dates_pred = [t + pd.DateOffset(minutes=h) for h in preds.columns]
pred = pd.DataFrame(preds.values.ravel(), index=dates_pred, columns=[project['_id']])
data_temp = pd.concat([data[project['_id']].iloc[np.where(data.index < t)].to_frame(), pred])
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
y[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
try:
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data_temp.loc[(date - pd.DateOffset(hours=1))].values
inp['Obs_lag2'] = data_temp.loc[(date - pd.DateOffset(hours=2))].values
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
y[project_id] = pd.concat([y[project_id], pd.DataFrame(data.loc[date, project_id],
columns=['target'],
index=[date])])
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data_temp.loc[(date - pd.DateOffset(hours=1)), project_id].values
inp['Obs_lag2'] = data_temp.loc[(date - pd.DateOffset(hours=2)), project_id].values
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
y[project['_id']] = pd.concat(
[y[project['_id']], pd.DataFrame(data.loc[date, project['_id']],
columns=['target'], index=[date])])
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
for project in projects:
if len(x_3d[project['_id']].shape) == 3:
x_3d[project['_id']] = x_3d[project['_id']][np.newaxis, :, :, :]
return x, y, x_3d, t.strftime('%d%m%y%H%M')
def stack_daily_nwps(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables):
x = dict()
y = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
if project['static_data']['horizon'] == 'day_ahead':
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
else:
p_dates = pd.date_range(t + pd.DateOffset(hours=1), t + pd.DateOffset(hours=24), freq='H')
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
y[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
if not self.is_for_test:
inp['Obs_lag1'] = inp['Obs_lag1'] + np.random.normal(0, 0.05) * inp['Obs_lag1']
inp['Obs_lag2'] = inp['Obs_lag2'] + np.random.normal(0, 0.05) * inp['Obs_lag2']
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
y[project_id] = pd.concat([y[project_id], pd.DataFrame(data.loc[date, project_id],
columns=['target'],
index=[date])])
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
                            date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
y[project['_id']] = pd.concat(
[y[project['_id']], pd.DataFrame(data.loc[date, project['_id']],
columns=['target'], index=[date])])
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
return x, y, x_3d, t.strftime('%d%m%y%H%M')
def stack_daily_nwps_rabbitmq(self, t, path_nwp, nwp_model, project, variables):
x = dict()
x_3d = dict()
nwps = project['nwp']
p_dates = pd.date_range(t, t + pd.DateOffset(days=3) - pd.DateOffset(hours=1), freq='H')
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
try:
date_nwp = date.strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_rabbitmq(date, nwp, nwp_prev, nwp_next, project['static_data']['type'])
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
                    date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
return x, x_3d, t.strftime('%d%m%y%H%M')
def stack_daily_nwps_online(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables):
x = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
if project['static_data']['horizon'] == 'day_ahead':
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
else:
p_dates = pd.date_range(t + pd.DateOffset(hours=1), t + pd.DateOffset(hours=24), freq='15min')
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - | pd.DateOffset(hours=1) | pandas.DateOffset |
import os
import numpy as np
import pandas as pd
import lightgbm as lgb
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression, RidgeClassifierCV, ElasticNetCV, LassoCV, LassoLarsCV
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score
from sklearn.metrics import f1_score
from scipy.sparse import hstack
from HelperFunctions import get_now
class_names = ['INFORMATIVE', 'UNINFORMATIVE']
train = | pd.read_table('../data/raw/train.tsv', sep='\t') | pandas.read_table |
# -*- coding: utf-8 -*-
from __future__ import print_function
from distutils.version import LooseVersion
from numpy import nan, random
import numpy as np
from pandas.compat import lrange
from pandas import (DataFrame, Series, Timestamp,
date_range)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData, _check_mixed_float
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
class TestDataFrameMissingData(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_dropEmptyRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
original = Series(mat, index=self.frame.index, name='foo')
expected = original.dropna()
inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna(how='all')
# check that original was preserved
assert_series_equal(frame['foo'], original)
inplace_frame1.dropna(how='all', inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame1['foo'], expected)
smaller_frame = frame.dropna(how='all', subset=['foo'])
inplace_frame2.dropna(how='all', subset=['foo'], inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame2['foo'], expected)
def test_dropIncompleteRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
frame['bar'] = 5
original = Series(mat, index=self.frame.index, name='foo')
inp_frame1, inp_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna()
assert_series_equal(frame['foo'], original)
inp_frame1.dropna(inplace=True)
exp = Series(mat[5:], index=self.frame.index[5:], name='foo')
| tm.assert_series_equal(smaller_frame['foo'], exp) | pandas.util.testing.assert_series_equal |
import datetime
import logging
import pandas
import sqlobject
def get_last_values(currency, frequency, count=None):
"""Get last values."""
logging.debug('nb_values: %d', count)
result = Bollinger.select(
Bollinger.q.currency == currency
).orderBy(Bollinger.q.date_time)
if count:
result = result[-count:]
return result
def insert_value(currency, frequency, values):
kwargs = {'currency': currency,
'date_time': datetime.datetime.now(),
'frequency': frequency}
if values is not None and len(values) >= frequency:
mean = pandas.Series(values[-frequency:]).mean().item()
std = | pandas.Series(values[-frequency:]) | pandas.Series |
import warnings
import numpy as np
def to_dataframe(result):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import pandas as pd
def collection_to_dataframe(n, x):
n = str(n).replace('[', '(').replace(']', ')')
df = | pd.DataFrame() | pandas.DataFrame |
import unittest
import pickle
import pathlib
import cobra
import pandas as pd
from BFAIR.mfa.sampling import (
model_rxn_overlap,
rxn_coverage,
split_lumped_rxns,
split_lumped_reverse_rxns,
find_reverse_rxns,
combine_split_rxns,
cobra_add_split_rxns,
add_constraints,
add_feasible_constraints,
find_biomass_reaction,
get_min_solution_val,
replace_biomass_rxn_name,
bound_relaxation,
)
current_dir = str(pathlib.Path(__file__).parent.absolute())
class test_methods(unittest.TestCase):
maxDiff = None
# Create method to compare dataframes
def assertDataframeEqual(self, a, b, msg):
try:
pd.testing.assert_frame_equal(a, b)
except AssertionError as e:
raise self.failureException(msg) from e
# Create method to compare Series
def assertSeriesEqual(self, a, b, msg):
try:
pd.testing.assert_series_equal(a, b)
except AssertionError as e:
raise self.failureException(msg) from e
def setUp(self):
file_obj = open(
current_dir
+ "/test_data/MFA_sampling/sampling_test_data.obj",
"rb",
)
(
fittedFluxes,
unconstraint_bounds,
biomass_rxn,
adj_fittedFluxes,
coverage,
lumped_rxns,
overlap,
fittedFluxes_split_temp,
lumped_reverse_rxns,
reverse_df,
fittedFluxes_split,
fittedFluxes_split_combined,
rxns_to_split,
model_preproces_bounds,
constrained_bounds,
min_val,
adj_fittedFluxes,
problems,
feasible_constrained_bounds,
cons_table,
relaxed_bounds,
) = pickle.load(file_obj)
file_obj.close()
self.simulation_info = pd.read_csv(
current_dir
+ "/test_data/MFA_modelInputsData/experimentalMS_data_I.csv"
)
self.simulation_id = "WTEColi_113C80_U13C20_01"
self.model = cobra.io.load_json_model(
current_dir + "/test_data/MFA_modelInputsData/iJO1366.json")
self.constrained_model = add_constraints(
self.model.copy(), adj_fittedFluxes
)
self.fittedFluxes = fittedFluxes
self.unconstraint_bounds = unconstraint_bounds
self.biomass_rxn = biomass_rxn
self.adj_fittedFluxes = adj_fittedFluxes
self.coverage = coverage
self.lumped_rxns = lumped_rxns
self.overlap = overlap
self.fittedFluxes_split_temp = fittedFluxes_split_temp
self.lumped_reverse_rxns = lumped_reverse_rxns
self.reverse_df = reverse_df
self.fittedFluxes_split = fittedFluxes_split
self.fittedFluxes_split_combined = fittedFluxes_split_combined
self.rxns_to_split = rxns_to_split
self.model_preproces_bounds = model_preproces_bounds
self.constrained_bounds = constrained_bounds
self.min_val = min_val
self.adj_fittedFluxes = adj_fittedFluxes
self.problems = problems
self.feasible_constrained_bounds = feasible_constrained_bounds
self.cons_table = cons_table
self.relaxed_bounds = relaxed_bounds
# Add the method to compare dataframes in the class
self.addTypeEqualityFunc(pd.DataFrame, self.assertDataframeEqual)
self.addTypeEqualityFunc(pd.Series, self.assertSeriesEqual)
@staticmethod
def get_bounds_df(model):
# Helper function to have a way to compare the bounds
bounds_temp = {}
# Round to 5 decimal places to avoid issues in very low values
for cnt, rxn in enumerate(model.reactions):
bounds_temp[cnt] = {
"rxn_id": rxn.id,
"lb": rxn.lower_bound,
"ub": rxn.upper_bound,
}
return pd.DataFrame.from_dict(bounds_temp, "index")
def test_model_rxn_overlap(self):
overlap = self.overlap
overlap_ = model_rxn_overlap(self.adj_fittedFluxes, self.model)
self.assertEqual(overlap, overlap_)
def test_rxn_coverage(self):
coverage_ = rxn_coverage(self.adj_fittedFluxes, self.model)
self.assertEqual(self.coverage, coverage_)
def test_split_lumped_rxns(self):
lumped_rxns = self.lumped_rxns
# Check the example notebook for details
lumped_ids = [1, 21, 26, 27, 53, 54, 67, 74, 82]
mask = []
overlap = model_rxn_overlap(self.adj_fittedFluxes, self.model)
for i in overlap.iteritems():
if i[0] in lumped_ids:
mask.append(True)
else:
mask.append(False)
lumped_rxns_ = model_rxn_overlap(
self.adj_fittedFluxes, self.model)[mask]
self.assertEqual(lumped_rxns, lumped_rxns_)
fittedFluxes_split_temp_ = split_lumped_rxns(
lumped_rxns_, self.adj_fittedFluxes)
self.assertEqual(
self.fittedFluxes_split_temp, fittedFluxes_split_temp_)
def test_split_lumped_reverse_rxns(self):
lumped_reverse_rxns = self.lumped_reverse_rxns
lumped_reverse_ids = [2, 28, 55, 68]
mask_reverse = []
for i in model_rxn_overlap(
self.fittedFluxes_split_temp, self.model).iteritems():
if i[0] in lumped_reverse_ids:
mask_reverse.append(True)
else:
mask_reverse.append(False)
lumped_reverse_rxns_ = model_rxn_overlap(
self.fittedFluxes_split_temp, self.model)[mask_reverse]
self.assertEqual(lumped_reverse_rxns, lumped_reverse_rxns_)
fittedFluxes_split_ = split_lumped_reverse_rxns(
lumped_reverse_rxns_, self.fittedFluxes_split_temp)
self.assertEqual(self.fittedFluxes_split, fittedFluxes_split_)
def test_find_reverse_rxns(self):
reverse_df_ = find_reverse_rxns(self.fittedFluxes_split)
self.assertEqual(self.reverse_df, reverse_df_)
def test_combine_split_rxns(self):
# Check the example notebook for details
lumped_ids = [1, 21, 26, 27, 53, 54, 67, 74, 82]
mask = []
overlap = model_rxn_overlap(self.adj_fittedFluxes, self.model)
for i in overlap.iteritems():
if i[0] in lumped_ids:
mask.append(True)
else:
mask.append(False)
lumped_rxns = model_rxn_overlap(
self.adj_fittedFluxes, self.model)[mask]
fittedFluxes_split_temp = split_lumped_rxns(
lumped_rxns, self.adj_fittedFluxes)
lumped_reverse_ids = [2, 28, 55, 68]
mask_reverse = []
for i in model_rxn_overlap(
fittedFluxes_split_temp, self.model).iteritems():
if i[0] in lumped_reverse_ids:
mask_reverse.append(True)
else:
mask_reverse.append(False)
lumped_reverse_rxns = model_rxn_overlap(
fittedFluxes_split_temp, self.model)[mask_reverse]
fittedFluxes_split_ = split_lumped_reverse_rxns(
lumped_reverse_rxns, fittedFluxes_split_temp)
fittedFluxes_split_combined_, rxns_to_split_ = combine_split_rxns(
fittedFluxes_split_)
self.assertEqual(self.fittedFluxes_split_combined, fittedFluxes_split_combined_)
self.assertEqual(self.rxns_to_split, rxns_to_split_)
def test_cobra_add_split_rxns(self):
model_split = self.model.copy()
cobra_add_split_rxns(self.rxns_to_split, model_split)
model_preproces_bounds_ = self.get_bounds_df(model_split)
self.assertEqual(self.model_preproces_bounds, model_preproces_bounds_)
def test_add_constraints(self):
unconstraint_bounds = self.unconstraint_bounds
constrained_bounds = self.constrained_bounds
constrained_model = add_constraints(
self.model.copy(),
self.adj_fittedFluxes,
)
constrained_bounds_ = self.get_bounds_df(constrained_model)
self.assertFalse(unconstraint_bounds.equals(constrained_bounds_))
self.assertEqual(constrained_bounds, constrained_bounds_)
def test_find_biomass_reaction(self):
biomass_reaction_ids = find_biomass_reaction(
self.constrained_model,
biomass_string=["Biomass", "BIOMASS", "biomass"],
)
# This one basically just checks if there is an output
self.assertIsInstance(biomass_reaction_ids, list)
# This one makes sure that the output list contains a reaction name
self.assertIsInstance(biomass_reaction_ids[0], str)
def test_get_min_solution_val(self):
# Find in fittedFluxes
fittedFluxes = self.fittedFluxes
min_val = self.min_val
min_val_ = get_min_solution_val(
fittedFluxes, biomass_string="Biomass"
)
self.assertEqual(min_val, min_val_)
# Do not find in fake example (=0)
fauxfittedFluxes = self.fittedFluxes
fauxfittedFluxes.at[11, "rxn_id"] = "Removed_ID"
no_BM_val = get_min_solution_val(
fauxfittedFluxes, biomass_string="Biomass"
)
self.assertEqual(no_BM_val, 0)
def test_replace_biomass_rxn_name(self):
# Replace in fittedFluxes
adj_fittedFluxes = self.adj_fittedFluxes
adj_fittedFluxes_ = replace_biomass_rxn_name(
self.fittedFluxes,
biomass_rxn_name="BIOMASS_Ec_iJO1366_core_53p95M",
biomass_string="Biomass",
)
self.assertEqual(adj_fittedFluxes, adj_fittedFluxes_)
# Replace in a fake sample
test_df = | pd.DataFrame({"rxn_id": "Biomass"}, index=[0]) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # SW 1700
# In[2]:
import numpy as np
import os
#import ipdb
def connect_dataset(file_list, icond_file_list, outputdir,
topodx=15, roi=2500, offset=5000,gclass_num=5,test_data_num=500):
"""
    Concatenate multiple datasets.
"""
#ipdb.set_trace()
#Reading and combining files Decide start and end points of the learning area and convert them to grid numbers
H = np.loadtxt(file_list[0], delimiter = ',')
icond = np.loadtxt(icond_file_list[0], delimiter = ',')
#Reading and combining files
if len(file_list) > 1:
for i in range(1, len(file_list)):
H_temp = np.loadtxt(file_list[i], delimiter = ',')
icond_temp = np.loadtxt(icond_file_list[i], delimiter = ',')
H = np.concatenate((H,H_temp),axis=0)
icond = np.concatenate((icond,icond_temp),axis = 0)
roi_grids = int(roi / topodx)
num_grids = int(H.shape[1] / gclass_num)
H_subset = np.zeros([H.shape[0], roi_grids * gclass_num])
for i in range(gclass_num):
H_subset[:, i*roi_grids:(i+1)*roi_grids] = H[:, i*num_grids:(i*num_grids+roi_grids)]
#Obtain the maximum and minimum values of data
max_x = np.max(H_subset)
min_x = np.min(H_subset)
icond_max = np.max(icond, axis=0)
icond_min = np.min(icond, axis=0)
#Split the data into tests and training
H_train = H_subset[0:-test_data_num,:]
H_test = H_subset[H_subset.shape[0] - test_data_num:,:]
icond_train = icond[0:-test_data_num,:]
icond_test = icond[H.shape[0] - test_data_num:,:]
#Save the data
if not os.path.exists(outputdir):
os.mkdir(outputdir)
np.savetxt(outputdir + '/x_train.txt',H_train,delimiter = ',')
np.savetxt(outputdir + '/x_test.txt',H_test,delimiter = ',')
np.savetxt(outputdir + '/icond_train.txt',icond_train,delimiter = ',')
np.savetxt(outputdir + '/icond_test.txt',icond_test,delimiter = ',')
np.savetxt(outputdir + '/icond_min.txt',icond_min,delimiter = ',')
np.savetxt(outputdir + '/icond_max.txt',icond_max,delimiter = ',')
np.savetxt(outputdir + '/x_minmax.txt',[min_x, max_x],delimiter = ',')
if __name__=="__main__":
original_data_dir = "/home/rimali2009/Journal_2"
parent_dir = "/home/rimali2009/Journal_2"
if not os.path.exists(parent_dir):
os.mkdir(parent_dir)
outputdir = parent_dir + "/data_g6_j2_roi1700_thai"
file_list = ['/home/rimali2009/Journal_2/eta_5000_g6_300grid_thai_g5.csv']
initial_conditions = ['/home/rimali2009/Journal_2/start_param_random_5000_thai_g5.csv']
connect_dataset(file_list, initial_conditions, outputdir, test_data_num=500, gclass_num=5, topodx=15., roi=1700)
# In[3]:
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 7 15:43:18 2017
@author: hanar
"""
import time
import numpy as np
import os
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD
from keras.optimizers import RMSprop
from keras.optimizers import Adagrad
from keras.optimizers import Adadelta
from keras.optimizers import Adam
from keras.optimizers import Adamax
from keras.optimizers import Nadam
from keras.callbacks import ModelCheckpoint
from keras.callbacks import EarlyStopping
from keras.callbacks import TensorBoard
from keras.models import load_model
#from keras.utils.visualize_util import plot
import matplotlib.pyplot as plt
import keras.callbacks
import keras.backend.tensorflow_backend as KTF
import tensorflow as tf
#Global variables for normalizing parameters
max_x = 1.0
min_x = 0.0
max_y = 1.0
min_y = 0.0
def deep_learning_tsunami(resdir, X_train_raw, y_train_raw, X_test_raw, y_test_raw,
_lr=0.02, _decay=0,
_validation_split=0.2, _batch_size=32,
_momentum=0.9, _nesterov=True,
num_layers=4, dropout=0.5,
node_num = 2500,
_epochs=2000):
"""
Creating the inversion model of turbidity currents by deep learning
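    The network sketched below stacks num_layers dense layers of node_num ReLU units
    with dropout after each hidden layer and a ReLU output layer sized to y_train; it is
    compiled with mean squared error and SGD with Nesterov momentum, and checkpointed
    to "model3.hdf5" during training.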
"""
#Normalizing dataset
X_train = get_normalized_data(X_train_raw, min_x, max_x)
X_test = get_normalized_data(X_test_raw, min_x, max_x)
y_train = get_normalized_data(y_train_raw, min_y, max_y)
y_test = get_normalized_data(y_test_raw, min_y, max_y)
#Generation of neural network model
model = Sequential()
model.add(Dense(node_num, input_dim=X_train.shape[1], activation='relu', kernel_initializer ='glorot_uniform'))#1st layer
model.add(Dropout(dropout))
for i in range(num_layers - 2):
model.add(Dense(node_num, activation='relu', kernel_initializer ='glorot_uniform'))#2nd layer
model.add(Dropout(dropout))
model.add(Dense(y_train.shape[1], activation = 'relu', kernel_initializer ='glorot_uniform')) #last layer
#Compiling the model
model.compile(loss="mean_squared_error",
optimizer=SGD(lr=_lr, decay=_decay, momentum=_momentum, nesterov=_nesterov),
#optimizer=Adadelta(),
metrics=["mean_squared_error"])
#Perform learning
t = time.time()
check = ModelCheckpoint("model3.hdf5")
#es_cb = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')
#tb_cb = TensorBoard(log_dir=resdir, histogram_freq=2, write_graph=True, write_images=True)
history = model.fit(X_train, y_train, epochs=_epochs,
validation_split=_validation_split, batch_size=_batch_size,
callbacks=[check])
#Evaluate learning result
loss_and_metrics = model.evaluate(X_test,y_test)
print("\nloss:{} mse:{}".format(loss_and_metrics[0],loss_and_metrics[1]))
print("Elapsed time: {:.1f} sec.".format(time.time()-t))
#Visualize learning result
#plot(model, to_file="model.png", show_shapes=True, show_layer_names=True)
    # Plot how the model's training metrics changed over the epochs
plot_history(history)
return model, history
def apply_model(model, X, min_x, max_x, min_y, max_y):
"""
    Apply the trained model to new inputs.
    The maximum and minimum values of X and Y are required to normalize the inputs
    and de-normalize the predictions.
"""
X_norm = (X - min_x) / (max_x - min_x)
Y_norm = model.predict(X_norm)
Y = Y_norm*(max_y - min_y)+min_y
return Y
def plot_history(history):
# Plot accuracy history
plt.plot(history.history['mean_squared_error'],"o-",label="mse")
plt.plot(history.history['val_mean_squared_error'],"o-",label="val mse")
plt.title('model mse')
plt.xlabel('epoch')
plt.ylabel('mse')
plt.legend(loc="upper right")
plt.show()
    # # Plot the loss history
# plt.plot(history.history['loss'],"o-",label="loss",)
# plt.plot(history.history['val_loss'],"o-",label="val_loss")
# plt.title('model loss')
# plt.xlabel('epoch')
# plt.ylabel('loss')
# plt.legend(loc='upper right')
# plt.show()
def test_model(model, x_test):
#Test the results
x_test_norm = get_normalized_data(x_test, min_x, max_x)
test_result_norm = model.predict(x_test_norm)
test_result = get_raw_data(test_result_norm, min_y, max_y)
return test_result
def save_result(savedir, model, history, test_result):
np.savetxt(savedir + 'test_result.txt',test_result,delimiter=',')
np.savetxt(savedir+'loss.txt',history.history.get('loss'),delimiter=',')
np.savetxt(savedir+'val_loss.txt',history.history.get('val_loss'),delimiter=',')
#Serialize model and save
print('save the model')
model.save(savedir + 'model3.hdf5')
def load_data(datadir):
"""
This function load training and test data sets, and returns variables
"""
global min_x, max_x, min_y, max_y
x_train = np.loadtxt(datadir + 'x_train.txt',delimiter=',')
x_test = np.loadtxt(datadir + 'x_test.txt',delimiter=',')
y_train = np.loadtxt(datadir + 'icond_train.txt',delimiter=',')
y_test = np.loadtxt(datadir + 'icond_test.txt',delimiter=',')
min_y = np.loadtxt(datadir + 'icond_min.txt',delimiter=',')
max_y = np.loadtxt(datadir + 'icond_max.txt',delimiter=',')
[min_x, max_x] = np.loadtxt(datadir + 'x_minmax.txt',delimiter=',')
return x_train, y_train, x_test, y_test
def set_minmax_data(_min_x, _max_x, _min_y, _max_y):
global min_x, max_x, min_y, max_y
min_x, max_x, min_y, max_y = _min_x, _max_x, _min_y, _max_y
return
def get_normalized_data(x, min_val, max_val):
"""
Normalizing the training and test dataset
"""
x_norm = (x - min_val) / (max_val - min_val)
return x_norm
def get_raw_data(x_norm, min_val, max_val):
"""
Get raw data from the normalized dataset
"""
x = x_norm * (max_val - min_val) + min_val
return x
if __name__ == "__main__":
#Reading data
datadir = '/home/rimali2009/Journal_2/data_g6_j2_roi1700_thai/'
resdir = '/home/rimali2009/Journal_2/result_g6_j2_roi1700_thai/'
if not os.path.exists(resdir):
os.mkdir(resdir)
x_train, y_train, x_test, y_test = load_data(datadir)
#Execution of learning
testcases = [5000]
for i in range(len(testcases)):
resdir_case = resdir + '{}/'.format(testcases[i])
if not os.path.exists(resdir_case):
os.mkdir(resdir_case)
x_train_sub = x_train[0:testcases[i],:]
y_train_sub = y_train[0:testcases[i],:]
model, history = deep_learning_tsunami(resdir_case, x_train_sub, y_train_sub, x_test, y_test, num_layers=5)
#Verify and save results
result = test_model(model, x_test)
save_result(resdir_case,model,history,result)
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import ipdb
get_ipython().run_line_magic('matplotlib', 'inline')
datadir = '/home/rimali2009/Journal_2/data_g6_j2_roi1700_thai/'
resdir = '/home/rimali2009/Journal_2/result_g6_j2_roi1700_thai/5000/'
test_result = np.loadtxt(resdir + 'test_result.txt',delimiter=',')
icond = np.loadtxt(datadir + 'icond_test.txt',delimiter=',')
print(icond.shape)
loss = np.loadtxt(resdir+'loss.txt',delimiter=',')
epoch = range(0,2000)
vloss = np.loadtxt(resdir+'val_loss.txt',delimiter=',')
resi = test_result - icond
fig = plt.figure(num=None,dpi=250, facecolor='w', edgecolor='k')
plt.plot(epoch, loss, 'bo',label='Loss')
plt.plot(epoch, vloss, 'yo',label='Validation')
plt.xlabel('Epoch')
plt.ylabel('Mean Squared Error')
plt.legend(loc="upper right")
plt.savefig(resdir+ 'mse.pdf')
plt.show()
fig2 = plt.figure()
hfont = {'fontname':'Century Gothic'}
textcol = 'k'
titlelabel = ['Max Inundation Length','Flow Velocity', 'Max. Flow Depth', '$C_1$', '$C_2$', '$C_3$', '$C_4$','$C_5$']
xymin=[1700,2.0,1.5,0.0001,0.0001,0.0001,0.0001,0.0001]
xymax=[4500,10.0,12.0,0.02,0.02,0.02,0.02,0.02]
xstep=[500,1.5,1.5,0.005,0.005,0.005,0.005,0.005]
stepmin=[1700,1.0,2.0,0.000,0.0000,0.0000,0.0000,0.0000]
stepmax=[4550,10.5,13.0,0.025,0.025,0.025,0.025,0.025]
for i in range(len(titlelabel)):
plt.figure(num=None,dpi=250, facecolor='w', edgecolor='k')
plt.plot(icond[:,i],test_result[:,i],"o",markersize = 2.5)
x=icond[:,i]
y=test_result[:,i]
max_value = np.max([x, y])
min_value = np.min([x, y])
y_lim = plt.ylim([min_value * 0.8, max_value * 1.1])
x_lim = plt.xlim([min_value * 0.8, max_value * 1.1])
plt.plot(x_lim, y_lim, 'k-', color = 'k')
#plt.plot([xymin[i],xymax[i]],[xymin[i],xymax[i]],"-",color = 'k')
plt.axes().set_aspect('equal')
#plt.ylim(xymin[i],xymax[i])
#plt.xlim(xymin[i],xymax[i])
plt.xticks(np.arange(stepmin[i],stepmax[i], step=xstep[i]))
plt.yticks(np.arange(stepmin[i],stepmax[i], step=xstep[i]))
plt.xlabel('Original Value',color=textcol,size=14,**hfont)
plt.ylabel('Estimated Value',color=textcol,size=14,**hfont)
plt.title(titlelabel[i],color=textcol,size=14,**hfont)
plt.tick_params(labelsize=14,colors=textcol)
plt.savefig(resdir+titlelabel[i] + '.eps')
plt.savefig(resdir+titlelabel[i] + '.pdf')
#plt.show()
for i in range(len(titlelabel)):
plt.figure(num=None,dpi=250, facecolor='w', edgecolor='k')
plt.hist(resi[:,i],bins=20)
plt.title(titlelabel[i],color=textcol,size=14,**hfont)
plt.xlabel('Deviation from true value',color=textcol,size=14,**hfont)
plt.ylabel('Frequency',color=textcol,size=14,**hfont)
plt.tick_params(labelsize=14,colors=textcol)
plt.savefig(resdir+titlelabel[i] + 'hist' + '.eps')
plt.savefig(resdir+titlelabel[i] + 'hist' + '.pdf')
plt.show()
# In[2]:
from scipy.stats import variation
import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
datadir = '/home/rimali2009/Journal_2/data_g6_j2_roi1700_thai/'
resdir = '/home/rimali2009/Journal_2/result_g6_j2_roi1700_thai/5000/'
test_result = np.loadtxt(resdir + 'test_result.txt',delimiter=',')
icond = np.loadtxt(datadir + 'icond_test.txt',delimiter=',')
print(icond.shape)
resi = test_result - icond
titlelabel = ['Max Inundation Length','Flow Velocity', 'Max. Flow Depth', 'C_1', 'C_2', 'C_3', 'C_4','C_5']
for i in range(len(titlelabel)):
plt.figure(num=None,dpi=250, facecolor='w', edgecolor='k')
plt.hist(resi[:,i],bins=20)
print('Standard Deviation:', np.std(resi[:,i]))
print('Standard Deviation sample:', np.std(resi[:,i],ddof=1))
print('Mean:', np.mean(resi[:,i]))
print('CV:', np.std(resi[:,i],ddof=1)/np.mean(resi[:,i]))
plt.title(titlelabel[i],color=textcol,size=14,**hfont)
plt.xlabel('Deviation from true value',color=textcol,size=14,**hfont)
plt.ylabel('Frequency',color=textcol,size=14,**hfont)
plt.tick_params(labelsize=14,colors=textcol)
plt.savefig(resdir+titlelabel[i] + 'hist' + '.eps')
plt.savefig(resdir+titlelabel[i] + 'hist' + '.pdf')
plt.show()
# In[3]:
import numpy as np
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model
from scipy import stats
from scipy.interpolate import interp1d
import pandas as pd
from pykrige import OrdinaryKriging as OK
import ipdb
from scipy import stats
#import ipdb
#ipdb.set_trace()
datadir = '/home/rimali2009/Journal_2/data_g6_j2_roi1700_thai/'
resdir = '/home/rimali2009/Journal_2/result_g6_j2_roi1700_thai/5000/'
#Initial setting
if not "model" in locals():
model = load_model(resdir+'model3.hdf5')
# Load test datasets
X_test = np.loadtxt(datadir + 'x_test.txt',delimiter=',')
y_test = np.loadtxt(datadir + 'icond_test.txt',delimiter=',')
# Normalize the test datasets
min_x, max_x = np.loadtxt(datadir + 'x_minmax.txt',delimiter=',')
X_test_norm = (X_test - min_x) / (max_x - min_x)
#gclass = 3
#gclass_label = ["500 $\mu$m","125 $\mu$m","63 $\mu$m"]
gclass = 5
#gclass_label = ["615 ${\mu}m$","406 ${\mu}m$","268 ${\mu}m$","177 ${\mu}m$", "117 ${\mu}m$","77 ${\mu}m$"]
gclass_label = ["659 ${\mu}m$","329 ${\mu}m$","164 ${\mu}m$","82 ${\mu}m$","41 ${\mu}m$"]
topodx = 15.0
coord_num = int(model.layers[0].input_shape[1]/gclass)
#Acquires a value for normalizing input data to [0, 1]
y_min = np.loadtxt(datadir + 'icond_min.txt',delimiter=',')
y_max = np.loadtxt(datadir + 'icond_max.txt',delimiter=',')
# Load outcrop data
outcrop = pd.read_csv('../Journal_2/Thai_gs5.csv')
outcrop = outcrop.sort_values('distance')
outcrop['distance'] = outcrop['distance'] - 0
outcrop_num = len(outcrop['distance'])
print(outcrop)
#Preparation under interpolation
thick_interp_at_outcrop = np.zeros([X_test.shape[0],outcrop_num*gclass])
thick_interp = np.zeros([X_test.shape[0],coord_num*gclass])#Interpolated sample thickness data
outcrop_x_id = np.round(outcrop['distance']/topodx).astype(np.int32) #Index number of sampling point in inverse analysis system
x = np.arange(0,coord_num*topodx,topodx)
# Interpolation of test datasets at the outcrop locations
for i in range(X_test.shape[0]):
for j in range(gclass):
#f = interp1d(x,np.log10(X_test_norm[i,j * coord_num : (j+1) * coord_num]), kind="cubic",bounds_error=False,fill_value='extrapolate')
f= interp1d(x,X_test_norm[i,j * coord_num : (j+1) * coord_num], kind="cubic",bounds_error=False,fill_value='extrapolate')
thick_interp_at_outcrop[i,outcrop_num*j:outcrop_num*(j+1)] = f(outcrop['distance']) #Supplemented data
# Interpolation of test datasets at the grids of the forward model
for j in range(gclass):
#f = interp1d(outcrop['distance'],np.log10(thick_interp_at_outcrop[i,j * outcrop_num : (j+1) * outcrop_num]), kind="cubic",bounds_error=False,fill_value='extrapolate')
f = interp1d(outcrop['distance'],thick_interp_at_outcrop[i,j * outcrop_num : (j+1) * outcrop_num], kind="cubic",bounds_error=False,fill_value='extrapolate')
thick_interp[i,coord_num*j:coord_num*(j+1)] = f(x) #Supplemented data
#ipdb.set_trace()
#Interpolation by kriging
#vparams = np.array([[0.035, 10000., 0.001],[0.006, 10000., 0.002],[0.005, 10000., 0.002],[0.035, 10000., 0.001]])
#for j in range(gclass):
#okip = OK(outcrop['distance'],np.zeros(outcrop['distance'].shape),outcrop.iloc[:,j+1],variogram_model='linear',)
#okip.display_variogram_model()
#ipdata, ipstd = okip.execute('grid',x,np.array([0.]))
#ipdata = np.squeeze(ipdata)
#thick_interp[0,coord_num*j:coord_num*(j+1)] = ipdata #Assign complemented data
#Clip negative interpolated thicknesses to zero
thick_interp[thick_interp < 0] = 0
#Perform inverse analysis
test_result_outcrop = model.predict(thick_interp)
test_result_outcrop = test_result_outcrop * (y_max - y_min) + y_min
print(test_result_outcrop)
np.savetxt('outcrop_location_interp.txt',test_result_outcrop, delimiter=',')
test_result=np.loadtxt('outcrop_location_interp.txt', delimiter=',')
test_result_normal = np.loadtxt(resdir + 'test_result.txt',delimiter=',')
resi=test_result-y_test
titlelabel = ['Max Inundation Length','Flow Velocity', 'Max. Flow Depth', 'C_1', 'C_2', 'C_3', 'C_4','C_5']
hfont = {'fontname':'Century Gothic'}
textcol = 'k'
xymin=[1700,2.0,1.5,0.0001,0.0001,0.0001,0.0001,0.0001]
xymax=[4500,10.0,12.0,0.02,0.02,0.02,0.02,0.02]
xstep=[500,1.5,1.5,0.005,0.005,0.005,0.005,0.005]
stepmin=[2500,1.0,2.0,0.000,0.0000,0.0000,0.0000,0.0000]
stepmax=[4550,10.5,13.0,0.025,0.025,0.025,0.025,0.025]
# Plot curve fitting
for i in range(len(gclass_label)):
plt.plot(x,thick_interp[0,coord_num * i:coord_num * (i+1)], label='estimated')
for j in range(gclass):
plt.plot(x,X_test_norm[0,j * coord_num : (j+1) * coord_num],'o',label='test')
#plt.plot(outcrop['distance'], thick_interp_at_outcrop[0,outcrop_num*j:outcrop_num*(j+1)],'o',label='test')
plt.plot()
plt.legend()
for i in range(len(titlelabel)):
plt.figure(num=None,dpi=250, facecolor='w', edgecolor='k')
plt.plot(y_test[:,i],test_result[:,i],"o", markersize=4.5)
plt.plot(y_test[:,i],test_result_normal[:,i],"*",label='estimate',markersize=3.5)
x=y_test[:,i]
y=test_result_normal[:,i]
max_value = np.max([x, y])
min_value = np.min([x, y])
y_lim = plt.ylim([min_value * 0.8, max_value * 1.1])
x_lim = plt.xlim([min_value * 0.8, max_value * 1.1])
plt.plot(x_lim, y_lim, 'k-', color = 'k')
plt.title(titlelabel[i],color=textcol,size=14,**hfont)
plt.xlabel('True values',color=textcol,size=14,**hfont)
plt.ylabel('Estimated values',color=textcol,size=14,**hfont)
plt.legend()
plt.axes().set_aspect('equal')
plt.xticks(np.arange(stepmin[i],stepmax[i], step=xstep[i]))
plt.yticks(np.arange(stepmin[i],stepmax[i], step=xstep[i]))
#plt.plot(x_lim, y_lim, color = 'k')
plt.tick_params(labelsize=14,colors='k')
plt.savefig(resdir+titlelabel[i] + 'outcrop_location' + '.pdf')
plt.show()
for i in range(len(titlelabel)):
plt.figure(num=None,dpi=250, facecolor='w', edgecolor='k')
plt.hist(resi[:,i],bins=20)
print('Standard Deviation sample:', np.std(resi[:,i],ddof=1))
print('Mean:', np.mean(resi[:,i]))
print('mode',stats.mode(resi[:,i]))
print('median', np.median(resi[:,i]))
plt.title(titlelabel[i],color=textcol,size=14,**hfont)
plt.xlabel('Deviation from true value',color=textcol,size=14,**hfont)
plt.ylabel('Frequency',color=textcol,size=14,**hfont)
plt.tick_params(labelsize=14,colors=textcol)
plt.savefig(resdir+titlelabel[i] + 'hist_outcrop_location' + '.eps')
plt.savefig(resdir+titlelabel[i] + 'hist_outcrop_location' + '.pdf')
plt.show()
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model
from scipy import stats
from scipy.interpolate import interp1d
import pandas as pd
from pykrige import OrdinaryKriging as OK
import ipdb
datadir = '/home/rimali2009/Journal_2/data_g6_j2_roi1700_thai/'
resdir = '/home/rimali2009/Journal_2/result_g6_j2_roi1700_thai/5000/'
#Initial setting
if "model" not in locals():
model = load_model(resdir+'model3.hdf5')
gclass = 5
gclass_label = ["659 ${\mu}m$","329 ${\mu}m$","164 ${\mu}m$", "82 ${\mu}m$","41 ${\mu}m$"]
topodx = 15.0
coord_num = int(model.layers[0].input_shape[1]/gclass)
#Load the minimum and maximum values used for the [0, 1] normalization
min_x, max_x = np.loadtxt(datadir + 'x_minmax.txt',delimiter=',')
y_min = np.loadtxt(datadir + 'icond_min.txt',delimiter=',')
y_max = np.loadtxt(datadir + 'icond_max.txt',delimiter=',')
#Read outcrop data
#dist_max = 21700. #Distance of distal end of learning data
outcrop = | pd.read_csv('../Journal_2/Thai_gs5.csv') | pandas.read_csv |
import sys
import pandas as pd
import numpy as np
import json
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
'''
This function load the datasets messages and categories and merge based on id column.
Params:
messages_filepath (str): String that contain the path to messages file
categories_filepath (str): String that contain the path to categories file
Returns:
df (pandas DataFrame): DataFrame with columns: id,message,original,genre,categories
row: A single message
columns:
id --> Id for each message
messages --> Text of the message
categories --> A single column containing the category labels for the message
'''
messages = pd.read_csv(messages_filepath)
categories = | pd.read_csv(categories_filepath) | pandas.read_csv |
import sys
import csv
import pandas as pd
import ctdcal.sbe_reader as sbe_rd
import ctdcal.sbe_equations_dict as sbe_eq
import gsw
DEBUG = False
#lookup table for sensor data
###DOUBLE CHECK TYPE IS CORRECT###
short_lookup = {
'55':{'short_name': 'CTDTMP', 'long_name':'SBE 3+ Temperature', 'units': 'ITS-90', 'type': 'float64'},
'45':{'short_name': 'CTDPRS', 'long_name':'SBE 9+ Pressure', 'units': 'DBAR', 'type': 'float64'},
'3':{'short_name': 'CTDCOND', 'long_name':'SBE 4 Conductivity', 'units': 'MSPCM', 'type':'float64'},
'38':{'short_name': 'CTDOXY', 'long_name':'SBE 43 Oxygen', 'units': 'MLPL', 'type':'float64'},
#'38':{'short_name': 'CTDOXYVOLTS', 'long_name':'SBE 43 Oxygen Volts', 'units': '0-5VDC', 'type':'float64'},
'11':{'short_name': 'FLUOR', 'long_name':'Seapoint Fluorometer', 'units': '0-5VDC', 'type':'float64'},
'27':{'short_name': 'FREE', 'long_name':'empty', 'units':'NA', 'type':'NA'},
'0':{'short_name': 'ALT', 'long_name':'Altitude', 'units':'M', 'type':'float64'},
'71':{'short_name': 'CTDXMISS', 'long_name':'CStar', 'units': '0-5VDC', 'type':'float64'},
'61':{'short_name': 'U_DEF', 'long_name':'user defined', 'units':'0-5VDC', 'type':'float64'},
'1000':{'short_name': 'CTDSAL', 'long_name':'Salinity (C1 T1)', 'units':'PSU', 'type':'float64'},
'20':{'short_name': 'CTDFLUOR', 'long_name':'WetlabECO_AFL_FL_Sensor', 'units':'0-5VDC', 'type':'float64'}, #check short_name later
'42':{'short_name':'PAR', 'long_name':'PAR/Irradiance, Biospherical/Licor', 'units':'0-5VDC', 'type':'float64'},
'51':{'short_name':'REF_PAR', 'long_name':'Surface PAR/Irradiance, Biospherical/Licor', 'units':'0-5VDC', 'type':'float64'},
'70':{'short_name': 'CTDBACKSCATTER', 'long_name': 'WetlabECO_BB_Sensor', 'units':'0-5VDC', 'type':'float64'}
}
def debugPrint(*args, **kwargs):
if DEBUG:
errPrint(*args, **kwargs)
def errPrint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def convertFromFiles(hex_file, xmlcon_file, debug=False):
"""Handler to convert engineering data to sci units automatically.
Takes the full path and filename of the .hex and .XMLCON as arguments.
Optionally takes a boolean debug flag to specify whether or not to display
verbose messages to stderr
"""
global DEBUG
DEBUG = debug
sbeReader = sbe_rd.SBEReader.from_paths(hex_file, xmlcon_file)
return convertFromSBEReader(sbeReader, DEBUG)
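# A minimal usage sketch (hypothetical file paths; the .hex and .XMLCON files must come from the same cast):
# converted_df = convertFromFiles('/data/sta001.hex', '/data/sta001.XMLCON', debug=True)
# converted_df.to_pickle('/data/sta001_cnv.pkl')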
def convertFromSBEReader(sbeReader, debug=False):
"""Handler to convert engineering data to sci units automatically.
Takes SBEReader object that is already connected to the .hex and .XMLCON files.
Optionally takes a boolean debug flag to specify whether or not to display
verbose messages to stderr
"""
global DEBUG
DEBUG = debug
# Retrieve parsed scans
rawData = sbeReader.parsed_scans
# Convert raw data to dataframe
raw_df = pd.DataFrame(rawData)
raw_df.index.name = 'index'
raw_df = raw_df.apply(pd.to_numeric, errors="ignore")
#debugPrint("Raw Data Types:", raw_df.dtypes)
#debugPrint("Raw Data:", raw_df.head)
# Retrieve Config data
rawConfig = sbeReader.parsed_config()
# The meta data field needs to be processed seperately and then joined with the converted_df
debugPrint("Building meta data dataframe... ", end='')
metaArray = [line.split(',') for line in sbeReader._parse_scans_meta().tolist()]
metaArrayheaders = sbeReader._breakdown_header()
meta_df = pd.DataFrame(metaArray)
#print(meta_df)
#print(metaArrayheaders[0])
meta_df.columns = metaArrayheaders[0]
meta_df.index.name = 'index'
for i, x in enumerate(metaArrayheaders[0]):
#debugPrint('Set', metaArrayheaders[0][i], 'to', metaArrayheaders[1][i])
if not metaArrayheaders[1][i] == 'bool_':
meta_df[metaArrayheaders[0][i]] = meta_df[metaArrayheaders[0][i]].astype(metaArrayheaders[1][i])
else:
meta_df[metaArrayheaders[0][i]] = meta_df[metaArrayheaders[0][i]].str.match('True', na=False)
#debugPrint(meta_df[metaArrayheaders[0][i]].head())
debugPrint('Success!')
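# Keep the pressure temperature values aside; they are passed to the pressure conversion below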
pressure_temp = meta_df['pressure_temp_int'].tolist()
#needs to search sensor dictionary, and compute in order:
#temp, pressure, cond, salinity, oxygen, all aux.
#run one loop that builds a queue to determine order of processing, must track which column to pull
#process queue, store results in seperate arrays for reuse later
#once queue is empty, attach results together according to format order or xmlcon order - structure to keep track
queue_metadata = []
results = {}
temp_counter = 0
cond_counter = 0
oxygen_counter = 0
u_def_counter = 0
empty_counter = 0
processed_data = []
#Temporary arrays to hold sci_data in order to compute following sci_data (pressure, cond, temp, etc)
t_array = []
p_array = []
c_array = []
k_array = []
######
# The following are definitions for every key in the dict below:
#
# sensor_id = number assigned by SBE for identification in XML
# list_id = place in XML array by SBE for determining which sensor is which, alternatively channel number (freq+volt)
# channel_pos = is it the first, second, third, etc sensor of its type in the data file, aux sensors default to 0
# ranking = data processing ranking - temp first, then pressure, then conductivity, then oxygen, then aux
# column = column in the raw_df containing the engineering units to be converted to sci units
# sensor_info = xml sensor info to convert from eng units to sci units
######
for i, x in enumerate(rawConfig['Sensors']):
sensor_id = rawConfig['Sensors'][i]['SensorID']
#temp block
if sensor_id == '55':
temp_counter += 1
queue_metadata.append({'sensor_id': '55', 'list_id': i, 'channel_pos': temp_counter, 'ranking': 1, 'column': i, 'sensor_info':rawConfig['Sensors'][i]})
#cond block
elif str(sensor_id) == '3':
cond_counter += 1
queue_metadata.append({'sensor_id': '3', 'list_id': i, 'channel_pos': cond_counter, 'ranking': 3, 'column': i, 'sensor_info':rawConfig['Sensors'][i]})
#pressure block
elif str(sensor_id) == '45':
queue_metadata.append({'sensor_id': '45', 'list_id': i, 'channel_pos': '', 'ranking': 2, 'column': i, 'sensor_info':rawConfig['Sensors'][i]})
#oxygen block
elif str(sensor_id) == '38':
oxygen_counter += 1
queue_metadata.append({'sensor_id': '38', 'list_id': i, 'channel_pos': oxygen_counter, 'ranking': 5, 'column': i, 'sensor_info':rawConfig['Sensors'][i]})
#empty block
elif str(sensor_id) == '27':
empty_counter += 1
queue_metadata.append({'sensor_id': '27', 'list_id': i, 'channel_pos': empty_counter, 'ranking': 6, 'column': i, 'sensor_info':rawConfig['Sensors'][i]})
#u_def block
elif str(sensor_id) == '61':
u_def_counter += 1
queue_metadata.append({'sensor_id': '61', 'list_id': i, 'channel_pos': u_def_counter, 'ranking': 6, 'column': i, 'sensor_info':rawConfig['Sensors'][i]})
#aux block
else:
queue_metadata.append({'sensor_id': sensor_id, 'list_id': i, 'channel_pos': '', 'ranking': 7, 'column': i, 'sensor_info':rawConfig['Sensors'][i]})
#a temporary block in order to append basic salinity (t1, c1) to file. If additional salinity is needed (different combinations), it'll need a full reworking
queue_metadata.append({'sensor_id': '1000', 'list_id': 1000, 'channel_pos':'', 'ranking': 4, 'column': '', 'sensor_info':''})
#queue sorting forces it to be in order, so we don't worry about order here
#assumes first channel for each sensor is primary for computing following data, rework to accept file to determine which is primary
queue_metadata = sorted(queue_metadata, key = lambda sensor: sensor['ranking'])
#debugPrint("Queue Metadata:", json.dumps(queue_metadata, indent = 2))
#Empty dataframe to hold the converted data
converted_df = pd.DataFrame()
for temp_meta in queue_metadata:
column_name = '{0}{1}'.format(short_lookup[temp_meta['sensor_id']]['short_name'], temp_meta['channel_pos'])
###Temperature block
if temp_meta['sensor_id'] == '55':
debugPrint('Processing Sensor ID:', temp_meta['sensor_id'] + ',', short_lookup[temp_meta['sensor_id']]['long_name'])
converted_df[column_name] = sbe_eq.temp_its90_dict(temp_meta['sensor_info'], raw_df[temp_meta['column']])
if temp_meta['list_id'] == 0:
t_array = converted_df[column_name].astype(type('float', (float,), {}))
k_array = [273.15 + celsius for celsius in t_array]
debugPrint('\tPrimary temperature first reading:', t_array[0], short_lookup[temp_meta['sensor_id']]['units'])
#processed_data.append(temp_meta)
### Pressure block
elif temp_meta['sensor_id'] == '45':
debugPrint('Processing Sensor ID:', temp_meta['sensor_id'] + ',', short_lookup[temp_meta['sensor_id']]['long_name'])
converted_df[column_name] = sbe_eq.pressure_dict(temp_meta['sensor_info'], raw_df[temp_meta['column']], pressure_temp)
if temp_meta['list_id'] == 2:
p_array = converted_df[column_name].astype(type('float', (float,), {}))
debugPrint('\tPressure first reading:', p_array[0], short_lookup[temp_meta['sensor_id']]['units'])
#processed_data.append(temp_meta)
### Conductivity block
elif temp_meta['sensor_id'] == '3':
debugPrint('Processing Sensor ID:', temp_meta['sensor_id'] + ',', short_lookup[temp_meta['sensor_id']]['long_name'])
converted_df[column_name] = sbe_eq.cond_dict(temp_meta['sensor_info'], raw_df[temp_meta['column']], t_array, p_array)
if temp_meta['list_id'] == 1:
c_array = converted_df[column_name].astype(type('float', (float,), {}))
debugPrint('\tPrimary cond first reading:', c_array[0], short_lookup[temp_meta['sensor_id']]['units'])
#processed_data.append(temp_meta)
### Oxygen block
elif temp_meta['sensor_id'] == '38':
debugPrint('Processing Sensor ID:', temp_meta['sensor_id'] + ',', short_lookup[temp_meta['sensor_id']]['long_name'])
converted_df[column_name] = sbe_eq.oxy_dict(temp_meta['sensor_info'], p_array, k_array, t_array, c_array, raw_df[temp_meta['column']])
converted_df['CTDOXYVOLTS'] = raw_df[temp_meta['column']]
#processed_data.append(temp_meta)
### Fluorometer Seapoint block
elif temp_meta['sensor_id'] == '11':
debugPrint('Processing Sensor ID:', temp_meta['sensor_id'] + ',', short_lookup[temp_meta['sensor_id']]['long_name'])
converted_df[column_name] = sbe_eq.fluoro_seapoint_dict(temp_meta['sensor_info'], raw_df[temp_meta['column']])
#processed_data.append(temp_meta)
###Salinity block
elif temp_meta['sensor_id'] == '1000':
debugPrint('Processing Sensor ID:', temp_meta['sensor_id'] + ',', short_lookup[temp_meta['sensor_id']]['long_name'])
converted_df[column_name] = gsw.SP_from_C(c_array, t_array, p_array)
#processed_data.append(temp_meta)
###Altimeter block
elif temp_meta['sensor_id'] == '0':
debugPrint('Processing Sensor ID:', temp_meta['sensor_id'] + ',', short_lookup[temp_meta['sensor_id']]['long_name'])
converted_df[column_name] = sbe_eq.altimeter_voltage(temp_meta['sensor_info'], raw_df[temp_meta['column']])
### Aux block
else:
debugPrint('Passing along Sensor ID:', temp_meta['sensor_id'] + ',', short_lookup[temp_meta['sensor_id']]['long_name'])
converted_df[column_name] = raw_df[temp_meta['column']]
#processed_data.append(temp_meta)
# Set the column name for the index
converted_df.index.name = 'index'
debugPrint("Joining meta data dataframe with converted data... ", end='')
converted_df = converted_df.join(meta_df)
debugPrint('Success!')
# return the converted data as a dataframe
return converted_df
def importConvertedFile(file_name, debug=False):
"""Handler to import converted data from a csv-formatted file created by run.py
"""
try:
output_df = pd.read_pickle(file_name)
except FileNotFoundError:
global DEBUG
DEBUG = debug
debugPrint("Importing data from:", file_name + '... ', end='')
output_df = | pd.read_csv(file_name, index_col=0, skiprows=[1], parse_dates=False) | pandas.read_csv |
import os
import joblib
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from Fuzzy_clustering.version2.common_utils.logging import create_logger
from Fuzzy_clustering.version2.dataset_manager.common_utils import check_empty_nwp
from Fuzzy_clustering.version2.dataset_manager.common_utils import rescale_mean
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_2d_dense
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_3d
class DatasetCreatorDense:
def __init__(self, projects_group, projects, data, path_nwp, nwp_model, nwp_resolution, data_variables, njobs=1,
test=False, dates=None):
self.projects = projects
self.is_for_test = test
self.projects_group = projects_group
self.data = data
self.path_nwp = path_nwp
self.nwp_model = nwp_model
self.nwp_resolution = nwp_resolution
self.compress = True if self.nwp_resolution == 0.05 else False
self.n_jobs = njobs
self.variables = data_variables
self.logger = create_logger(logger_name=__name__, abs_path=self.path_nwp,
logger_path=f'log_{self.projects_group}.log', write_type='a')
if self.data is not None:
self.dates = self.check_dates()
elif dates is not None:
self.dates = dates
def check_dates(self):
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
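# Keep only the calendar days that actually appear in the observation index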
dates = [d for d in dates if d in data_dates]
self.logger.info('Dates are checked. Number of time samples is %s', str(len(dates)))
return pd.DatetimeIndex(dates)
def correct_nwps(self, nwp, variables):
if nwp['lat'].shape[0] == 0:
area_group = self.projects[0]['static_data']['area_group']
resolution = self.projects[0]['static_data']['NWP_resolution']
nwp['lat'] = np.arange(area_group[0][0], area_group[1][0] + resolution / 2,
resolution).reshape(-1, 1)
nwp['long'] = np.arange(area_group[0][1], area_group[1][1] + resolution / 2,
resolution).reshape(-1, 1).T
for var in nwp.keys():
if var not in {'lat', 'long'}:
if nwp['lat'].shape[0] != nwp[var].shape[0]:
nwp[var] = nwp[var].T
if 'WS' in variables and 'WS' not in nwp.keys():
if 'Uwind' in nwp.keys() and 'Vwind' in nwp.keys():
if nwp['Uwind'].shape[0] > 0 and nwp['Vwind'].shape[0] > 0:
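# Derive wind speed and direction from the U/V components: speed is the vector magnitude and
# direction comes from atan2 converted to degrees (r2d = 180/pi) with a 180-degree shift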
r2d = 45.0 / np.arctan(1.0)
wspeed = np.sqrt(np.square(nwp['Uwind']) + np.square(nwp['Vwind']))
wdir = np.arctan2(nwp['Uwind'], nwp['Vwind']) * r2d + 180
nwp['WS'] = wspeed
nwp['WD'] = wdir
if 'Temp' in nwp.keys():
nwp['Temperature'] = nwp['Temp']
del nwp['Temp']
return nwp
def stack_by_sample(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables, predictions):
timestep = 60
x = dict()
y = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
preds = predictions[project['_id']]
hor = preds.columns[-1] + timestep
p_dates = [t + pd.DateOffset(minutes=hor)]
preds = preds.loc[t].to_frame().T
dates_pred = [t + pd.DateOffset(minutes=h) for h in preds.columns]
pred = pd.DataFrame(preds.values.ravel(), index=dates_pred, columns=[project['_id']])
data_temp = pd.concat([data[project['_id']].iloc[np.where(data.index < t)].to_frame(), pred])
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
y[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
try:
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data_temp.loc[(date - pd.DateOffset(hours=1))].values
inp['Obs_lag2'] = data_temp.loc[(date - pd.DateOffset(hours=2))].values
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
y[project_id] = pd.concat([y[project_id], pd.DataFrame(data.loc[date, project_id],
columns=['target'],
index=[date])])
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data_temp.loc[(date - pd.DateOffset(hours=1)), project_id].values
inp['Obs_lag2'] = data_temp.loc[(date - pd.DateOffset(hours=2)), project_id].values
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
y[project['_id']] = pd.concat(
[y[project['_id']], pd.DataFrame(data.loc[date, project['_id']],
columns=['target'], index=[date])])
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
for project in projects:
if len(x_3d[project['_id']].shape) == 3:
x_3d[project['_id']] = x_3d[project['_id']][np.newaxis, :, :, :]
return x, y, x_3d, t.strftime('%d%m%y%H%M')
def stack_daily_nwps(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables):
x = dict()
y = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
if project['static_data']['horizon'] == 'day_ahead':
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
else:
p_dates = pd.date_range(t + pd.DateOffset(hours=1), t + pd.DateOffset(hours=24), freq='H')
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
y[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
if not self.is_for_test:
inp['Obs_lag1'] = inp['Obs_lag1'] + np.random.normal(0, 0.05) * inp['Obs_lag1']
inp['Obs_lag2'] = inp['Obs_lag2'] + np.random.normal(0, 0.05) * inp['Obs_lag2']
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
y[project_id] = pd.concat([y[project_id], pd.DataFrame(data.loc[date, project_id],
columns=['target'],
index=[date])])
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
y[project['_id']] = pd.concat(
[y[project['_id']], pd.DataFrame(data.loc[date, project['_id']],
columns=['target'], index=[date])])
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
return x, y, x_3d, t.strftime('%d%m%y%H%M')
def stack_daily_nwps_rabbitmq(self, t, path_nwp, nwp_model, project, variables):
x = dict()
x_3d = dict()
nwps = project['nwp']
p_dates = pd.date_range(t, t + pd.DateOffset(days=3) - pd.DateOffset(hours=1), freq='H')
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
try:
date_nwp = date.strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_rabbitmq(date, nwp, nwp_prev, nwp_next, project['static_data']['type'])
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
return x, x_3d, t.strftime('%d%m%y%H%M')
def stack_daily_nwps_online(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables):
x = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
if project['static_data']['horizon'] == 'day_ahead':
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
else:
p_dates = pd.date_range(t + pd.DateOffset(hours=1), t + | pd.DateOffset(hours=24) | pandas.DateOffset |
import ibeis
import six
import vtool
import utool
import numpy as np
import numpy.linalg as npl # NOQA
import pandas as pd
from vtool import clustering2 as clustertool
from vtool import nearest_neighbors as nntool
from plottool import draw_func2 as df2
np.set_printoptions(precision=2)
pd.set_option('display.max_rows', 10)
pd.set_option('display.max_columns', 10)
pd.set_option('display.notebook_repr_html', True)
ibeis.ensure_pz_mtest()
#taids = ibs.get_valid_aids()
#tvecs_list = ibs.get_annot_vecs(taids)
#tkpts_list = ibs.get_annot_kpts(taids)
#tvec_list = np.vstack(tvecs_list)
#print(idx2_vec)
#labels, words = vtool.clustering.cached_akmeans(tvec_list, 1000, 30, cache_dir='.')
#tvecdf_list = [pd.DataFrame(vecs) for vecs in tvecs_list]
#tvecs_df = pd.DataFrame(tvecdf_list, index=taids)
#kpts_col = pd.DataFrame(tkpts_list, index=taids, columns=['kpts'])
#vecs_col = pd.DataFrame(tvecs_list, index=taids, columns=['vecs'])
#tvecs_dflist = [pd.DataFrame(vecs, index=np.arange(len(vecs))) for vecs in tvecs_list]
#pd.concat(tvecs_dflist)
## Bui
#taids = ibs.get_valid_aids()
#tvecs_list = ibs.get_annot_vecs(taids)
#tkpts_list = ibs.get_annot_kpts(taids)
#orig_idx2_vec, orig_idx2_ax, orig_idx2_fx = vtool.nearest_neighbors.invertible_stack(tvecs_list, taids)
#annots_df = pd.concat([vecs_col, kpts_col], axis=1)
#annots_df
#idx2_vec = np.vstack(annots_df['vecs'].values)
##idx2_ax =
#idx2_vec, idx2_ax, idx2_fx = vtool.nearest_neighbors.invertible_stack(tvecs_list, taids)
#labels, words = vtool.clustering2.cached_akmeans(tvec_list, 1000, 30)
#words = centroids
def display_info(ibs, invindex, annots_df):
#################
#from ibeis.other import dbinfo
#print(ibs.get_infostr())
#dbinfo.get_dbinfo(ibs, verbose=True)
#################
#print('Inverted Index Stats: vectors per word')
#print(utool.get_stats_str(map(len, invindex.wx2_idxs.values())))
#################
qfx2_vec = annots_df['vecs'][1]
centroids = invindex.words
num_pca_dims = 3 # 3
whiten = False
kwd = dict(num_pca_dims=num_pca_dims,
whiten=whiten,)
#clustertool.rrr()
def makeplot_(fnum, prefix, data, labels='centroids', centroids=centroids):
return clustertool.plot_centroids(data, centroids, labels=labels,
fnum=fnum, prefix=prefix + '\n', **kwd)
#makeplot_(1, 'centroid vecs', centroids)
#makeplot_(2, 'database vecs', invindex.idx2_vec)
#makeplot_(3, 'query vecs', qfx2_vec)
#makeplot_(4, 'database vecs', invindex.idx2_vec)
#makeplot_(5, 'query vecs', qfx2_vec)
#################
def make_annot_df(ibs):
aid_list = ibs.get_valid_aids()
_kpts_col = pd.DataFrame(ibs.get_annot_kpts(aid_list),
index=aid_list, columns=['kpts'])
_vecs_col = pd.DataFrame(ibs.get_annot_vecs(aid_list),
index=aid_list, columns=['vecs'])
annots_df = pd.concat([_vecs_col, _kpts_col], axis=1)
return annots_df
def learn_visual_words(annots_df, train_aids, nCentroids):
vecs_list = annots_df['vecs'][train_aids].as_matrix()
train_vecs = np.vstack(vecs_list)
print('Training %d word vocabulary with %d annots and %d descriptors' %
(nCentroids, len(train_aids), len(train_vecs)))
words = clustertool.cached_akmeans(train_vecs, nCentroids, max_iters=100)
return words
def index_data_annots(annots_df, daids, words):
vecs_list = annots_df['vecs'][daids]
flann_params = {}
wordflann = vtool.nearest_neighbors.flann_cache(words, flann_params=flann_params)
ax2_aid = np.array(daids)
idx2_vec, idx2_ax, idx2_fx = nntool.invertible_stack(vecs_list, np.arange(len(ax2_aid)))
invindex = InvertedIndex(words, wordflann, idx2_vec, idx2_ax, idx2_fx, ax2_aid)
invindex.compute_internals()
return invindex
@six.add_metaclass(utool.ReloadingMetaclass)
class InvertedIndex(object):
def __init__(invindex, words, wordflann, idx2_vec, idx2_ax, idx2_fx, ax2_aid):
invindex.wordflann = wordflann
invindex.words = words # visual word centroids
invindex.ax2_aid = ax2_aid # annot index -> annot id
invindex.idx2_vec = idx2_vec # stacked index -> descriptor vector
invindex.idx2_ax = idx2_ax # stacked index -> annot index
invindex.idx2_fx = idx2_fx # stacked index -> feature index
invindex.idx2_wx = None # stacked index -> word index
invindex.wx2_idxs = None # word index -> stacked indexes
invindex.wx2_drvecs = None # word index -> residual vectors
#invindex.compute_internals()
def compute_internals(invindex):
idx2_vec = invindex.idx2_vec
wx2_idxs, idx2_wx = invindex.assign_to_words(idx2_vec)
wx2_drvecs = invindex.compute_residuals(idx2_vec, wx2_idxs)
invindex.idx2_wx = idx2_wx
invindex.wx2_idxs = wx2_idxs
invindex.wx2_drvecs = wx2_drvecs
def assign_to_words(invindex, idx2_vec):
idx2_wx, _idx2_wdist = invindex.wordflann.nn_index(idx2_vec, 1)
if True:
assign_df = pd.DataFrame(idx2_wx, columns=['wordindex'])
grouping = assign_df.groupby('wordindex')
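# GroupBy.indices yields {word index: array of descriptor row positions}, i.e. the inverted index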
wx2_idxs = grouping.wordindex.indices
else:
# TODO: replace with pandas groupby
idx_list = list(range(len(idx2_wx)))
wx2_idxs = utool.group_items(idx_list, idx2_wx.tolist())
return wx2_idxs, idx2_wx
def compute_residuals(invindex, idx2_vec, wx2_idxs):
""" returns mapping from word index to a set of residual vectors """
words = invindex.words
wx2_rvecs = {}
for word_index in wx2_idxs.keys():
# for each word
idxs = wx2_idxs[word_index]
vecs = np.array(idx2_vec[idxs], dtype=np.float64)
word = np.array(words[word_index], dtype=np.float64)
# compute residuals of all vecs assigned to this word
residuals = np.array([word - vec for vec in vecs])
# normalize residuals
residuals_n = vtool.linalg.normalize_rows(residuals)
wx2_rvecs[word_index] = residuals_n
return wx2_rvecs
#def smk_similarity(wx2_qrvecs, wx2_drvecs):
# similarity_matrix = (rvecs1.dot(rvecs2.T))
def query_inverted_index(annots_df, qaid, invindex):
qfx2_vec = annots_df['vecs'][qaid]
wx2_qfxs, qfx2_wx = invindex.assign_to_words(qfx2_vec)
wx2_qrvecs = invindex.compute_residuals(qfx2_vec, wx2_qfxs)
daid = invindex.ax2_aid[0]
def single_daid_similairty(invindex, daid):
""" daid = 4
FIXME: Inefficient code
"""
ax = np.where(invindex.ax2_aid == daid)[0]
wx2_dfxs = {}
wx2_drvecs = {}
for wx, idxs in invindex.wx2_idxs.items():
valid = (invindex.idx2_ax[idxs] == ax)
dfxs = invindex.idx2_fx[idxs][valid]
drvecs = invindex.wx2_drvecs[wx][valid]
wx2_dfxs[wx] = dfxs
wx2_drvecs[wx] = drvecs
# Similarity to a single database annotation
query_wxs = set(wx2_qrvecs.keys())
data_wxs = set(wx2_drvecs.keys())
total_score = 0
for wx in data_wxs.intersection(query_wxs):
qrvecs = wx2_qrvecs[wx]
drvecs = wx2_drvecs[wx]
residual_similarity = qrvecs.dot(drvecs.T)
scores = selectivity_function(residual_similarity)
total_score += scores.sum()
return total_score
def selectivity_function(residual_similarity, alpha=3, thresh=0):
""" sigma from SMK paper """
u = residual_similarity
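# Signed power law from the SMK paper: sigma_alpha(u) = sign(u) * |u|**alpha, with scores at or below thresh zeroed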
scores = (np.sign(u) * np.abs(u)) ** alpha
scores[scores <= thresh] = 0
return scores
# Entire database
daid2_score = utool.ddict(lambda: 0)
query_wxs = set(wx2_qrvecs.keys())
data_wxs = set(invindex.wx2_drvecs.keys())
qfx2_axs = []
qfx2_fm = []
qfx2_fs = []
aid_fm = []
aid_fs = []
idx2_daid = | pd.Series(invindex.ax2_aid[invindex.idx2_ax], name='daid') | pandas.Series |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Telecom Churn Case Study
# With 21 predictor variables we need to predict whether a particular customer will switch to another telecom provider or not. In telecom terminology, this is referred to as churning and not churning, respectively.
# ### Step 1: Importing and Merging Data
# Suppressing Warnings
import warnings
warnings.filterwarnings('ignore')
# Importing Pandas and NumPy
import pandas as pd, numpy as np
# Importing all datasets
churn_data = pd.read_csv("churn_data.csv")
churn_data.head()
customer_data = pd.read_csv("customer_data.csv")
customer_data.head()
internet_data = pd.read_csv("internet_data.csv")
internet_data.head()
# #### Combining all data files into one consolidated dataframe
# Merging on 'customerID'
df_1 = pd.merge(churn_data, customer_data, how='inner', on='customerID')
# Final dataframe with all predictor variables
telecom = pd.merge(df_1, internet_data, how='inner', on='customerID')
# ### Step 2: Inspecting the Dataframe
telecom.OnlineBackup.value_counts()
# Let's see the head of our master dataset
telecom.head()
# Let's check the dimensions of the dataframe
telecom.shape
# let's look at the statistical aspects of the dataframe
telecom.describe()
# Let's see the type of each column
telecom.info()
# ### Step 3: Data Preparation
# #### Converting some binary variables (Yes/No) to 0/1
# +
# List of variables to map
varlist = ['PhoneService', 'PaperlessBilling', 'Churn', 'Partner', 'Dependents']
# Defining the map function
def binary_map(x):
return x.map({'Yes': 1, "No": 0})
# Applying the function to the telecom dataframe columns
telecom[varlist] = telecom[varlist].apply(binary_map)
# -
telecom.head()
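# A quick illustrative check: the mapped columns should now contain only 0/1 values
telecom[varlist].nunique()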
# #### For categorical variables with multiple levels, create dummy features (one-hot encoded)
# +
# Creating dummy variables for some of the categorical variables and dropping the first level.
dummy1 = pd.get_dummies(telecom[['Contract', 'PaymentMethod', 'gender', 'InternetService']], drop_first=True)
# Adding the results to the master dataframe
telecom = pd.concat([telecom, dummy1], axis=1)
# -
telecom.head()
# +
# Creating dummy variables for the remaining categorical variables and dropping the level with big names.
# Creating dummy variables for the variable 'MultipleLines'
ml = | pd.get_dummies(telecom['MultipleLines'], prefix='MultipleLines') | pandas.get_dummies |
"""Class to process raw TomTom MultiNet data into a network dataset.
Copyright 2022 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
import numpy as np
import time
import functools
import datetime
import os
import uuid
import enum
from lxml import etree
import arcpy
CURDIR = os.path.dirname(os.path.abspath(__file__))
LNG_CODES = {
"ALB": "sq", # Albanian
"ALS": "", # Alsacian
"ARA": "ar", # Arabic
"BAQ": "eu", # Basque
"BAT": "", # Baltic (Other)
"BEL": "be", # Belarusian
"BET": "be", # Belarusian (Latin)
"BOS": "bs", # Bosnian
"BRE": "br", # Breton
"BUL": "bg", # Bulgarian
"BUN": "bg", # Bulgarian (Latin)
"BUR": "my", # Burmese
"CAT": "ca", # Catalan
"CEL": "", # Celtic (Other)
"CHI": "zh", # Chinese, Han Simplified
"CHL": "zh", # Chinese, Mandarin Pinyin
"CHT": "zh", # Chinese, Han Traditional
"CTN": "zh", # Chinese, Cantonese Pinyin
"CZE": "cs", # Czech
"DAN": "da", # Danish
"DUT": "nl", # Dutch
"ENG": "en", # English
"EST": "et", # Estonian
"FAO": "fo", # Faroese
"FIL": "", # Filipino
"FIN": "fi", # Finnish
"FRE": "fr", # French
"FRY": "fy", # Frisian
"FUR": "", # Friulian
"GEM": "", # Franco-Provencal
"GER": "de", # German
"GLA": "gd", # Gaelic (Scots)
"GLE": "ga", # Irish
"GLG": "gl", # Galician
"GRE": "el", # Greek (Modern)
"GRL": "el", # Greek (Latin Transcription)
"HEB": "he", # Hebrew
"HIN": "hi", # Hindi
"HUN": "hu", # Hungarian
"ICE": "is", # Icelandic
"IND": "id", # Indonesian
"ITA": "it", # Italian
"KHM": "km", # Khmer
"KOL": "ko", # Korean (Latin)
"KOR": "ko", # Korean
"LAD": "", # Ladin
"LAO": "lo", # Lao
"LAT": "la", # Latin
"LAV": "lv", # Latvian
"LIT": "lt", # Lithuanian
"LTZ": "lb", # Letzeburgesch
"MAC": "mk", # Macedonian
"MAP": "", # Austronesian (Other)
"MAT": "mk", # Macedonian (Latin Transcription)
"MAY": "ms", # Malaysian
"MLT": "mt", # Maltese
"MOL": "mo", # Moldavian
"MYN": "", # Mayan Languages
"NOR": "no", # Norwegian
"OCI": "oc", # Occitan
"PAA": "", # Papuan-Australian (Other)
"POL": "pl", # Polish
"POR": "pt", # Portuguese
"PRO": "", # Provencal
"ROA": "", # Romance (Other)
"ROH": "rm", # Raeto-Romance
"ROM": "", # Romani
"RUL": "ru", # Russian (Latin Transcription)
"RUM": "ro", # Romanian
"RUS": "ru", # Russian
"SCC": "sh", # Serbian (Latin)
"SCO": "gd", # Scots
"SCR": "sh", # Croatian
"SCY": "sh", # Serbian (Cyrillic)
"SLA": "cu", # Slavic
"SLO": "sk", # Slovak
"SLV": "sv", # Slovenian
"SMC": "", # Montenegrin (Cyrillic)
"SMI": "se", # Lapp (Sami)
"SML": "", # Montenegrin (Latin)
"SPA": "es", # Spanish
"SRD": "sc", # Sardinian
"SWE": "sv", # Swedish
"THA": "th", # Thai
"THL": "th", # Thai (Latin)
"TUR": "tr", # Turkish
"UKL": "uk", # Ukranian (Latin)
"UKR": "uk", # Ukranian
"UND": "", # Undefined
"VAL": "ca", # Valencian
"VIE": "vi", # Vietnamese
"WEL": "cy", # Welsh
"WEN": "", # Sorbian (Other)
}
PRINT_TIMINGS = False # Set to True to log timings for various methods (primarily for debugging and development)
def timed_exec(func):
"""Measure time in seconds to execute a function.
This function is meant to be used as a decorator on a function.
Args:
func: The decorated function that is being timed
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""Wrap the function to be run."""
# Using an inner function so the timing can happen directly around the function under test.
def inner_func():
t0 = time.time()
return_val = func(*args, **kwargs)
if PRINT_TIMINGS:
arcpy.AddMessage(f"Time to run {func.__name__}: {time.time() - t0}")
return return_val
return inner_func()
return wrapper
class TimeZoneType(enum.Enum):
"""Defines the time zone type to use."""
NoTimeZone = 1
Single = 2
Table = 3
class UnitType(enum.Enum):
"""Defines whether the units are imperial or metric."""
Imperial = 1
Metric = 2
class MultiNetInputData:
"""Defines a collection of MultiNet inputs to process."""
def __init__(
self, network_geometry_fc, maneuvers_geometry_fc, maneuver_path_idx_table, sign_info_table, sign_path_table,
restrictions_table, network_profile_link_table=None, historical_speed_profiles_table=None,
rds_tmc_info_table=None, logistics_truck_routes_table=None, logistics_lrs_table=None, logistics_lvc_table=None
):
"""Initialize an input MultiNet dataset with all the appropriate feature classes and tables"""
self.nw = network_geometry_fc
self.mn = maneuvers_geometry_fc
self.mp = maneuver_path_idx_table
self.si = sign_info_table
self.sp = sign_path_table
self.rs = restrictions_table
self.hsnp = network_profile_link_table
self.hspr = historical_speed_profiles_table
self.rd = rds_tmc_info_table
self.ltr = logistics_truck_routes_table
self.lrs = logistics_lrs_table
self.lvc = logistics_lvc_table
self.required_tables = [self.nw, self.mn, self.mp, self.si, self.sp, self.rs]
self.required_traffic_tables = [self.hsnp, self.hspr]
self.required_logistics_tables = [self.lrs, self.lvc]
def validate_data(self, check_traffic, check_logistics):
"""Validate that the data exists and has the required fields."""
# Check that all tables that need to be specified are specified.
for table in self.required_tables:
if not table:
arcpy.AddError("Required MultiNet input table not specified.")
return False
if check_traffic:
for table in self.required_traffic_tables:
if not table:
arcpy.AddError("Required MultiNet traffic input table not specified.")
return False
if check_logistics:
for table in self.required_logistics_tables:
if not table:
arcpy.AddError("Required MultiNet Logistics input table not specified.")
return False
# Verify existence of tables and appropriate schema
required_fields = {
self.nw: [
("ID", "Double"),
("FEATTYP", "SmallInteger"),
("F_JNCTID", "Double"),
("T_JNCTID", "Double"),
("PJ", "SmallInteger"),
("METERS", "Double"),
("NET2CLASS", "SmallInteger"),
("NAME", "String"),
("FOW", "SmallInteger"),
("FREEWAY", "SmallInteger"),
("BACKRD", "SmallInteger"),
("TOLLRD", "SmallInteger"),
("RDCOND", "SmallInteger"),
("PRIVATERD", "SmallInteger"),
("CONSTATUS", "String"),
("ONEWAY", "String"),
("F_ELEV", "SmallInteger"),
("T_ELEV", "SmallInteger"),
("KPH", "SmallInteger"),
("MINUTES", "Single"), # Float
("NTHRUTRAF", "SmallInteger"),
("ROUGHRD", "SmallInteger"),
],
self.mn: [
("ID", "Double"),
("JNCTID", "Double"),
("FEATTYP", "SmallInteger")
],
self.mp: [
("ID", "Double"),
("TRPELID", "Double"),
("SEQNR", "Integer")
],
self.si: [
("ID", "Double"),
("INFOTYP", "String"),
("TXTCONT", "String"),
("TXTCONTLC", "String"),
("CONTYP", "SmallInteger"),
("SEQNR", "Integer"),
("DESTSEQ", "Integer"),
("RNPART", "SmallInteger")
],
self.sp: [
("ID", "Double"),
("TRPELID", "Double"),
("SEQNR", "Integer")
],
self.rs: [
("ID", "Double"),
("VT", "SmallInteger"),
("DIR_POS", "SmallInteger"),
("RESTRTYP", "String")
],
self.hsnp: [
("NETWORK_ID", "Double"),
("VAL_DIR", "SmallInteger"),
("SPFREEFLOW", "SmallInteger"),
("SPWEEKDAY", "SmallInteger"),
("SPWEEKEND", "SmallInteger"),
("SPWEEK", "SmallInteger"),
("PROFILE_1", "SmallInteger"),
("PROFILE_2", "SmallInteger"),
("PROFILE_3", "SmallInteger"),
("PROFILE_4", "SmallInteger"),
("PROFILE_5", "SmallInteger"),
("PROFILE_6", "SmallInteger"),
("PROFILE_7", "SmallInteger")
],
self.hspr: [
("PROFILE_ID", "SmallInteger"),
("TIME_SLOT", "Integer"),
("REL_SP", "Single")
],
self.rd: [
("ID", "Double"),
("RDSTMC", "String")
],
self.ltr: [
("ID", "Double"),
("PREFERRED", "SmallInteger"),
("RESTRICTED", "SmallInteger")
],
self.lrs: [
("ID", "Double"),
("SEQNR", "SmallInteger"),
("RESTRTYP", "String"),
("VT", "SmallInteger"),
("RESTRVAL", "SmallInteger"),
("LIMIT", "Double"),
("UNIT_MEAS", "SmallInteger")
],
self.lvc: [
("ID", "Double"),
("SEQNR", "SmallInteger")
]
}
for table in [t for t in required_fields if t]:
if not arcpy.Exists(table):
arcpy.AddError(f"Input table {table} does not exist.")
return False
actual_fields = {(f.name, f.type) for f in arcpy.ListFields(table)}
if not set(required_fields[table]).issubset(actual_fields):
arcpy.AddError(
f"Input table {table} does not have the correct schema. Required fields: {required_fields[table]}")
return False
if int(arcpy.management.GetCount(table).getOutput(0)) == 0:
arcpy.AddWarning(f"Input table {table} has no rows.")
# Everything is valid
return True
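# A hedged construction sketch (hypothetical geodatabase paths; traffic and logistics tables are optional):
# in_multinet = MultiNetInputData(
#     r"C:\MultiNet\mn.gdb\NW", r"C:\MultiNet\mn.gdb\MN", r"C:\MultiNet\mn.gdb\MP",
#     r"C:\MultiNet\mn.gdb\SI", r"C:\MultiNet\mn.gdb\SP", r"C:\MultiNet\mn.gdb\RS")
# in_multinet.validate_data(check_traffic=False, check_logistics=False)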
class MultiNetProcessor:
def __init__(
self, out_folder: str, gdb_name: str, in_multinet: MultiNetInputData, unit_type: UnitType,
include_historical_traffic: bool, include_logistics: bool,
time_zone_type: TimeZoneType, time_zone_name: str = "", in_time_zone_table=None,
time_zone_ft_field: str = None, time_zone_tf_field: str = None, build_network: bool = True
):
"""Initialize a class to process MultiNet data into a network dataset."""
self.in_multinet = in_multinet
self.unit_type = unit_type
self.include_historical_traffic = include_historical_traffic
self.include_logistics = include_logistics
self.time_zone_type = time_zone_type
self.time_zone_name = time_zone_name
self.in_time_zone_table = in_time_zone_table
self.time_zone_ft_field = time_zone_ft_field
self.time_zone_tf_field = time_zone_tf_field
self.build_network = build_network
self.out_folder = out_folder
self.gdb_name = gdb_name
if not self.gdb_name.endswith(".gdb"):
self.gdb_name += ".gdb"
self.feature_dataset = os.path.join(self.out_folder, self.gdb_name, "Routing")
self.streets = os.path.join(self.feature_dataset, "Streets")
self.turns = os.path.join(self.feature_dataset, "RestrictedTurns")
self.road_splits = os.path.join(self.out_folder, self.gdb_name, "Streets_RoadSplits")
self.signposts = os.path.join(self.feature_dataset, "Signposts")
self.signposts_streets = os.path.join(self.out_folder, self.gdb_name, "Signposts_Streets")
self.streets_profiles = os.path.join(self.out_folder, self.gdb_name, "Streets_DailyProfiles")
self.profiles = os.path.join(self.out_folder, self.gdb_name, "DailyProfiles")
self.streets_tmc = os.path.join(self.out_folder, self.gdb_name, "Streets_TMC")
self.time_zone_table = os.path.join(self.out_folder, self.gdb_name, "TimeZones")
self.network = os.path.join(self.feature_dataset, "Routing_ND")
self.out_sr = arcpy.Describe(self.in_multinet.nw).spatialReference
# Maps VT field codes to restriction names
self.vt_field_map = {
0: "AllVehicles_Restricted",
11: "PassengerCars_Restricted",
12: "ResidentialVehicles_Restricted",
16: "Taxis_Restricted",
17: "PublicBuses_Restricted"
}
self.restriction_field_names = [self.vt_field_map[vt] for vt in sorted(self.vt_field_map)]
# Historical traffic base field names
self.historical_traffic_fields = ["Weekday", "Weekend", "AllWeek"]
# Logistics Truck Routes field names
self.ltr_fields = [
"NationalSTAARoute", "NationalRouteAccess", "DesignatedTruckRoute", "TruckBypassRoad",
"NoCommercialVehicles", "ImmediateAccessOnly", "TrucksRestricted"
]
# Global dataframes and variables used by multiple processes and initialized later
self.r_df = None # Restrictions table indexed by ID for quick lookups
self.mp_df = None # Maneuver paths table indexed by ID for quick lookups
self.streets_df = None # Dataframe of output streets indexed by ID for quick lookups
self.lrs_df = None # Dataframe of logistics LRS table
self.unique_lrs_df = None # Dataframe holding unique combinations of logistics restriction data
self.max_turn_edges = None # Maximum number of edges participating in a turn
self.fc_id = None # Streets feature class dataset ID used in Edge#FCID fields
def process_multinet_data(self):
"""Process multinet data into a network dataset."""
# Validate the input data
if not self._validate_inputs():
return
# Create the output location
self._create_feature_dataset()
# Read in some tables we're going to need to reference later in multiple places
self._read_and_index_restrictions()
self._read_and_index_maneuver_paths()
self._read_and_index_logistics_tables()
# Create the output Streets feature class and populate it
self._copy_streets()
self._detect_and_delete_duplicate_streets()
self._populate_streets_fields()
# We're now done with the Logistics restrictions table, so clear the variable to free up memory
del self.lrs_df
self.lrs_df = None
# Read in output streets for future look-ups
self._read_and_index_streets()
# Create and populate the turn feature class
self._create_turn_fc()
self._generate_turn_features()
# We're now done with the restrictions table, so clear the variable to free up memory
del self.r_df
self.r_df = None
# Create and populate the road forks table
self._create_road_forks_table()
self._populate_road_forks()
# We're now done with the maneuver path table, so clear the variable to free up memory
del self.mp_df
self.mp_df = None
# Create and populate Signposts and Signposts_Streets
self._create_signposts_fc()
self._create_signposts_streets_table()
self._populate_signposts_and_signposts_streets()
# Create and populate historical traffic tables
if self.include_historical_traffic:
self._create_and_populate_streets_profiles_table()
self._create_and_populate_profiles_table()
self._create_and_populate_streets_tmc_table()
# We're done with the streets table, so clear the variable to free up memory
del self.streets_df
self.streets_df = None
# Handle time zone table if needed
if self.time_zone_type != TimeZoneType.NoTimeZone:
self._handle_time_zone()
# Create the network dataset from a template and build it
self._create_and_build_nd()
@timed_exec
def _validate_inputs(self):
"""Validate the input data."""
arcpy.AddMessage("Validating inputs...")
# Do some simple checks
if not os.path.exists(self.out_folder):
arcpy.AddMessage(f"Output folder {self.out_folder} does not exist.")
return False
if os.path.exists(os.path.join(self.out_folder, self.gdb_name)):
arcpy.AddMessage(f"Output geodatabase {os.path.join(self.out_folder, self.gdb_name)} already exists.")
return False
if self.out_sr.name == "Unknown":
arcpy.AddError("The input data has an unknown spatial reference.")
return False
# Make sure the license is available.
if arcpy.CheckExtension("network").lower() == "available":
arcpy.CheckOutExtension("network")
else:
arcpy.AddError("The Network Analyst extension license is unavailable.")
return False
# Check the input data
if not self.in_multinet.validate_data(self.include_historical_traffic, self.include_logistics):
return False
arcpy.AddMessage("Inputs validated successfully.")
return True
@timed_exec
def _create_feature_dataset(self):
"""Create the output geodatabase and feature dataset."""
arcpy.AddMessage(f"Creating output geodatabase and feature dataset at {self.feature_dataset}...")
arcpy.management.CreateFileGDB(self.out_folder, self.gdb_name)
arcpy.management.CreateFeatureDataset(
os.path.dirname(self.feature_dataset),
os.path.basename(self.feature_dataset),
self.out_sr
)
@timed_exec
def _copy_streets(self):
"""Copy the network geometry feature class to the target feature dataset and add fields."""
arcpy.AddMessage("Copying input network geometry feature class to target feature dataset...")
# Filter out address area boundary elements
nw_layer = arcpy.management.MakeFeatureLayer(self.in_multinet.nw, "NW layer", "FEATTYP <> 4165").getOutput(0)
# Construct field mappings to use when copying the original data.
field_mappings = arcpy.FieldMappings()
# Add all the fields from the input data
field_mappings.addTable(self.in_multinet.nw)
# Add the TOLLRDDIR and restriction fields and historical traffic fields if relevant.
field_mappings = field_mappings.exportToString()
field_mappings += self._create_string_field_map("TOLLRDDIR", "Text", 2)
for restr_field in self.restriction_field_names:
field_mappings += self._create_string_field_map(f"FT_{restr_field}", "Text", 1)
field_mappings += self._create_string_field_map(f"TF_{restr_field}", "Text", 1)
if self.include_historical_traffic:
for trf_fld in self.historical_traffic_fields:
field_mappings += self._create_string_field_map(f"FT_{trf_fld}", "Short")
field_mappings += self._create_string_field_map(f"TF_{trf_fld}", "Short")
field_mappings += self._create_string_field_map(f"FT_{trf_fld}Minutes", "Float")
field_mappings += self._create_string_field_map(f"TF_{trf_fld}Minutes", "Float")
if self.in_multinet.ltr:
for ltr_field in self.ltr_fields:
field_mappings += self._create_string_field_map(ltr_field, "Text", 1)
if self.include_logistics:
# Derive a list of logistics restriction field names based on data from the LRS table
for _, record in self.unique_lrs_df.iterrows():
field_mappings += self._create_string_field_map(
record["FieldName"], record["FieldType"], record["FieldLength"])
# Copy the input network geometry feature class to the target feature dataset
arcpy.conversion.FeatureClassToFeatureClass(
nw_layer, self.feature_dataset, os.path.basename(self.streets), field_mapping=field_mappings)
# Update the fc_id that will be used to relate back to this Streets feature class in Edge#FCID fields
self.fc_id = arcpy.Describe(self.streets).DSID
@timed_exec
def _detect_and_delete_duplicate_streets(self):
"""Determine if there are duplicate street IDs, and if so, delete them."""
# Duplicate street features occur along tile boundaries.
# Use Pandas to identify duplicate ID values and associated OIDs to delete.
with arcpy.da.SearchCursor(self.streets, ["OID@", "ID"]) as cur:
id_df = pd.DataFrame(cur, columns=["OID", "ID"])
duplicate_streets = id_df[id_df.duplicated(subset="ID")]["OID"].to_list()
# If there are any duplicates, delete them.
if duplicate_streets:
duplicate_streets = [str(oid) for oid in duplicate_streets]
oid_field = arcpy.Describe(self.streets).OIDFieldName
arcpy.AddMessage("Duplicate streets were detected and will be removed.")
where = f"{oid_field} IN ({', '.join(duplicate_streets)})"
layer_name = "Temp_Streets"
arcpy.management.MakeFeatureLayer(self.streets, layer_name, where)
arcpy.management.DeleteRows(layer_name)
@timed_exec
def _create_turn_fc(self):
"""Create the turn feature class and add necessary fields."""
assert self.max_turn_edges is not None
arcpy.AddMessage("Creating turn feature class...")
arcpy.na.CreateTurnFeatureClass(self.feature_dataset, os.path.basename(self.turns), self.max_turn_edges)
# Add restriction fields
# The ID field is added to easily relate this back to the original data but is not required by the network.
field_defs = [["ID", "DOUBLE"]] + [[field, "TEXT", "", 1] for field in self.restriction_field_names]
arcpy.management.AddFields(self.turns, field_defs)
@timed_exec
def _create_road_forks_table(self):
"""Create the road forks table Streets_RoadSplits with the correct schema."""
arcpy.AddMessage("Creating road forks table...")
arcpy.management.CreateTable(os.path.dirname(self.road_splits), os.path.basename(self.road_splits))
# Schema for the road forks table:
# https://pro.arcgis.com/en/pro-app/latest/tool-reference/data-management/add-fields.htm
# The ID field is added to easily relate this back to the original data but is not required by the schema.
field_defs = [
["ID", "DOUBLE"],
["EdgeFCID", "LONG"],
["EdgeFID", "LONG"],
["EdgeFrmPos", "DOUBLE"],
["EdgeToPos", "DOUBLE"],
["Branch0FCID", "LONG"],
["Branch0FID", "LONG"],
["Branch0FrmPos", "DOUBLE"],
["Branch0ToPos", "DOUBLE"],
["Branch1FCID", "LONG"],
["Branch1FID", "LONG"],
["Branch1FrmPos", "DOUBLE"],
["Branch1ToPos", "DOUBLE"],
["Branch2FCID", "LONG"],
["Branch2FID", "LONG"],
["Branch2FrmPos", "DOUBLE"],
["Branch2ToPos", "DOUBLE"]
]
arcpy.management.AddFields(self.road_splits, field_defs)
@timed_exec
def _create_signposts_fc(self):
"""Create the Signposts feature class with correct schema."""
arcpy.AddMessage("Creating Signposts feature class...")
arcpy.management.CreateFeatureclass(
os.path.dirname(self.signposts), os.path.basename(self.signposts),
"POLYLINE", has_m="DISABLED", has_z="DISABLED"
)
# Schema for the signposts feature class:
# https://pro.arcgis.com/en/pro-app/latest/help/analysis/networks/signposts.htm
field_defs = [
["ExitName", "TEXT", "ExitName", 24],
]
for i in range(10):
field_defs += [
[f"Branch{i}", "TEXT", f"Branch{i}", 180],
[f"Branch{i}Dir", "TEXT", f"Branch{i}Dir", 5],
[f"Branch{i}Lng", "TEXT", f"Branch{i}Lng", 2],
[f"Toward{i}", "TEXT", f"Toward{i}", 180],
[f"Toward{i}Lng", "TEXT", f"Toward{i}Lng", 2],
]
arcpy.management.AddFields(self.signposts, field_defs)
@timed_exec
def _create_signposts_streets_table(self):
"""Create the Signposts_Streets table with correct schema."""
arcpy.AddMessage("Creating Signposts_Streets table...")
arcpy.management.CreateTable(os.path.dirname(self.signposts_streets), os.path.basename(self.signposts_streets))
# Schema for the Signposts_Streets table:
# https://pro.arcgis.com/en/pro-app/latest/help/analysis/networks/signposts.htm
field_defs = [
["SignpostID", "LONG"],
["Sequence", "LONG"],
["EdgeFCID", "LONG"],
["EdgeFID", "LONG"],
["EdgeFrmPos", "DOUBLE"],
["EdgeToPos", "DOUBLE"]
]
arcpy.management.AddFields(self.signposts_streets, field_defs)
@timed_exec
def _create_and_populate_streets_profiles_table(self):
"""Create the Streets_DailyProfiles table."""
if not self.include_historical_traffic:
return
arcpy.AddMessage("Creating and populating Streets_DailyProfiles table...")
assert self.streets_df is not None # Sanity check
# Create the table with desired schema
arcpy.management.CreateTable(
os.path.dirname(self.streets_profiles),
os.path.basename(self.streets_profiles),
self.in_multinet.hsnp # Template table used to define schema
)
field_defs = [
["EdgeFCID", "LONG"],
["EdgeFID", "LONG"],
["EdgeFrmPos", "DOUBLE"],
["EdgeToPos", "DOUBLE"]
]
arcpy.management.AddFields(self.streets_profiles, field_defs)
# Insert rows
desc = arcpy.Describe(self.in_multinet.hsnp)
input_fields = [f.name for f in desc.fields if f.name != desc.OIDFieldName]
output_fields = input_fields + [f[0] for f in field_defs]
network_id_idx = input_fields.index("NETWORK_ID")
val_dir_idx = input_fields.index("VAL_DIR")
with arcpy.da.InsertCursor(self.streets_profiles, output_fields) as cur:
for row in arcpy.da.SearchCursor(
self.in_multinet.hsnp, input_fields, "SPFREEFLOW > 0 And VAL_DIR IN (2, 3)"
):
# Initialize the row to insert with all the values from the original table
new_row = [val for val in row]
# Calculate the additional, new fields: EdgeFCID, EdgeFID, EdgeFrmPos, EdgeToPos
street_id = np.int64(row[network_id_idx])
try:
# Find the street record associated with this street profile record
edge_fid = self.streets_df.loc[street_id]["OID"]
except KeyError:
arcpy.AddWarning((
f"The Streets table is missing an entry with ID {row[network_id_idx]}, which is used in the "
"network profile link historical traffic table."))
# Just skip this row and don't add it
continue
val_dir = row[val_dir_idx]
if val_dir == 2:
edge_from_pos = 0
edge_to_pos = 1
elif val_dir == 3:
edge_from_pos = 1
edge_to_pos = 0
else:
# This should never happen because of our where clause, but check just in case
continue
new_row += [self.fc_id, edge_fid, edge_from_pos, edge_to_pos]
# Insert the row
cur.insertRow(new_row)
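    # Note on the mapping implemented above: VAL_DIR == 2 yields EdgeFrmPos=0, EdgeToPos=1
    # (traverse the edge in digitized direction) and VAL_DIR == 3 yields EdgeFrmPos=1,
    # EdgeToPos=0 (reverse direction); all other values are excluded by the where clause
    # "SPFREEFLOW > 0 And VAL_DIR IN (2, 3)".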
@timed_exec
def _create_and_populate_profiles_table(self):
"""Create the DailyProfiles table."""
if not self.include_historical_traffic:
return
arcpy.AddMessage("Creating and populating DailyProfiles table...")
# Create the table with correct schema
arcpy.management.CreateTable(os.path.dirname(self.profiles), os.path.basename(self.profiles))
field_defs = [["ProfileID", "SHORT"]]
added_minutes = 0
midnight = datetime.datetime(2021, 1, 1, 0, 0, 0) # Initialize midnight on an arbitrary date
# Add a field for each 5-minute increment until 11:55 at night
while added_minutes < 1440:
current_time = midnight + datetime.timedelta(minutes=added_minutes)
field_defs.append([f"SpeedFactor_{current_time.strftime('%H%M')}", "FLOAT"])
added_minutes += 5
arcpy.management.AddFields(self.profiles, field_defs)
# Read the Historical Speed Profiles table into a temporary dataframe so we can quickly sort it. Normally we
# could sort it using the da.SearchCursor's sql clause, but the ORDER BY option doesn't work with shapefile
# tables.
fields = ["PROFILE_ID", "TIME_SLOT", "REL_SP"]
with arcpy.da.SearchCursor(self.in_multinet.hspr, fields) as cur:
hspr_df = pd.DataFrame(cur, columns=fields)
hspr_df = hspr_df.sort_values("PROFILE_ID").groupby(["PROFILE_ID"])
# Insert the rows
output_fields = [f[0] for f in field_defs]
with arcpy.da.InsertCursor(self.profiles, output_fields) as cur:
# Loop through the records in the HSPR table and calculate the SpeedFactor fields accordingly
for profile_id, group in hspr_df:
# Initialize a new row with the ProfileID and defaulting all the SpeedFactor fields to None.
new_row = [profile_id] + [None] * (len(output_fields) - 1)
# Iterate through the records in this group and populate the SpeedFactor fields
for _, record in group.iterrows():
# Figure out which SpeedFactor field this record is for based on the TIME_SLOT field value
# The TIME_SLOT field indicates the time of day as measured in seconds since midnight. Since the
# granularity is 5 minutes, the TIME_SLOT values are all multiples of 300 (e.g., TIME_SLOT=0
# represents 12:00am, TIME_SLOT=300 represents 12:05am, TIME_SLOT=600 represents 12:10am, etc.).
# Add 1 to the index because ProfileID is the first field in the row
time_slot_index = int((record['TIME_SLOT'] / 300)) + 1
new_row[time_slot_index] = record["REL_SP"] / 100
# Check if the row is missing any values, and if so, default them to 1 and add a warning.
if None in new_row:
arcpy.AddWarning((
"The Historical Speed Profiles table has incomplete TIME_SLOT records for PROFILE_ID "
f"{profile_id}. The missing values have been filled in with a value of 1."
))
new_row = [val if val is not None else 1 for val in new_row]
# Finally, insert the row
cur.insertRow(new_row)
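    # Worked example of the TIME_SLOT -> field mapping used above (5-minute granularity):
    #   TIME_SLOT = 0     -> int(0 / 300) + 1     = index 1   -> SpeedFactor_0000
    #   TIME_SLOT = 300   -> int(300 / 300) + 1   = index 2   -> SpeedFactor_0005
    #   TIME_SLOT = 85500 -> int(85500 / 300) + 1 = index 286 -> SpeedFactor_2345
    # Index 0 of each row holds ProfileID, which is why 1 is added to the quotient.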
@timed_exec
def _create_and_populate_streets_tmc_table(self):
if not self.include_historical_traffic or not self.in_multinet.rd:
return
arcpy.AddMessage("Creating and populating Streets_TMC table...")
assert self.streets_df is not None # Sanity check
arcpy.management.CreateTable(os.path.dirname(self.streets_tmc), os.path.basename(self.streets_tmc))
field_defs = [
["ID", "DOUBLE"],
["TMC", "TEXT", "TMC", 9],
["EdgeFCID", "LONG"],
["EdgeFID", "LONG"],
["EdgeFrmPos", "DOUBLE"],
["EdgeToPos", "DOUBLE"]
]
arcpy.management.AddFields(self.streets_tmc, field_defs)
with arcpy.da.InsertCursor(self.streets_tmc, [f[0] for f in field_defs]) as cur:
for row in arcpy.da.SearchCursor(self.in_multinet.rd, ["ID", "RDSTMC"]):
id = row[0]
# The TMC field value comes from the last 9 characters of the RDSTMC field of the RD table.
rdstmc = row[1]
tmc = rdstmc[-9:]
try:
# Find the street record associated with this street profile record
edge_fid = self.streets_df.loc[np.int64(row[0])]["OID"]
except KeyError:
arcpy.AddWarning((
f"The Streets table is missing an entry with ID {id}, which is used in the RDS-TMC Information "
"(RD) historical traffic table."))
# Just skip this row and don't add it
continue
if rdstmc[0] == "+":
edge_from_pos = 0
edge_to_pos = 1
elif rdstmc[0] == "-":
edge_from_pos = 1
edge_to_pos = 0
else:
arcpy.AddWarning((
"The RDS-TMC Information (RD) historical traffic table has an invalid RDSTMC field value for "
f"ID {id}."
))
continue
cur.insertRow([id, tmc, self.fc_id, edge_fid, edge_from_pos, edge_to_pos])
@timed_exec
def _read_and_index_restrictions(self):
"""Read in the restrictions table and index it for quick lookups."""
arcpy.AddMessage("Reading and grouping restrictions table...")
where = f"VT IN ({', '.join([str(vt) for vt in self.vt_field_map])})"
fields = ["ID", "VT", "DIR_POS", "RESTRTYP"]
with arcpy.da.SearchCursor(self.in_multinet.rs, fields, where) as cur:
self.r_df = pd.DataFrame(cur, columns=fields)
# Cast the ID column from its original double to an explicit int64 so we can use it for indexing and lookups
self.r_df = self.r_df.astype({"ID": np.int64})
# Index the dataframe by ID for quick retrieval later, and sort the index to make those lookups even faster
self.r_df.set_index("ID", inplace=True)
self.r_df.sort_index(inplace=True)
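    # Assumed lookup pattern (illustrative, mirroring how streets_df is used elsewhere):
    #   >>> self.r_df.loc[np.int64(street_id)]
    # returns all restriction records for that street ID; sorting the index above keeps
    # these .loc lookups fast on large restriction tables.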
@timed_exec
def _read_and_index_maneuver_paths(self):
"""Read in the maneuver paths table and index it for quick lookups."""
arcpy.AddMessage("Reading and grouping maneuver paths table...")
fields = ["ID", "TRPELID", "SEQNR"]
with arcpy.da.SearchCursor(self.in_multinet.mp, fields) as cur:
# Explicitly read it in using int64 to convert the double-based ID field for easy indexing and lookups
self.mp_df = pd.DataFrame(cur, columns=fields, dtype=np.int64)
# Index the dataframe by ID for quick retrieval later, and sort the index to make those lookups even faster
self.mp_df.set_index("ID", inplace=True)
self.mp_df.sort_index(inplace=True)
# Determine the max number of edges participating in a turn. This will be used when creating the turn feature
# class to initialize the proper number of fields.
self.max_turn_edges = int(self.mp_df["SEQNR"].max())
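    # Example (illustrative): if some maneuver in the MP table has SEQNR values 1..5,
    # max_turn_edges becomes 5 and CreateTurnFeatureClass is later called with that value,
    # sizing the turn feature class for turns that traverse up to five edges.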
@timed_exec
def _read_and_index_historical_traffic(self):
"""Read and index historical traffic tables."""
if not self.include_historical_traffic:
# Sanity check
return None
fields = ["NETWORK_ID", "VAL_DIR", "SPWEEKDAY", "SPWEEKEND", "SPWEEK"]
with arcpy.da.SearchCursor(self.in_multinet.hsnp, fields, "VAL_DIR IN (2, 3)") as cur:
# Explicitly read it in using int64 to convert the double-based ID field for easy indexing and lookups
hsnp_df = pd.DataFrame(cur, columns=fields, dtype=np.int64)
# Index the dataframe by NETWORK_ID for quick retrieval later,
# and sort the index to make those lookups even faster
hsnp_df.set_index("NETWORK_ID", inplace=True)
hsnp_df.sort_index(inplace=True)
return hsnp_df
@timed_exec
def _read_and_index_logistics_tables(self):
"""Read and index MultiNet Logistics tables."""
if not self.include_logistics:
# Sanity check
return
# Read in lookup tables
restrtype_df = pd.read_csv(
os.path.join(CURDIR, "LogisticsAttributeLookupTables", "RESTRTYP.csv"), index_col="RESTRTYP")
vt_df = pd.read_csv(os.path.join(CURDIR, "LogisticsAttributeLookupTables", "VT.csv"), index_col="VT")
restrval_df = pd.read_csv(
os.path.join(CURDIR, "LogisticsAttributeLookupTables", "RESTRVAL.csv"), index_col="RESTRVAL")
# Because restrictions that require additional caveats from the LVC table are quite complex and are difficult to
# model accurately, these are not included in our output network dataset. Read the LVC table to weed out any
# records in the LRS table that have a matching ID and SEQNR combination.
fields = ["ID", "SEQNR"]
with arcpy.da.SearchCursor(self.in_multinet.lvc, fields) as cur:
# Explicitly read it in using int64 to convert the double-based ID field for easy indexing and lookups
lvc_df = pd.DataFrame(cur, columns=fields, dtype=np.int64)
# Add a field to use as a mask after joining
lvc_df["DROP"] = True
# Index the dataframe by ID and SEQNR for joining
lvc_df.set_index(["ID", "SEQNR"], inplace=True)
# Read the LRS table
fields = ["ID", "SEQNR", "RESTRTYP", "VT", "RESTRVAL", "LIMIT", "UNIT_MEAS"]
codes = [f"'{r}'" for r in restrtype_df.index.tolist()]
where = f"RESTRTYP IN ({', '.join(codes)})"
with arcpy.da.SearchCursor(self.in_multinet.lrs, fields, where) as cur:
            self.lrs_df = pd.DataFrame(cur, columns=fields)
'''
Create a CSV with inspection report, permit_id, date, time, inspection type, critical violation count, non-critical violation count, critical violations corrected on site, non-critical violations corrected on site, critical violations to be resolved, non-critical violations to be resolved, critical repeat violations, and non-critical repeat violations.
Create a second CSV with inspection report, permit_id, critical violation count, non-critical violation count, lat, and lon for mapping purposes.
20160623 <NAME>
'''
import pandas as pd
def map_list():
#business list
bus = pd.read_csv('geo_master.csv')
#restaurant_inspections
insp = pd.read_csv('restaurant_inspections.csv')
    merged = pd.merge(left=insp, right=bus, left_on='permit_id', right_on='permit_id')
merged.drop(['name', 'address', 'category', 'permit_url',
'map_url', 'phone_num', 'YelpID'], axis=1, inplace=True)
merged.to_csv('mapping.csv', index=False)
def insp_compilation():
#report details list
    rpt_list = pd.read_csv('cleaned_report_results.csv')
from typing import List, Any
from itertools import chain
from app_utils import AppFileHandler
import pandas as pd
import sqlite3
import logging
import os
import re
pd.set_option('display.max_rows', None)
class SansNotesApp(AppFileHandler):
APP_FILES = os.path.join(os.getcwd(),'SansNotesAppFiles')
APP_DATABASE_FILES = os.path.join(APP_FILES,'NotesAppDbFiles')
CREATE_TABLE = """
CREATE TABLE IF NOT EXISTS {table_name_field}
(
subject VARCHAR(50),
topic VARCHAR(50),
book VARCHAR(3),
page VARCHAR(3),
notes VARCHAR(1000)
);
"""
DROP_TABLE = """
DROP TABLE {table_name_field};
"""
INSERT = """
INSERT INTO {table_name_field}
(
subject,
topic,
book,
page,
notes
)
VALUES
(
{subject_},
{topic_},
{book_},
{page_},
{notes_}
);
"""
SEARCH = """
SELECT DISTINCT
*
FROM
{table_name_field}
WHERE
{col_name}
"""
SHOW_TABLE_DATA = """
SELECT DISTINCT
*
FROM {table_name_field}
"""
DELETE_DATA = """
DELETE FROM {table_name_field}
"""
def __init__(self):
super().__init__()
if not os.path.exists(SansNotesApp.APP_FILES):
os.mkdir(SansNotesApp.APP_FILES)
logging.basicConfig(
filename=os.path.join(SansNotesApp.APP_FILES,'NotesAppDb.log'),
level=logging.DEBUG,
filemode='w'
)
logging.debug(SansNotesApp.APP_FILES)
if not os.path.exists(SansNotesApp.APP_DATABASE_FILES):
os.mkdir(SansNotesApp.APP_DATABASE_FILES)
logging.debug(os.listdir(SansNotesApp.APP_FILES))
def __format_db_name(self,db_name_fmt:str) -> str:
db_path = os.path.join(SansNotesApp.APP_DATABASE_FILES,'{}.db'.format(SansNotesApp.check_char_string(db_name_fmt)))
logging.debug(db_path)
return db_path
@property
def database_name(self) -> str:
return self.__db_name
@database_name.setter
def database_name(self,db_name:str):
"""
Enter a string with no spaces or special characters
as the database name
"""
self.__db_name = self.__format_db_name(db_name)
logging.debug(self.__db_name)
def db_connect_and_cursor(self) -> bool:
"""
This function will create a database if it doesn't exist.
"""
self.__con = sqlite3.connect(self.__db_name)
logging.debug(self.__con)
self.__cur = self.__con.cursor()
logging.debug(self.__cur)
return True
def get_cursor(self):
return self.__cur
def check_db_file(self) -> bool:
return os.path.exists(self.__db_name)
def show_tables(self) -> List[str]:
self.__cur.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables_normalized = list(chain.from_iterable(self.__cur.fetchall()))
return tables_normalized
def committ_and_close(self) -> bool:
self.__con.commit()
self.__con.close()
return True
def drop_table(self, table_to_drop) -> bool:
clean_drp_tbl_nm = SansNotesApp.check_char_string(table_to_drop)
self.__cur.execute(
SansNotesApp.DROP_TABLE.format(table_name_field=clean_drp_tbl_nm)
)
success = clean_drp_tbl_nm not in self.show_tables()
logging.debug(f'{SansNotesApp.check_char_string(clean_drp_tbl_nm)} dropped:{success}')
return success
def create_table(self, table_name) -> bool:
clean_crt_tbl_nm = SansNotesApp.check_char_string(table_name)
self.__cur.execute(
SansNotesApp.CREATE_TABLE.format(table_name_field=clean_crt_tbl_nm)
)
success = clean_crt_tbl_nm in self.show_tables()
logging.debug(f'{SansNotesApp.check_char_string(clean_crt_tbl_nm)} created:{success}')
return success
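    # Minimal usage sketch (the database and table names below are made up for illustration):
    #   >>> app = SansNotesApp()
    #   >>> app.database_name = "sec504"
    #   >>> app.db_connect_and_cursor()
    #   >>> app.create_table("book1_notes")
    #   >>> app.show_tables()
    #   ['book1_notes']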
def insert_values(
self,
table_name,
subject,
topic,
book,
page,
notes = ''
)-> bool:
values_list = list(
map(
lambda x: str(x),
[
subject.replace("'","`"),
topic.replace("'","`"),
book.replace("'","`"),
page.replace("'","`"),
notes.replace("'","`")
]
)
)
print(values_list)
fmt_func = SansNotesApp.__format_values_string
insert_value_query_string = SansNotesApp.INSERT.format(
table_name_field=table_name,
subject_=fmt_func(values_list[0]),
topic_=fmt_func(values_list[1]),
book_=fmt_func(values_list[2]),
page_=fmt_func(values_list[3]),
notes_=fmt_func(values_list[4])
)
logging.debug(insert_value_query_string)
self.__cur.execute(
insert_value_query_string
)
return True
def show_table_data(self,table_name) -> List[Any]:
clean_table_name = SansNotesApp.check_char_string(table_name,strict=False)
show_table_query_string = SansNotesApp.SHOW_TABLE_DATA.format(table_name_field = clean_table_name)
logging.debug(show_table_query_string)
table_data = [*self.__cur.execute(
show_table_query_string
)]
        table_data_df = pd.DataFrame(table_data, columns=[tuple[0] for tuple in self.__cur.description])
logging.debug(f'table_data: {[[tuple[0] for tuple in self.__cur.description]]+table_data}')
return table_data_df
def delete_data(self,
table_name,
subject = None,
topic = None,
book = None,
page = None,
notes = None,
strict_search = True) -> bool:
del_query_string = SansNotesApp.DELETE_DATA.format(table_name_field=SansNotesApp.check_char_string(table_name)) \
if not any({subject,topic,book,page,notes}) \
else SansNotesApp.DELETE_DATA.format(table_name_field=SansNotesApp.check_char_string(table_name)) + SansNotesApp.format_where_clause(
subject,
topic,
book,
page,
notes,
strict_search
)
self.__cur.execute(
del_query_string
)
logging.debug(f'del_query_string: {del_query_string}')
return True
def search_data(self,
table_name,
subject = None,
topic = None,
book = None,
page = None,
notes = None,
strict_search = True
) -> List[Any]:
search_query_string = SansNotesApp.SHOW_TABLE_DATA.format(table_name_field=SansNotesApp.check_char_string(table_name)) \
if not any({subject,topic,book,page,notes}) \
else SansNotesApp.SHOW_TABLE_DATA.format(table_name_field=SansNotesApp.check_char_string(table_name)) + SansNotesApp.format_where_clause(
subject,
topic,
book,
page,
notes,
strict_search
)
logging.debug(f'search_query_string: {search_query_string}')
search_table_data = [*self.__cur.execute(
search_query_string
)]
        search_data_list = pd.DataFrame(search_table_data, columns=[tuple[0] for tuple in self.__cur.description])
        return search_data_list
import inspect
import re
from functools import wraps
from typing import Union, Any, List
from uuid import uuid4
import pandas
import sys
from .six import string_types, integer_types
from .fields import (FIELD_DATAFRAME, FIELD_TEXT, FIELD_NUMERIC, FIELD_NO_INPUT,
FIELD_SELECT, FIELD_SELECT_MULTIPLE)
from .utils import fn_name_to_pretty_label, float_to_decimal, vectorized_is_valid, vectorized_compare_dates, \
vectorized_is_complete_date, vectorized_len, vectorized_get_dict_key, vectorized_is_in, vectorized_case_insensitive_is_in
from decimal import Decimal, Inexact, Context
import operator
import numpy as np
import pandas as pd
class BaseType(object):
def __init__(self, value):
self.value = self._assert_valid_value_and_cast(value)
def _assert_valid_value_and_cast(self, value):
raise NotImplemented()
@classmethod
def get_all_operators(cls):
methods = inspect.getmembers(cls)
return [{'name': m[0],
'label': m[1].label,
'input_type': m[1].input_type}
for m in methods if getattr(m[1], 'is_operator', False)]
def export_type(cls):
""" Decorator to expose the given class to business_rules.export_rule_data. """
cls.export_in_rule_data = True
return cls
def type_operator(input_type, label=None,
assert_type_for_arguments=True):
""" Decorator to make a function into a type operator.
- assert_type_for_arguments - if True this patches the operator function
so that arguments passed to it will have _assert_valid_value_and_cast
called on them to make type errors explicit.
"""
def wrapper(func):
func.is_operator = True
func.label = label \
or fn_name_to_pretty_label(func.__name__)
func.input_type = input_type
@wraps(func)
def inner(self, *args, **kwargs):
if assert_type_for_arguments:
args = [self._assert_valid_value_and_cast(arg) for arg in args]
kwargs = dict((k, self._assert_valid_value_and_cast(v))
for k, v in kwargs.items())
return func(self, *args, **kwargs)
return inner
return wrapper
@export_type
class StringType(BaseType):
name = "string"
def _assert_valid_value_and_cast(self, value):
value = value or ""
if not isinstance(value, string_types):
raise AssertionError("{0} is not a valid string type.".
format(value))
return value
@type_operator(FIELD_TEXT)
def equal_to(self, other_string):
return self.value == other_string
@type_operator(FIELD_TEXT)
def not_equal_to(self, other_string):
return self.value != other_string
@type_operator(FIELD_TEXT, label="Equal To (case insensitive)")
def equal_to_case_insensitive(self, other_string):
return self.value.lower() == other_string.lower()
@type_operator(FIELD_TEXT)
def starts_with(self, other_string):
return self.value.startswith(other_string)
@type_operator(FIELD_TEXT)
def ends_with(self, other_string):
return self.value.endswith(other_string)
@type_operator(FIELD_TEXT)
def contains(self, other_string):
return other_string in self.value
@type_operator(FIELD_TEXT)
def matches_regex(self, regex):
return re.search(regex, self.value)
@type_operator(FIELD_NO_INPUT)
def non_empty(self):
return bool(self.value)
@export_type
class NumericType(BaseType):
EPSILON = Decimal('0.000001')
name = "numeric"
@staticmethod
def _assert_valid_value_and_cast(value):
if isinstance(value, float):
# In python 2.6, casting float to Decimal doesn't work
return float_to_decimal(value)
if isinstance(value, integer_types):
return Decimal(value)
if isinstance(value, Decimal):
return value
else:
raise AssertionError("{0} is not a valid numeric type.".
format(value))
@type_operator(FIELD_NUMERIC)
def equal_to(self, other_numeric):
return abs(self.value - other_numeric) <= self.EPSILON
@type_operator(FIELD_NUMERIC)
def not_equal_to(self, other_numeric):
return abs(self.value - other_numeric) > self.EPSILON
@type_operator(FIELD_NUMERIC)
def greater_than(self, other_numeric):
return (self.value - other_numeric) > self.EPSILON
@type_operator(FIELD_NUMERIC)
def greater_than_or_equal_to(self, other_numeric):
return self.greater_than(other_numeric) or self.equal_to(other_numeric)
@type_operator(FIELD_NUMERIC)
def less_than(self, other_numeric):
return (other_numeric - self.value) > self.EPSILON
@type_operator(FIELD_NUMERIC)
def less_than_or_equal_to(self, other_numeric):
return self.less_than(other_numeric) or self.equal_to(other_numeric)
@export_type
class BooleanType(BaseType):
name = "boolean"
def _assert_valid_value_and_cast(self, value):
if type(value) != bool:
raise AssertionError("{0} is not a valid boolean type".
format(value))
return value
@type_operator(FIELD_NO_INPUT)
def is_true(self):
return self.value
@type_operator(FIELD_NO_INPUT)
def is_false(self):
return not self.value
@export_type
class SelectType(BaseType):
name = "select"
def _assert_valid_value_and_cast(self, value):
if not hasattr(value, '__iter__'):
raise AssertionError("{0} is not a valid select type".
format(value))
return value
@staticmethod
def _case_insensitive_equal_to(value_from_list, other_value):
if isinstance(value_from_list, string_types) and \
isinstance(other_value, string_types):
return value_from_list.lower() == other_value.lower()
else:
return value_from_list == other_value
@type_operator(FIELD_SELECT, assert_type_for_arguments=False)
def contains(self, other_value):
for val in self.value:
if self._case_insensitive_equal_to(val, other_value):
return True
return False
@type_operator(FIELD_SELECT, assert_type_for_arguments=False)
def does_not_contain(self, other_value):
for val in self.value:
if self._case_insensitive_equal_to(val, other_value):
return False
return True
@export_type
class SelectMultipleType(BaseType):
name = "select_multiple"
def _assert_valid_value_and_cast(self, value):
if not hasattr(value, '__iter__'):
raise AssertionError("{0} is not a valid select multiple type".
format(value))
return value
@type_operator(FIELD_SELECT_MULTIPLE)
def contains_all(self, other_value):
select = SelectType(self.value)
for other_val in other_value:
if not select.contains(other_val):
return False
return True
@type_operator(FIELD_SELECT_MULTIPLE)
def is_contained_by(self, other_value):
other_select_multiple = SelectMultipleType(other_value)
return other_select_multiple.contains_all(self.value)
@type_operator(FIELD_SELECT_MULTIPLE)
def is_not_contained_by(self, other_value):
return not self.is_contained_by(other_value)
@type_operator(FIELD_SELECT_MULTIPLE)
def shares_at_least_one_element_with(self, other_value):
select = SelectType(self.value)
for other_val in other_value:
if select.contains(other_val):
return True
return False
@type_operator(FIELD_SELECT_MULTIPLE)
def shares_exactly_one_element_with(self, other_value):
found_one = False
select = SelectType(self.value)
for other_val in other_value:
if select.contains(other_val):
if found_one:
return False
found_one = True
return found_one
@type_operator(FIELD_SELECT_MULTIPLE)
def shares_no_elements_with(self, other_value):
return not self.shares_at_least_one_element_with(other_value)
@export_type
class DataframeType(BaseType):
name = "dataframe"
def __init__(self, data):
self.value: pd.DataFrame = self._assert_valid_value_and_cast(data["value"])
self.column_prefix_map = data.get("column_prefix_map", {})
self.relationship_data = data.get("relationship_data", {})
self.value_level_metadata = data.get("value_level_metadata", [])
self.column_codelist_map = data.get("column_codelist_map", {})
self.codelist_term_maps = data.get("codelist_term_maps", [])
def _assert_valid_value_and_cast(self, value):
if not hasattr(value, '__iter__'):
raise AssertionError("{0} is not a valid select multiple type".
format(value))
return value
def convert_string_data_to_lower(self, data):
if isinstance(data, pd.core.series.Series):
data = data.str.lower()
else:
data = data.lower()
return data
def replace_prefix(self, value: str) -> Union[str, Any]:
if isinstance(value, str):
for prefix, replacement in self.column_prefix_map.items():
if value.startswith(prefix):
return value.replace(prefix, replacement, 1)
return value
def replace_all_prefixes(self, values: [str]) -> [str]:
for i in range(len(values)):
values[i] = self.replace_prefix(values[i])
return values
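    # Illustrative example (hypothetical prefix map, not taken from the source data): with
    # column_prefix_map = {"--": "AE"}, replace_prefix("--DECOD") returns "AEDECOD", while a
    # value that matches no prefix (e.g. "SEVERE") is returned unchanged.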
def get_comparator_data(self, comparator, value_is_literal: bool = False) -> Union[str, pd.Series]:
if value_is_literal:
return comparator
else:
return self.value.get(comparator, comparator)
def is_column_of_iterables(self, column):
return isinstance(column, pandas.core.series.Series) and (isinstance(column.iloc[0], list) or isinstance(column.iloc[0], set))
@type_operator(FIELD_DATAFRAME)
def exists(self, other_value) -> pd.Series:
target_column = self.replace_prefix(other_value.get("target"))
return pd.Series([target_column in self.value] * len(self.value))
@type_operator(FIELD_DATAFRAME)
def not_exists(self, other_value):
return ~self.exists(other_value)
@type_operator(FIELD_DATAFRAME)
def equal_to(self, other_value) -> pd.Series:
target = self.replace_prefix(other_value.get("target"))
value_is_literal = other_value.get("value_is_literal", False)
comparator = self.replace_prefix(other_value.get("comparator")) if not value_is_literal else other_value.get("comparator")
comparison_data = self.get_comparator_data(comparator, value_is_literal)
return self.value[target].eq(comparison_data) & ~self.value[target].isin(["", None])
@type_operator(FIELD_DATAFRAME)
def equal_to_case_insensitive(self, other_value):
target = self.replace_prefix(other_value.get("target"))
value_is_literal = other_value.get("value_is_literal", False)
comparator = self.replace_prefix(other_value.get("comparator")) if not value_is_literal else other_value.get("comparator")
comparison_data = self.get_comparator_data(comparator, value_is_literal)
comparison_data = self.convert_string_data_to_lower(comparison_data)
return (self.value[target].str.lower() == comparison_data) & ~self.value[target].isin(["", None])
@type_operator(FIELD_DATAFRAME)
def not_equal_to_case_insensitive(self, other_value):
return ~self.equal_to_case_insensitive(other_value)
@type_operator(FIELD_DATAFRAME)
def not_equal_to(self, other_value):
return ~self.equal_to(other_value)
@type_operator(FIELD_DATAFRAME)
def suffix_equal_to(self, other_value: dict) -> pd.Series:
"""
Checks if target suffix is equal to comparator.
"""
target: str = self.replace_prefix(other_value.get("target"))
value_is_literal: bool = other_value.get("value_is_literal", False)
comparator: Union[str, Any] = self.replace_prefix(other_value.get("comparator")) if not value_is_literal else other_value.get("comparator")
comparison_data: Union[str, pd.Series] = self.get_comparator_data(comparator, value_is_literal)
suffix: int = self.replace_prefix(other_value.get("suffix"))
return self._check_equality_of_string_part(target, comparison_data, "suffix", suffix)
@type_operator(FIELD_DATAFRAME)
def suffix_not_equal_to(self, other_value: dict) -> pd.Series:
"""
Checks if target suffix is not equal to comparator.
"""
return ~self.suffix_equal_to(other_value)
@type_operator(FIELD_DATAFRAME)
def prefix_equal_to(self, other_value: dict) -> pd.Series:
"""
Checks if target prefix is equal to comparator.
"""
target: str = self.replace_prefix(other_value.get("target"))
value_is_literal: bool = other_value.get("value_is_literal", False)
comparator: Union[str, Any] = self.replace_prefix(other_value.get("comparator")) if not value_is_literal else other_value.get("comparator")
comparison_data: Union[str, pd.Series] = self.get_comparator_data(comparator, value_is_literal)
prefix: int = self.replace_prefix(other_value.get("prefix"))
return self._check_equality_of_string_part(target, comparison_data, "prefix", prefix)
@type_operator(FIELD_DATAFRAME)
def prefix_not_equal_to(self, other_value: dict) -> pd.Series:
"""
Checks if target prefix is not equal to comparator.
"""
return ~self.prefix_equal_to(other_value)
def _check_equality_of_string_part(
self,
target: str,
comparison_data: Union[str, pd.Series],
part_to_validate: str,
length: int
) -> pd.Series:
"""
Checks if the given string part is equal to comparison data.
"""
if not self.value[target].apply(type).eq(str).all():
raise ValueError("The operator can't be used with non-string values")
# compare
if part_to_validate == "suffix":
series_to_validate: pd.Series = self.value[target].str.slice(-length)
elif part_to_validate == "prefix":
            series_to_validate: pd.Series = self.value[target].str.slice(stop=length)
else:
raise ValueError(f"Invalid part to validate: {part_to_validate}. Valid values are: suffix, prefix")
return series_to_validate.eq(comparison_data)
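    # Illustrative behaviour of the slicing above (plain pandas, invented values):
    #   >>> s = pd.Series(["STUDY-001", "STUDY-002"])
    #   >>> s.str.slice(-3)        # suffix of length 3
    #   0    001
    #   1    002
    #   >>> s.str.slice(stop=5)    # prefix of length 5
    #   0    STUDY
    #   1    STUDY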
@type_operator(FIELD_DATAFRAME)
def less_than(self, other_value):
target = self.replace_prefix(other_value.get("target"))
value_is_literal = other_value.get("value_is_literal", False)
comparator = self.replace_prefix(other_value.get("comparator")) if not value_is_literal else other_value.get("comparator")
comparison_data = self.get_comparator_data(comparator, value_is_literal)
results = np.where(self.value[target] < comparison_data, True, False)
return pd.Series(results)
@type_operator(FIELD_DATAFRAME)
def less_than_or_equal_to(self, other_value):
target = self.replace_prefix(other_value.get("target"))
value_is_literal = other_value.get("value_is_literal", False)
comparator = self.replace_prefix(other_value.get("comparator")) if not value_is_literal else other_value.get("comparator")
comparison_data = self.get_comparator_data(comparator, value_is_literal)
results = np.where(self.value[target] <= comparison_data, True, False)
return pd.Series(results)
@type_operator(FIELD_DATAFRAME)
def greater_than_or_equal_to(self, other_value):
target = self.replace_prefix(other_value.get("target"))
value_is_literal = other_value.get("value_is_literal", False)
comparator = self.replace_prefix(other_value.get("comparator")) if not value_is_literal else other_value.get("comparator")
comparison_data = self.get_comparator_data(comparator, value_is_literal)
results = np.where(self.value[target] >= comparison_data, True, False)
return pd.Series(results)
@type_operator(FIELD_DATAFRAME)
def greater_than(self, other_value):
target = self.replace_prefix(other_value.get("target"))
value_is_literal = other_value.get("value_is_literal", False)
comparator = self.replace_prefix(other_value.get("comparator")) if not value_is_literal else other_value.get("comparator")
comparison_data = self.get_comparator_data(comparator, value_is_literal)
results = np.where(self.value[target] > comparison_data, True, False)
return pd.Series(results)
@type_operator(FIELD_DATAFRAME)
def contains(self, other_value):
target = self.replace_prefix(other_value.get("target"))
value_is_literal = other_value.get("value_is_literal", False)
comparator = self.replace_prefix(other_value.get("comparator")) if not value_is_literal else other_value.get("comparator")
comparison_data = self.get_comparator_data(comparator, value_is_literal)
if self.is_column_of_iterables(self.value[target]):
results = vectorized_is_in(comparison_data, self.value[target])
elif isinstance(comparator, pandas.core.series.Series):
results = np.where(comparison_data.isin(self.value[target]), True, False)
else:
results = np.where(self.value[target] == comparison_data, True, False)
return pd.Series(results)
@type_operator(FIELD_DATAFRAME)
def does_not_contain(self, other_value):
return ~self.contains(other_value)
@type_operator(FIELD_DATAFRAME)
def contains_case_insensitive(self, other_value):
target = self.replace_prefix(other_value.get("target"))
value_is_literal = other_value.get("value_is_literal", False)
comparator = self.replace_prefix(other_value.get("comparator")) if not value_is_literal else other_value.get("comparator")
comparison_data = self.get_comparator_data(comparator, value_is_literal)
comparison_data = self.convert_string_data_to_lower(comparison_data)
if self.is_column_of_iterables(self.value[target]):
results = vectorized_case_insensitive_is_in(comparison_data, self.value[target])
elif isinstance(comparator, pandas.core.series.Series):
results = np.where(comparison_data.isin(self.value[target].str.lower()), True, False)
else:
results = np.where(self.value[target].str.lower() == comparison_data, True, False)
return pd.Series(results)
@type_operator(FIELD_DATAFRAME)
def does_not_contain_case_insensitive(self, other_value):
return ~self.contains_case_insensitive(other_value)
@type_operator(FIELD_DATAFRAME)
def is_contained_by(self, other_value):
target = self.replace_prefix(other_value.get("target"))
value_is_literal = other_value.get("value_is_literal", False)
comparator = other_value.get("comparator")
if isinstance(comparator, str) and not value_is_literal:
# column name provided
comparator = self.replace_prefix(comparator)
comparison_data = self.get_comparator_data(comparator, value_is_literal)
if self.is_column_of_iterables(comparison_data):
results = vectorized_is_in(self.value[target], comparison_data)
else:
results = self.value[target].isin(comparison_data)
return pd.Series(results)
@type_operator(FIELD_DATAFRAME)
def is_not_contained_by(self, other_value):
return ~self.is_contained_by(other_value)
@type_operator(FIELD_DATAFRAME)
def is_contained_by_case_insensitive(self, other_value):
target = self.replace_prefix(other_value.get("target"))
comparator = other_value.get("comparator", [])
value_is_literal = other_value.get("value_is_literal", False)
if isinstance(comparator, list):
comparator = [val.lower() for val in comparator]
elif isinstance(comparator, str) and not value_is_literal:
# column name provided
comparator = self.replace_prefix(comparator)
comparison_data = self.get_comparator_data(comparator, value_is_literal)
if self.is_column_of_iterables(comparison_data):
results = vectorized_case_insensitive_is_in(self.value[target].str.lower(), comparison_data)
return pd.Series(results)
elif isinstance(comparison_data, pd.core.series.Series):
results = self.value[target].str.lower().isin(comparison_data.str.lower())
else:
results = self.value[target].str.lower().isin(comparison_data)
return pd.Series(results.values)
@type_operator(FIELD_DATAFRAME)
def is_not_contained_by_case_insensitive(self, other_value):
return ~self.is_contained_by_case_insensitive(other_value)
@type_operator(FIELD_DATAFRAME)
def prefix_matches_regex(self, other_value):
target = self.replace_prefix(other_value.get("target"))
comparator = other_value.get("comparator")
prefix = other_value.get("prefix")
results = self.value[target].map(lambda x: re.search(comparator, x[:prefix]) is not None)
return pd.Series(results.values)
@type_operator(FIELD_DATAFRAME)
def not_prefix_matches_regex(self, other_value):
return ~self.prefix_matches_regex(other_value)
@type_operator(FIELD_DATAFRAME)
def suffix_matches_regex(self, other_value):
target = self.replace_prefix(other_value.get("target"))
comparator = other_value.get("comparator")
suffix = other_value.get("suffix")
results = self.value[target].apply(lambda x: re.search(comparator, x[-suffix:]) is not None)
return pd.Series(results.values)
@type_operator(FIELD_DATAFRAME)
def not_suffix_matches_regex(self, other_value):
return ~self.suffix_matches_regex(other_value)
@type_operator(FIELD_DATAFRAME)
def matches_regex(self, other_value):
target = self.replace_prefix(other_value.get("target"))
comparator = other_value.get("comparator")
results = self.value[target].str.match(comparator)
return pd.Series(results.values)
@type_operator(FIELD_DATAFRAME)
def not_matches_regex(self, other_value):
return ~self.matches_regex(other_value)
@type_operator(FIELD_DATAFRAME)
def starts_with(self, other_value):
target = self.replace_prefix(other_value.get("target"))
comparator = other_value.get("comparator")
results = self.value[target].str.startswith(comparator)
return pd.Series(results.values)
@type_operator(FIELD_DATAFRAME)
def ends_with(self, other_value):
target = self.replace_prefix(other_value.get("target"))
comparator = other_value.get("comparator")
results = self.value[target].str.endswith(comparator)
return pd.Series(results.values)
@type_operator(FIELD_DATAFRAME)
def has_equal_length(self, other_value: dict):
target = self.replace_prefix(other_value.get("target"))
comparator = other_value.get("comparator")
results = self.value[target].str.len().eq(comparator)
return pd.Series(results)
@type_operator(FIELD_DATAFRAME)
def has_not_equal_length(self, other_value: dict):
return ~self.has_equal_length(other_value)
@type_operator(FIELD_DATAFRAME)
def longer_than(self, other_value: dict):
target = self.replace_prefix(other_value.get("target"))
comparator = other_value.get("comparator")
results = self.value[target].str.len().gt(comparator)
return pd.Series(results.values)
@type_operator(FIELD_DATAFRAME)
def longer_than_or_equal_to(self, other_value: dict):
target = self.replace_prefix(other_value.get("target"))
comparator = other_value.get("comparator")
results = self.value[target].str.len().ge(comparator)
return pd.Series(results.values)
@type_operator(FIELD_DATAFRAME)
def shorter_than(self, other_value: dict):
return ~self.longer_than_or_equal_to(other_value)
@type_operator(FIELD_DATAFRAME)
def shorter_than_or_equal_to(self, other_value: dict):
return ~self.longer_than(other_value)
@type_operator(FIELD_DATAFRAME)
def empty(self, other_value: dict):
target = self.replace_prefix(other_value.get("target"))
results = np.where(self.value[target].isin(["", None]), True, False)
return pd.Series(results)
@type_operator(FIELD_DATAFRAME)
def empty_within_except_last_row(self, other_value: dict):
target = self.replace_prefix(other_value.get("target"))
comparator = other_value.get("comparator")
# group all targets by comparator
grouped_target = self.value.groupby(comparator)[target]
# validate all targets except the last one
results = grouped_target.apply(lambda x: x[:-1]).apply(lambda x: x in ["", None])
# extract values with corresponding indexes from results
self.value[f"result_{uuid4()}"] = results.reset_index(level=0, drop=True)
return True in results.values
@type_operator(FIELD_DATAFRAME)
def non_empty(self, other_value: dict):
return ~self.empty(other_value)
@type_operator(FIELD_DATAFRAME)
def non_empty_within_except_last_row(self, other_value: dict):
target = self.replace_prefix(other_value.get("target"))
comparator = other_value.get("comparator")
# group all targets by comparator
grouped_target = self.value.groupby(comparator)[target]
# validate all targets except the last one
results = ~grouped_target.apply(lambda x: x[:-1]).apply(lambda x: x in ["", None])
# extract values with corresponding indexes from results
self.value[f"result_{uuid4()}"] = results.reset_index(level=0, drop=True)
return not(False in results.values)
@type_operator(FIELD_DATAFRAME)
def contains_all(self, other_value: dict):
target = self.replace_prefix(other_value.get("target"))
comparator = other_value.get("comparator")
if isinstance(comparator, list):
# get column as array of values
values = comparator
else:
comparator = self.replace_prefix(comparator)
values = self.value[comparator].unique()
return set(values).issubset(set(self.value[target].unique()))
@type_operator(FIELD_DATAFRAME)
def not_contains_all(self, other_value: dict):
return not self.contains_all(other_value)
@type_operator(FIELD_DATAFRAME)
def invalid_date(self, other_value):
target = self.replace_prefix(other_value.get("target"))
results = ~vectorized_is_valid(self.value[target])
return pd.Series(results)
def date_comparison(self, other_value, operator):
target = self.replace_prefix(other_value.get("target"))
comparator = self.replace_prefix(other_value.get("comparator"))
component = other_value.get("date_component")
results = np.where(vectorized_compare_dates(component, self.value[target], self.value.get(comparator, comparator), operator), True, False)
return pd.Series(results)
@type_operator(FIELD_DATAFRAME)
def date_equal_to(self, other_value):
return self.date_comparison(other_value, operator.eq)
@type_operator(FIELD_DATAFRAME)
def date_not_equal_to(self, other_value):
return self.date_comparison(other_value, operator.ne)
@type_operator(FIELD_DATAFRAME)
def date_less_than(self, other_value):
return self.date_comparison(other_value, operator.lt)
@type_operator(FIELD_DATAFRAME)
def date_less_than_or_equal_to(self, other_value):
return self.date_comparison(other_value, operator.le)
@type_operator(FIELD_DATAFRAME)
def date_greater_than_or_equal_to(self, other_value):
return self.date_comparison(other_value, operator.ge)
@type_operator(FIELD_DATAFRAME)
def date_greater_than(self, other_value):
return self.date_comparison(other_value, operator.gt)
@type_operator(FIELD_DATAFRAME)
def is_incomplete_date(self, other_value):
return ~self.is_complete_date(other_value)
@type_operator(FIELD_DATAFRAME)
def is_complete_date(self, other_value):
target = self.replace_prefix(other_value.get("target"))
results = vectorized_is_complete_date(self.value[target])
return pd.Series(results)
@type_operator(FIELD_DATAFRAME)
def is_unique_set(self, other_value):
target = self.replace_prefix(other_value.get("target"))
value = other_value.get("comparator")
if isinstance(value, list):
value.append(target)
target_data = value
else:
target_data = [value, target]
target_data = self.replace_all_prefixes(target_data)
counts = self.value[target_data].groupby(target_data)[target].transform('size')
results = np.where(counts <= 1, True, False)
return pd.Series(results)
@type_operator(FIELD_DATAFRAME)
def is_not_unique_relationship(self, other_value) -> pd.Series:
"""
Validates one-to-one relationship between two columns (target and comparator) against a dataset.
One-to-one means that a pair of columns can be duplicated but its integrity must not be violated:
one value of target always corresponds to one value of comparator. Examples:
Valid dataset:
STUDYID STUDYDESC
1 A
2 B
3 C
1 A
2 B
Invalid dataset:
STUDYID STUDYDESC
1 A
2 A
3 C
"""
target = self.replace_prefix(other_value.get("target"))
comparator = other_value.get("comparator")
if isinstance(comparator, list):
comparator = self.replace_all_prefixes(comparator)
else:
comparator = self.replace_prefix(comparator)
# remove repeating rows
df_without_duplicates: pd.DataFrame = self.value[[target, comparator]].drop_duplicates()
# we need to check if ANY of the columns (target or comparator) is duplicated
duplicated_comparator: pd.Series = df_without_duplicates[comparator].duplicated(keep=False)
duplicated_target: pd.Series = df_without_duplicates[target].duplicated(keep=False)
result = pd.Series([False] * len(self.value))
if duplicated_comparator.any():
duplicated_comparator_values = set(df_without_duplicates[duplicated_comparator][comparator])
result += self.value[comparator].isin(duplicated_comparator_values)
if duplicated_target.any():
duplicated_target_values = set(df_without_duplicates[duplicated_target][target])
result += self.value[target].isin(duplicated_target_values)
return result
@type_operator(FIELD_DATAFRAME)
def is_unique_relationship(self, other_value) -> pd.Series:
return ~self.is_not_unique_relationship(other_value)
@type_operator(FIELD_DATAFRAME)
def is_not_unique_set(self, other_value):
return ~self.is_unique_set(other_value)
@type_operator(FIELD_DATAFRAME)
def is_ordered_set(self, other_value):
target = self.replace_prefix(other_value.get("target"))
value = other_value.get("comparator")
if isinstance(value, list):
raise Exception('Comparator must be a single String value')
return not (False in self.value.groupby(value).agg(lambda x : list(x))[target].map(lambda x: sorted(x) == x).tolist())
@type_operator(FIELD_DATAFRAME)
def is_not_ordered_set(self, other_value):
target = self.replace_prefix(other_value.get("target"))
value = other_value.get("comparator")
if isinstance(value, list):
raise Exception('Comparator must be a single String value')
return False in self.value.groupby(value).agg(lambda x : list(x))[target].map(lambda x: sorted(x) == x).tolist()
@type_operator(FIELD_DATAFRAME)
def is_valid_reference(self, other_value):
target = self.replace_prefix(other_value.get("target"))
context = self.replace_prefix(other_value.get("context"))
if context:
results = self.value.apply(lambda row: row[target] in self.relationship_data.get(row[context], {}), axis=1)
else:
results = self.value[target].isin(self.relationship_data)
return pd.Series(results.values)
@type_operator(FIELD_DATAFRAME)
def is_not_valid_reference(self, other_value):
return ~self.is_valid_reference(other_value)
@type_operator(FIELD_DATAFRAME)
def is_valid_relationship(self, other_value):
target = self.replace_prefix(other_value.get("target"))
value_column = self.replace_prefix(other_value.get("comparator"))
context = self.replace_prefix(other_value.get("context"))
results = self.value.apply(lambda row: self.detect_reference(row, value_column, target, context), axis=1)
return pd.Series(results.values)
@type_operator(FIELD_DATAFRAME)
def is_not_valid_relationship(self, other_value):
return ~self.is_valid_relationship(other_value)
@type_operator(FIELD_DATAFRAME)
def non_conformant_value_data_type(self, other_value):
results = False
for vlm in self.value_level_metadata:
results |= self.value.apply(lambda row: vlm["filter"](row) and not vlm["type_check"](row), axis=1)
return pd.Series(results.values)
@type_operator(FIELD_DATAFRAME)
def non_conformant_value_length(self, other_value):
results = False
for vlm in self.value_level_metadata:
results |= self.value.apply(lambda row: vlm["filter"](row) and not vlm["length_check"](row), axis=1)
return pd.Series(results.values)
@type_operator(FIELD_DATAFRAME)
def conformant_value_data_type(self, other_value):
results = False
for vlm in self.value_level_metadata:
results |= self.value.apply(lambda row: vlm["filter"](row) and vlm["type_check"](row), axis=1)
return pd.Series(results.values)
@type_operator(FIELD_DATAFRAME)
def conformant_value_length(self, other_value):
results = False
for vlm in self.value_level_metadata:
results |= self.value.apply(lambda row: vlm["filter"](row) and vlm["length_check"](row), axis=1)
return pd.Series(results.values)
@type_operator(FIELD_DATAFRAME)
def has_next_corresponding_record(self, other_value: dict):
"""
The operator ensures that value of target in current row
is the same as value of comparator in the next row.
In order to achieve this, we just remove last row from target
and first row from comparator and compare the resulting contents.
The result is reported for target.
"""
target = self.replace_prefix(other_value.get("target"))
comparator = self.replace_prefix(other_value.get("comparator"))
group_by_column: str = self.replace_prefix(other_value.get("within"))
order_by_column: str = self.replace_prefix(other_value.get("ordering"))
ordered_df = self.value.sort_values(by=[order_by_column])
grouped_df = ordered_df.groupby(group_by_column)
results = grouped_df.apply(lambda x: self.compare_target_with_comparator_next_row(x, target, comparator))
return pd.Series(results.explode().tolist())
@type_operator(FIELD_DATAFRAME)
def does_not_have_next_corresponding_record(self, other_value: dict):
return ~self.has_next_corresponding_record(other_value)
def compare_target_with_comparator_next_row(self, df: pd.DataFrame, target: str, comparator: str):
"""
Compares current row of a target with the next row of comparator.
We can't compare last row of target with the next row of comparator
because there is no row after the last one.
"""
target_without_last_row = df[target].drop(df[target].tail(1).index)
comparator_without_first_row = df[comparator].drop(df[comparator].head(1).index)
results = np.where(target_without_last_row.values == comparator_without_first_row.values, True, False)
return [*results, pandas.NA] # appending NA here to make the length of results list the same as length of df
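    # Sketch of the row-offset comparison above (illustrative values):
    #   target:     [A, B, C]  -> drop last row  -> [A, B]
    #   comparator: [X, A, B]  -> drop first row -> [A, B]
    # Element-wise equality gives [True, True], and pandas.NA is appended so the result
    # aligns with the three rows of the group.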
@type_operator(FIELD_DATAFRAME)
def present_on_multiple_rows_within(self, other_value: dict):
"""
The operator ensures that the target is present on multiple rows
within a group_by column. The dataframe is grouped by a certain column
and the check is applied to each group.
"""
target = self.replace_prefix(other_value.get("target"))
min_count: int = other_value.get("comparator") or 1
group_by_column = self.replace_prefix(other_value.get("within"))
grouped = self.value.groupby(group_by_column)
results = grouped.apply(lambda x: self.validate_series_length(x[target], min_count))
return pd.Series(results.explode().tolist())
def validate_series_length(self, ser: pd.Series, min_length: int):
if len(ser) > min_length:
return [True] * len(ser)
else:
            return [False] * len(ser)
@type_operator(FIELD_DATAFRAME)
def not_present_on_multiple_rows_within(self, other_value: dict):
return ~self.present_on_multiple_rows_within(other_value)
def detect_reference(self, row, value_column, target_column, context=None):
if context:
target_data = self.relationship_data.get(row[context], {}).get(row[target_column], pd.Series([]).values)
else:
target_data = self.relationship_data.get(row[target_column], pd.Series([]).values)
value = row[value_column]
return (value in target_data) or (value in target_data.astype(int).astype(str)) or (value in target_data.astype(str))
@type_operator(FIELD_DATAFRAME)
def additional_columns_empty(self, other_value: dict):
"""
The dataframe column might have some additional columns.
If the next additional column exists, the previous one cannot be empty.
Example:
column - TSVAL
additional columns - TSVAL1, TSVAL2, ...
If TSVAL2 exists -> TSVAL1 cannot be empty.
Original column (TSVAL) can be empty.
The operator extracts these additional columns from the DF
and ensures they are not empty.
"""
target: str = self.replace_prefix(other_value.get("target"))
regex: str = rf"^{target}\d+$" # starting from target, ending with integers and nothing is between them
df: pd.DataFrame = self.value.filter(regex=regex)
# applying a function to each row
result: pd.Series = df.apply(lambda row: self.next_column_exists_and_previous_is_null(row), axis=1)
return result
@type_operator(FIELD_DATAFRAME)
def additional_columns_not_empty(self, other_value: dict):
return ~self.additional_columns_empty(other_value)
@type_operator(FIELD_DATAFRAME)
def references_correct_codelist(self, other_value: dict):
target: str = self.replace_prefix(other_value.get("target"))
comparator = self.replace_prefix(other_value.get("comparator"))
result: pd.Series = self.value.apply(lambda row: self.valid_codelist_reference(row[target], row[comparator]), axis=1)
return result
@type_operator(FIELD_DATAFRAME)
def does_not_reference_correct_codelist(self, other_value: dict):
return ~self.references_correct_codelist(other_value)
@type_operator(FIELD_DATAFRAME)
def uses_valid_codelist_terms(self, other_value: dict):
target: str = self.replace_prefix(other_value.get("target"))
comparator = self.replace_prefix(other_value.get("comparator"))
result: pd.Series = self.value.apply(lambda row: self.valid_terms(row[target], row[comparator]), axis=1)
return result
@type_operator(FIELD_DATAFRAME)
def does_not_use_valid_codelist_terms(self, other_value: dict):
return ~self.uses_valid_codelist_terms(other_value)
def next_column_exists_and_previous_is_null(self, row: pd.Series) -> bool:
row.reset_index(drop=True, inplace=True)
for index in row[row.isin([[], {}, "", None])].index: # leaving null values only
next_position: int = index + 1
if next_position < len(row) and row[next_position] is not None:
return True
return False
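    # Example of the rule enforced above (column names from the additional_columns_empty docstring):
    #   TSVAL1 = ""  and TSVAL2 = "foo" -> returns True  (violation: a later column exists)
    #   TSVAL1 = "x" and TSVAL2 = ""    -> returns False (trailing emptiness is allowed)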
def valid_codelist_reference(self, column_name, codelist):
if column_name in self.column_codelist_map:
return codelist in self.column_codelist_map[column_name]
elif self.column_prefix_map:
# Check for generic versions of variables (i.e --DECOD)
for key in self.column_prefix_map:
if column_name.startswith(self.column_prefix_map[key]):
generic_column_name = column_name.replace(self.column_prefix_map[key], key, 1)
if generic_column_name in self.column_codelist_map:
return codelist in self.column_codelist_map.get(generic_column_name)
return True
def valid_terms(self, codelist, terms_list):
if not codelist:
return True
valid_term = False
for codelist_term_map in self.codelist_term_maps:
if codelist in codelist_term_map:
valid_term = valid_term or (codelist_term_map[codelist].get("extensible") or set(terms_list).issubset(codelist_term_map[codelist].get("allowed_terms", [])))
return valid_term
@type_operator(FIELD_DATAFRAME)
def has_different_values(self, other_value: dict):
"""
The operator ensures that the target columns has different values.
"""
target: str = self.replace_prefix(other_value.get("target"))
is_valid: bool = len(self.value[target].unique()) > 1
return pandas.Series([is_valid] * len(self.value[target]))
@type_operator(FIELD_DATAFRAME)
def has_same_values(self, other_value: dict):
return ~self.has_different_values(other_value)
@type_operator(FIELD_DATAFRAME)
def is_ordered_by(self, other_value: dict) -> pd.Series:
"""
Checking validity based on target order.
"""
target: str = self.replace_prefix(other_value.get("target"))
        sort_order: str = other_value.get("order", "asc")
        if sort_order not in ["asc", "dsc"]:
            raise ValueError("invalid sorting order")
sort_order_bool: bool = sort_order == "asc"
return self.value[target].eq(self.value[target].sort_values(ascending=sort_order_bool, ignore_index=True))
@type_operator(FIELD_DATAFRAME)
def is_not_ordered_by(self, other_value: dict) -> pd.Series:
return ~self.is_ordered_by(other_value)
@type_operator(FIELD_DATAFRAME)
def value_has_multiple_references(self, other_value: dict) -> pd.Series:
"""
Requires a target column and a reference count column whose values
are a dictionary containing the number of times that value appears.
"""
target: str = self.replace_prefix(other_value.get("target"))
reference_count_column: str = self.replace_prefix(other_value.get("comparator"))
result = np.where(vectorized_get_dict_key(self.value[reference_count_column], self.value[target]) > 1, True, False)
        return pd.Series(result)
import pytest
from vetiver.vetiver_model import VetiverModel
from vetiver.mock import get_mock_data, get_mock_model
import pandas as pd
from numpy import int64
# Load data, model
X_df, y = get_mock_data()
X_array = pd.DataFrame(X_df)
#!/usr/bin/env python3
# Author: <NAME>
import numpy as np
import pandas as pd
import scipy.stats as stats
import warnings
def normalize_quantiles(df):
"""
Quantile normalization to the average empirical distribution
Note: replicates behavior of R function normalize.quantiles from library("preprocessCore")
Reference:
[1] Bolstad et al., Bioinformatics 19(2), pp. 185-193, 2003
Adapted from https://github.com/andrewdyates/quantile_normalize
"""
M = df.values.copy()
Q = M.argsort(axis=0)
m,n = M.shape
# compute quantile vector
quantiles = np.zeros(m)
for i in range(n):
quantiles += M[Q[:,i],i]
quantiles = quantiles / n
for i in range(n):
# Get equivalence classes; unique values == 0
        dupes = np.zeros(m, dtype=int)
for j in range(m-1):
if M[Q[j,i],i]==M[Q[j+1,i],i]:
dupes[j+1] = dupes[j]+1
# Replace column with quantile ranks
M[Q[:,i],i] = quantiles
# Average together equivalence classes
j = m-1
while j >= 0:
if dupes[j] == 0:
j -= 1
else:
idxs = Q[j-dupes[j]:j+1,i]
M[idxs,i] = np.median(M[idxs,i])
j -= 1 + dupes[j]
assert j == -1
    return pd.DataFrame(M, index=df.index, columns=df.columns)
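# Usage sketch for normalize_quantiles (illustrative; any numeric DataFrame with samples in columns):
#   >>> df = pd.DataFrame({"s1": [5.0, 2.0, 3.0], "s2": [4.0, 1.0, 4.0]})
#   >>> normalize_quantiles(df)
# Each column ends up with the same empirical distribution (the average of the sorted columns).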
import pandas as pd
from mfy_data_core.adapters.es_index_store import ESIndexStore
from mfy_data_core_fakes.adapters.fake_storage import FakeIndexStore, FakeEsPandasClient, FakeObjectStore
def test_fake_index_store():
index_store = FakeIndexStore()
    df = pd.DataFrame([{"col1": "A", "col2": "B"}])
# -*- coding: utf-8 -*-
"""
Created on Wed May 16 12:39:55 2018
@author: malopez
"""
import math
import numba
import numpy as np
import pandas as pd
from collisionTimes import CollisionDetector
from measure import MeasureClass
n_particles = 50
class EventList():
def __init__(self, n_particles, particle_radius, size_X, size_Y, periodicWalls, periodicSideWalls):
self.n_particles = n_particles
self.particle_radius = particle_radius
self.size_X = size_X
self.size_Y = size_Y
self.periodicWalls = periodicWalls
self.periodicSideWalls = periodicSideWalls
        # The event times list itself is initialized here, and later refreshed
        # by calling the 'updateEventList' method
self.eventTimesList = self.initEventList()
def updateEventList(self, pos, vel):
# We calculate collision times (from position and speed data)
# and fill the list previously initialized (self.eventTimesList)
evTimes = self.fillList(self.eventTimesList, pos, vel)
# After that we order the list by 'dt' in ascending order
evTimes = self.orderList(evTimes)
# We save the list as an attribute of the class
self.eventTimesList = evTimes
def initEventList(self):
part_i = pd.DataFrame(np.arange(self.n_particles, dtype=np.int32),
columns=('first_element',))
wallLeft = pd.DataFrame(np.full((self.n_particles,), 'leftWall'),
columns=('second_element',))
wallRight = pd.DataFrame(np.full((self.n_particles,), 'rightWall'),
columns=('second_element',))
wallTop = pd.DataFrame(np.full((self.n_particles,), 'topWall'),
columns=('second_element',))
wallBottom = pd.DataFrame(np.full((self.n_particles,), 'bottomWall'),
columns=('second_element',))
dt = pd.DataFrame(np.zeros((self.n_particles, ), dtype=np.float64),
columns=('dt',))
eventType = pd.DataFrame(np.full((self.n_particles,), 'particleWall_collision'),
columns=('eventType',))
wallTimesList_left = | pd.concat((part_i, wallLeft, dt, eventType), axis=1) | pandas.concat |
"""
A warehouse for constant values required to initilize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
'PE': 'Prince Edwards Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files (w/o .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types left as 'XXX' which should obviously make an error...
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data."""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple enumerating EIA 860 tables for which PUDL's ETL works."""
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA Form 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923, Census Region a US state is located in
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923 EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
    # are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CC': 'Combined-Cycle, Total Unit',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA. Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
    'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER). Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in either short tons for solids,
# thousands of cubic feet for gases, and barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'N': 'New Contract - see NC code. This abbreviation existed only in 2008 before being replaced by NC.',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
    'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into fuel groups that are located in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OT': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# we need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters. We need to harvest the plant
# location before harvesting the location of the utilites for example.
entities = {
'plants': [
# base cols
['plant_id_eia'],
# static cols
['balancing_authority_code_eia', 'balancing_authority_name_eia',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude', 'service_area',
'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'datum', 'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'nerc_region', 'net_metering',
'pipeline_notes', 'regulatory_status_code',
'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
],
'generators': [
# base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'owned_by_non_utility', 'deliver_power_transgrid',
'summer_capacity_mw', 'winter_capacity_mw', 'summer_capacity_estimate',
'winter_capacity_estimate', 'minimum_load_mw', 'distributed_generation',
'technology_description', 'reactive_power_output_mvar',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'energy_source_1_transport_1', 'energy_source_1_transport_2',
'energy_source_1_transport_3', 'energy_source_2_transport_1',
'energy_source_2_transport_2', 'energy_source_2_transport_3',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date',
'utility_id_eia', 'data_source'],
# need type fixing
{}
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [
# base cols
['utility_id_eia'],
# static cols
['utility_name_eia'],
# annual cols
['street_address', 'city', 'state', 'zip_code', 'entity_type',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
'attention_line', 'address_2', 'zip_code_4',
'contact_firstname', 'contact_lastname', 'contact_title',
'contact_firstname_2', 'contact_lastname_2', 'contact_title_2',
'phone_extension_1', 'phone_extension_2', 'phone_number_1',
'phone_number_2'],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [
# base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{},
]
}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
epacems_tables = ("hourly_emissions_epacems")
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing EPA IPM regions (keys) and lists of their
associated abbreviations (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and items
for each table to be renamed along with the replacement name (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
'ferc714',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2020)),
'eia861': tuple(range(1990, 2020)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2021)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2020)),
'ferc714': (None, ),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of years we currently expect to be able to ingest, per source:
working_partitions = {
'eia860': {
'years': tuple(range(2004, 2020))
},
'eia860m': {
'year_month': '2020-11'
},
'eia861': {
'years': tuple(range(2001, 2020))
},
'eia923': {
'years': tuple(range(2001, 2020))
},
'epacems': {
'years': tuple(range(1995, 2021)),
'states': tuple(cems_states.keys())},
'ferc1': {
'years': tuple(range(1994, 2020))
},
'ferc714': {},
}
"""
dict: A dictionary of data sources (keys) and dictionaries (values) of names of
partition type (sub-key) and paritions (sub-value) containing the paritions
such as tuples of years for each data source that are able to be ingested
into PUDL.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': (
"service_territory_eia861",
"balancing_authority_eia861",
"sales_eia861",
"advanced_metering_infrastructure_eia861",
"demand_response_eia861",
"demand_side_management_eia861",
"distributed_generation_eia861",
"distribution_systems_eia861",
"dynamic_pricing_eia861",
"energy_efficiency_eia861",
"green_pricing_eia861",
"mergers_eia861",
"net_metering_eia861",
"non_net_metering_eia861",
"operational_data_eia861",
"reliability_eia861",
"utility_data_eia861",
),
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
),
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary containing tables (keys) and column names (values)
containing integer - type columns whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "Catalyst Cooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing organization names (keys) and
their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing datasources (keys) and
associated attributes (values)
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
        'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'ferc714': [
'electricity', 'electric', 'utility', 'planning area', 'form 714',
'balancing authority', 'demand', 'system lambda', 'ferc',
'federal energy regulatory commission', "hourly", "generation",
"interchange", "forecast", "load", "adjacency", "plants",
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
ENTITY_TYPE_DICT = {
'M': 'Municipal',
'C': 'Cooperative',
'R': 'Retail Power Marketer',
'I': 'Investor Owned',
'P': 'Political Subdivision',
'T': 'Transmission',
'S': 'State',
'W': 'Wholesale Power Marketer',
'F': 'Federal',
'A': 'Municipal Mktg Authority',
'G': 'Community Choice Aggregator',
'D': 'Nonutility DSM Administrator',
'B': 'Behind the Meter',
'Q': 'Independent Power Producer',
'IND': 'Industrial',
'COM': 'Commercial',
'PR': 'Private', # Added by AES for OD table (Arbitrary moniker)
'PO': 'Power Marketer', # Added by AES for OD table
'U': 'Unknown', # Added by AES for OD table
'O': 'Other' # Added by AES for OD table
}
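# Illustrative usage (ours): the single-letter codes above can be decoded in a
# pandas Series with Series.map, e.g. df['entity_type'].map(ENTITY_TYPE_DICT).
def _decode_entity_type(series):
    """Map single-letter EIA entity type codes to human readable labels."""
    return series.map(ENTITY_TYPE_DICT)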
# Confirm these designations -- educated guess based on the form instructions
MOMENTARY_INTERRUPTION_DEF = { # Added by AES for R table
'L': 'Less than 1 minute',
'F': 'Less than or equal to 5 minutes',
'O': 'Other',
}
# https://www.eia.gov/electricity/data/eia411/#tabs_NERC-3
RECOGNIZED_NERC_REGIONS = [
'BASN', # ASSESSMENT AREA Basin (WECC)
'CALN', # ASSESSMENT AREA California (WECC)
'CALS', # ASSESSMENT AREA California (WECC)
'DSW', # ASSESSMENT AREA Desert Southwest (WECC)
'ASCC', # Alaska
'ISONE', # ISO New England (NPCC)
'ERCOT', # lumped under TRE in 2017 Form instructions
'NORW', # ASSESSMENT AREA Northwest (WECC)
'NYISO', # ISO (NPCC)
'PJM', # RTO
'ROCK', # ASSESSMENT AREA Rockies (WECC)
'ECAR', # OLD RE Now part of RFC and SERC
'FRCC', # included in 2017 Form instructions, recently joined with SERC
'HICC', # Hawaii
'MAAC', # OLD RE Now part of RFC
'MAIN', # OLD RE Now part of SERC, RFC, MRO
'MAPP', # OLD/NEW RE Became part of MRO, resurfaced in 2010
'MRO', # RE included in 2017 Form instructions
'NPCC', # RE included in 2017 Form instructions
'RFC', # RE included in 2017 Form instructions
'SERC', # RE included in 2017 Form instructions
'SPP', # RE included in 2017 Form instructions
'TRE', # RE included in 2017 Form instructions (included ERCOT)
'WECC', # RE included in 2017 Form instructions
'WSCC', # OLD RE pre-2002 version of WECC
'MISO', # ISO unclear whether technically a regional entity, but lots of entries
'ECAR_MAAC',
'MAPP_WECC',
'RFC_SERC',
'SPP_WECC',
'MRO_WECC',
'ERCOT_SPP',
'SPP_TRE',
'ERCOT_TRE',
'MISO_TRE',
'VI', # Virgin Islands
'GU', # Guam
'PR', # Puerto Rico
'AS', # American Samoa
'UNK',
]
CUSTOMER_CLASSES = [
"commercial",
"industrial",
"direct_connection",
"other",
"residential",
"total",
"transportation"
]
TECH_CLASSES = [
    'backup',  # Where is this used? It was removed from the DG table because it is not a real component.
'chp_cogen',
'combustion_turbine',
'fuel_cell',
'hydro',
'internal_combustion',
'other',
'pv',
'steam',
'storage_pv',
'all_storage', # need 'all' as prefix so as not to confuse with other storage category
'total',
'virtual_pv',
'wind',
]
REVENUE_CLASSES = [
'retail_sales',
'unbundled',
'delivery_customers',
'sales_for_resale',
'credits_or_adjustments',
'other',
'transmission',
'total',
]
RELIABILITY_STANDARDS = [
'ieee_standard',
'other_standard'
]
FUEL_CLASSES = [
'gas',
'oil',
'other',
'renewable',
'water',
'wind',
'wood',
]
RTO_CLASSES = [
'caiso',
'ercot',
'pjm',
'nyiso',
'spp',
'miso',
'isone',
'other'
]
ESTIMATED_OR_ACTUAL = {'E': 'estimated', 'A': 'actual'}
TRANSIT_TYPE_DICT = {
'CV': 'conveyer',
'PL': 'pipeline',
'RR': 'railroad',
'TK': 'truck',
'WA': 'water',
'UN': 'unknown',
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
"plant_id_ferc1": pd.Int64Dtype(),
"plant_id_pudl": pd.Int64Dtype(),
"report_date": "datetime64[ns]",
"report_year": pd.Int64Dtype(),
"utility_id_ferc1": pd.Int64Dtype(),
"utility_id_pudl": pd.Int64Dtype(),
},
"ferc714": { # INCOMPLETE
"demand_mwh": float,
"demand_annual_mwh": float,
"eia_code": pd.Int64Dtype(),
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
"report_date": "datetime64[ns]",
"respondent_id_ferc714": pd.Int64Dtype(),
"respondent_name_ferc714": pd.StringDtype(),
"respondent_type": pd.CategoricalDtype(categories=[
"utility", "balancing_authority",
]),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'actual_peak_demand_savings_mw': float, # Added by AES for DR table
'address_2': pd.StringDtype(), # Added by AES for 860 utilities table
'advanced_metering_infrastructure': pd.Int64Dtype(), # Added by AES for AMI table
# Added by AES for UD misc table
'alternative_fuel_vehicle_2_activity': pd.BooleanDtype(),
'alternative_fuel_vehicle_activity': pd.BooleanDtype(),
'annual_indirect_program_cost': float,
'annual_total_cost': float,
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'attention_line': pd.StringDtype(),
'automated_meter_reading': pd.Int64Dtype(), # Added by AES for AMI table
'backup_capacity_mw': float, # Added by AES for NNM & DG misc table
'balancing_authority_code_eia': pd.CategoricalDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name_eia': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
        'bundled_activity': pd.BooleanDtype(),
'business_model': pd.CategoricalDtype(categories=[
"retail", "energy_services"]),
        'buy_distribution_activity': pd.BooleanDtype(),