| prompt | completion | api |
|---|---|---|
| string (lengths 19 to 1.03M) | string (lengths 4 to 2.12k) | string (lengths 8 to 90) |

Each row of this dump pairs a Python code prompt (a source file truncated at a pandas call) with the expected completion and the fully qualified pandas API it exercises. In the snippets below, each completion is shown inline at the truncation point, with the target API noted in a trailing comment.
##########################################################################
## Summary
##########################################################################
'''
Creates a flat table of decisions from our Postgres database and runs the prediction pipeline.
This is the starting point for running our models.
'''
##########################################################################
## Imports & Configuration
##########################################################################
import logging
import numpy
import pandas
from sklearn import metrics
from sklearn.metrics import classification_report
# Configure logging. See /logs/example-logging.py for examples of how to use it.
logging_filename = "../logs/pipeline.log"
logging.basicConfig(filename=logging_filename, level=logging.DEBUG)
# Mirror everything sent to the logger to the command-line output as well.
logging.getLogger().addHandler(logging.StreamHandler())
# Allow modules in sibling directories to import each other (TODO: clean up this path handling in a refactor; it is messy).
from inspect import getsourcefile
import os, sys, json
current_path = os.path.abspath(getsourcefile(lambda:0))
current_dir = os.path.dirname(current_path)
parent_dir = current_dir[:current_dir.rfind(os.path.sep)]
repo_dir = parent_dir[:parent_dir.rfind(os.path.sep)]
sys.path.insert(0, parent_dir)
import database_management
##########################################################################
## Classes
##########################################################################
class ManyModels:
'''
A wrapper class for training multiple sklearn models on a single dataset
The wrapper contains:
- the models themselves (fitted or not), passed in as a dictionary by the calling function
- X and y arrays of training data
- an X_test set of testing data
- the predicted answers of all models, stored as a dataframe whose rows match the X_test dataset
It is not optimized for memory use; instead it is designed for maximum flexibility and access to the source data,
models, and prediction performance, for use in a learning context.
Example Use:
#set it up:
modeler = ManyModels()
modeler.models = {} #change this to a dictionary of model instances
modeler.X = X_train
modeler.y = y_train
modeler.y_names = ['A','B']
#Fit:
modeler.fit("RandomForestClassifier") #fit just one model
modeler.fit(model_list=['KNeighborsClassifier_12', 'RandomForestClassifier']) #fit a list of models
modeler.fit() #fits all models
#Attach testing data
modeler.X_test = X_test
modeler.y_test = y_test
#Predict:
predicted_df = modeler.predict() #returns a dataframe of the predicted answers for each model, but also stores the fitted models on the modeler object
'''
def __init__(self):
self.models = {} #dict of 'modelname':sklearn.model_instance
self.X = numpy.array([[],[]]) #blank 2-d array, contains training data
self.y = numpy.array([]) #blank 1-d array, contains training answers
self.pipe = None #a pipeline for transforming this data. Should not contain a final model to predict.
self.answers = pandas.DataFrame()  # completion (api: pandas.DataFrame)
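# --- Illustrative sketch (assumption): the ManyModels class above is truncated before its
# fit()/predict() methods, so the loop below only demonstrates the idea its docstring
# describes -- fitting a dictionary of sklearn estimators on one training set. The toy data
# and all variable names other than those in the docstring are invented here.
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
X_train_demo, y_train_demo = make_classification(n_samples=100, n_features=5, random_state=0)
demo_models = {
    "RandomForestClassifier": RandomForestClassifier(n_estimators=10, random_state=0),
    "KNeighborsClassifier_12": KNeighborsClassifier(n_neighbors=12),
}
for name, estimator in demo_models.items():
    estimator.fit(X_train_demo, y_train_demo)  # each estimator is fitted in place, as ManyModels.fit() would do
demo_predictions = {name: est.predict(X_train_demo[:5]) for name, est in demo_models.items()}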
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 21 14:21:25 2021
@author: mchini
"""
from scipy.io import loadmat
from scipy.optimize import curve_fit
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
folder2load = 'D:/models_neonates/autocorr_spikes/data/'
# see excel file in the repo
exps = pd.read_excel('D:/models_neonates/autocorr_spikes/ExperimentPlanPython.xlsx')
animals = pd.unique(exps['animal_ID'])
# initialize variables
age = np.zeros(np.shape(animals))
timescale = np.zeros(np.shape(animals))
# whether or not to plot individual fits
to_plot = 0
# define function to extract timescale
def func(lags, A, tau, B):
return A * (np.exp(-lags/tau) + B)
for idx, animal in enumerate(animals):
# load autocorr stuff & take 50-500ms window
ac_stuff = loadmat(folder2load + str(animal) + '.mat')
autocorr = ac_stuff['auto_corr2fit'].flatten()[:10]
lags = ac_stuff['lag2fit'].flatten()[:10].astype(float)
try:
# fit the curve
popt, pcov = curve_fit(f=func, xdata=lags,
ydata=autocorr, p0=np.r_[0.5, 5, 0.01])
if to_plot > 0:
plt.figure()
plt.plot(lags, autocorr, 'b-', label='data')
plt.plot(lags, func(lags, *popt), 'g--')
except RuntimeError:
popt = np.tile(np.nan, 3)
# extract age & timescale
age[idx] = pd.unique(exps['Age'].loc[exps['animal_ID'] == animal])  # completion (api: pandas.unique)
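# --- Illustrative check (assumption): the loop above is truncated after the age assignment.
# With the parameter order used in func(lags, A, tau, B), the fitted timescale is the decay
# constant tau, i.e. popt[1]. The self-contained snippet below, on synthetic data, shows that
# curve_fit recovers tau with the same initial guess p0 as above; all names here are invented.
import numpy as np
from scipy.optimize import curve_fit
def _demo_func(lags, A, tau, B):
    return A * (np.exp(-lags / tau) + B)
demo_lags = np.arange(1.0, 11.0)  # 10 lag bins, mirroring the 50-500 ms window used above
demo_autocorr = _demo_func(demo_lags, 0.5, 5.0, 0.01)  # ground-truth tau = 5 lag bins
demo_popt, _ = curve_fit(f=_demo_func, xdata=demo_lags, ydata=demo_autocorr, p0=np.r_[0.5, 5, 0.01])
print('recovered timescale (tau):', demo_popt[1])  # approximately 5.0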
from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
Timedelta,
Timestamp,
_np_version_under1p14,
concat,
date_range,
option_context,
)
from pandas.core.arrays import integer_array
import pandas.util.testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))  # completion (api: pandas.Series)
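# --- Illustrative demo (standalone; mirrors the dtype assertions in
# test_concat_empty_dataframe_dtypes above, for the pandas versions this test targets):
# concatenating a frame with itself preserves each column's dtype, while concatenating it
# with a float64-cast copy promotes bool to object and int32 to float64.
import numpy as np
import pandas as pd
demo_df = pd.DataFrame(columns=list("ab"))
demo_df["a"] = demo_df["a"].astype(np.bool_)
demo_df["b"] = demo_df["b"].astype(np.int32)
print(pd.concat([demo_df, demo_df]).dtypes)                      # a: bool,   b: int32
print(pd.concat([demo_df, demo_df.astype(np.float64)]).dtypes)   # a: object, b: float64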
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
from itertools import product
import unittest
import pandas as pd
import numpy as np
import pyspark
from databricks import koalas as ks
from databricks.koalas.config import set_option, reset_option
from databricks.koalas.frame import DataFrame
from databricks.koalas.testing.utils import ReusedSQLTestCase, SQLTestUtils
from databricks.koalas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
class OpsOnDiffFramesEnabledTest(ReusedSQLTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
super().setUpClass()
set_option("compute.ops_on_diff_frames", True)
@classmethod
def tearDownClass(cls):
reset_option("compute.ops_on_diff_frames")
super().tearDownClass()
@property
def pdf1(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=[0, 1, 3, 5, 6, 8, 9, 10, 11],
)
@property
def pdf2(self):
return pd.DataFrame(
{"a": [9, 8, 7, 6, 5, 4, 3, 2, 1], "b": [0, 0, 0, 4, 5, 6, 1, 2, 3]},
index=list(range(9)),
)
@property
def pdf3(self):
return pd.DataFrame(
{"b": [1, 1, 1, 1, 1, 1, 1, 1, 1], "c": [1, 1, 1, 1, 1, 1, 1, 1, 1]},
index=list(range(9)),
)
@property
def pdf4(self):
return pd.DataFrame(
{"e": [2, 2, 2, 2, 2, 2, 2, 2, 2], "f": [2, 2, 2, 2, 2, 2, 2, 2, 2]},
index=list(range(9)),
)
@property
def pdf5(self):
return pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"b": [4, 5, 6, 3, 2, 1, 0, 0, 0],
"c": [4, 5, 6, 3, 2, 1, 0, 0, 0],
},
index=[0, 1, 3, 5, 6, 8, 9, 10, 11],
).set_index(["a", "b"])
@property
def pdf6(self):
return pd.DataFrame(
{
"a": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"b": [0, 0, 0, 4, 5, 6, 1, 2, 3],
"c": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"e": [4, 5, 6, 3, 2, 1, 0, 0, 0],
},
index=list(range(9)),
).set_index(["a", "b"])
@property
def pser1(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon", "koala"], ["speed", "weight", "length", "power"]],
[[0, 3, 1, 1, 1, 2, 2, 2], [0, 2, 0, 3, 2, 0, 1, 3]],
)
return pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1], index=midx)
@property
def pser2(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
return pd.Series([-45, 200, -1.2, 30, -250, 1.5, 320, 1, -0.3], index=midx)
@property
def pser3(self):
midx = pd.MultiIndex(
[["koalas", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [1, 1, 2, 0, 0, 2, 2, 2, 1]],
)
return pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
@property
def kdf1(self):
return ks.from_pandas(self.pdf1)
@property
def kdf2(self):
return ks.from_pandas(self.pdf2)
@property
def kdf3(self):
return ks.from_pandas(self.pdf3)
@property
def kdf4(self):
return ks.from_pandas(self.pdf4)
@property
def kdf5(self):
return ks.from_pandas(self.pdf5)
@property
def kdf6(self):
return ks.from_pandas(self.pdf6)
@property
def kser1(self):
return ks.from_pandas(self.pser1)
@property
def kser2(self):
return ks.from_pandas(self.pser2)
@property
def kser3(self):
return ks.from_pandas(self.pser3)
def test_ranges(self):
self.assert_eq(
(ks.range(10) + ks.range(10)).sort_index(),
(
ks.DataFrame({"id": list(range(10))}) + ks.DataFrame({"id": list(range(10))})
).sort_index(),
)
def test_no_matched_index(self):
with self.assertRaisesRegex(ValueError, "Index names must be exactly matched"):
ks.DataFrame({"a": [1, 2, 3]}).set_index("a") + ks.DataFrame(
{"b": [1, 2, 3]}
).set_index("b")
def test_arithmetic(self):
self._test_arithmetic_frame(self.pdf1, self.pdf2, check_extension=False)
self._test_arithmetic_series(self.pser1, self.pser2, check_extension=False)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_arithmetic_extension_dtypes(self):
self._test_arithmetic_frame(
self.pdf1.astype("Int64"), self.pdf2.astype("Int64"), check_extension=True
)
self._test_arithmetic_series(
self.pser1.astype(int).astype("Int64"),
self.pser2.astype(int).astype("Int64"),
check_extension=True,
)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_arithmetic_extension_float_dtypes(self):
self._test_arithmetic_frame(
self.pdf1.astype("Float64"), self.pdf2.astype("Float64"), check_extension=True
)
self._test_arithmetic_series(
self.pser1.astype("Float64"), self.pser2.astype("Float64"), check_extension=True
)
def _test_arithmetic_frame(self, pdf1, pdf2, *, check_extension):
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
if isinstance(actual, DataFrame):
for dtype in actual.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# Series
assert_eq((kdf1.a - kdf2.b).sort_index(), (pdf1.a - pdf2.b).sort_index())
assert_eq((kdf1.a * kdf2.a).sort_index(), (pdf1.a * pdf2.a).sort_index())
if check_extension and not extension_float_dtypes_available:
self.assert_eq(
(kdf1["a"] / kdf2["a"]).sort_index(), (pdf1["a"] / pdf2["a"]).sort_index()
)
else:
assert_eq((kdf1["a"] / kdf2["a"]).sort_index(), (pdf1["a"] / pdf2["a"]).sort_index())
# DataFrame
assert_eq((kdf1 + kdf2).sort_index(), (pdf1 + pdf2).sort_index())
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf1.columns = columns
kdf2.columns = columns
pdf1.columns = columns
pdf2.columns = columns
# Series
assert_eq(
(kdf1[("x", "a")] - kdf2[("x", "b")]).sort_index(),
(pdf1[("x", "a")] - pdf2[("x", "b")]).sort_index(),
)
assert_eq(
(kdf1[("x", "a")] - kdf2["x"]["b"]).sort_index(),
(pdf1[("x", "a")] - pdf2["x"]["b"]).sort_index(),
)
assert_eq(
(kdf1["x"]["a"] - kdf2[("x", "b")]).sort_index(),
(pdf1["x"]["a"] - pdf2[("x", "b")]).sort_index(),
)
# DataFrame
assert_eq((kdf1 + kdf2).sort_index(), (pdf1 + pdf2).sort_index())
def _test_arithmetic_series(self, pser1, pser2, *, check_extension):
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# MultiIndex Series
assert_eq((kser1 + kser2).sort_index(), (pser1 + pser2).sort_index())
assert_eq((kser1 - kser2).sort_index(), (pser1 - pser2).sort_index())
assert_eq((kser1 * kser2).sort_index(), (pser1 * pser2).sort_index())
if check_extension and not extension_float_dtypes_available:
self.assert_eq((kser1 / kser2).sort_index(), (pser1 / pser2).sort_index())
else:
assert_eq((kser1 / kser2).sort_index(), (pser1 / pser2).sort_index())
def test_arithmetic_chain(self):
self._test_arithmetic_chain_frame(self.pdf1, self.pdf2, self.pdf3, check_extension=False)
self._test_arithmetic_chain_series(
self.pser1, self.pser2, self.pser3, check_extension=False
)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_arithmetic_chain_extension_dtypes(self):
self._test_arithmetic_chain_frame(
self.pdf1.astype("Int64"),
self.pdf2.astype("Int64"),
self.pdf3.astype("Int64"),
check_extension=True,
)
self._test_arithmetic_chain_series(
self.pser1.astype(int).astype("Int64"),
self.pser2.astype(int).astype("Int64"),
self.pser3.astype(int).astype("Int64"),
check_extension=True,
)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_arithmetic_chain_extension_float_dtypes(self):
self._test_arithmetic_chain_frame(
self.pdf1.astype("Float64"),
self.pdf2.astype("Float64"),
self.pdf3.astype("Float64"),
check_extension=True,
)
self._test_arithmetic_chain_series(
self.pser1.astype("Float64"),
self.pser2.astype("Float64"),
self.pser3.astype("Float64"),
check_extension=True,
)
def _test_arithmetic_chain_frame(self, pdf1, pdf2, pdf3, *, check_extension):
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
kdf3 = ks.from_pandas(pdf3)
common_columns = set(kdf1.columns).intersection(kdf2.columns).intersection(kdf3.columns)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
if isinstance(actual, DataFrame):
for column, dtype in zip(actual.columns, actual.dtypes):
if column in common_columns:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assertFalse(isinstance(dtype, extension_dtypes))
else:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# Series
assert_eq((kdf1.a - kdf2.b - kdf3.c).sort_index(), (pdf1.a - pdf2.b - pdf3.c).sort_index())
assert_eq(
(kdf1.a * (kdf2.a * kdf3.c)).sort_index(), (pdf1.a * (pdf2.a * pdf3.c)).sort_index()
)
if check_extension and not extension_float_dtypes_available:
self.assert_eq(
(kdf1["a"] / kdf2["a"] / kdf3["c"]).sort_index(),
(pdf1["a"] / pdf2["a"] / pdf3["c"]).sort_index(),
)
else:
assert_eq(
(kdf1["a"] / kdf2["a"] / kdf3["c"]).sort_index(),
(pdf1["a"] / pdf2["a"] / pdf3["c"]).sort_index(),
)
# DataFrame
if check_extension and (
LooseVersion("1.0") <= LooseVersion(pd.__version__) < LooseVersion("1.1")
):
self.assert_eq(
(kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index(), almost=True
)
else:
assert_eq((kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index())
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf1.columns = columns
kdf2.columns = columns
pdf1.columns = columns
pdf2.columns = columns
columns = pd.MultiIndex.from_tuples([("x", "b"), ("y", "c")])
kdf3.columns = columns
pdf3.columns = columns
common_columns = set(kdf1.columns).intersection(kdf2.columns).intersection(kdf3.columns)
# Series
assert_eq(
(kdf1[("x", "a")] - kdf2[("x", "b")] - kdf3[("y", "c")]).sort_index(),
(pdf1[("x", "a")] - pdf2[("x", "b")] - pdf3[("y", "c")]).sort_index(),
)
assert_eq(
(kdf1[("x", "a")] * (kdf2[("x", "b")] * kdf3[("y", "c")])).sort_index(),
(pdf1[("x", "a")] * (pdf2[("x", "b")] * pdf3[("y", "c")])).sort_index(),
)
# DataFrame
if check_extension and (
LooseVersion("1.0") <= LooseVersion(pd.__version__) < LooseVersion("1.1")
):
self.assert_eq(
(kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index(), almost=True
)
else:
assert_eq((kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index())
def _test_arithmetic_chain_series(self, pser1, pser2, pser3, *, check_extension):
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
kser3 = ks.from_pandas(pser3)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# MultiIndex Series
assert_eq((kser1 + kser2 - kser3).sort_index(), (pser1 + pser2 - pser3).sort_index())
assert_eq((kser1 * kser2 * kser3).sort_index(), (pser1 * pser2 * pser3).sort_index())
if check_extension and not extension_float_dtypes_available:
if LooseVersion(pd.__version__) >= LooseVersion("1.0"):
self.assert_eq(
(kser1 - kser2 / kser3).sort_index(), (pser1 - pser2 / pser3).sort_index()
)
else:
expected = pd.Series(
[249.0, np.nan, 0.0, 0.88, np.nan, np.nan, np.nan, np.nan, np.nan, -np.inf]
+ [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
index=pd.MultiIndex(
[
["cow", "falcon", "koala", "koalas", "lama"],
["length", "power", "speed", "weight"],
],
[
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4],
[0, 1, 2, 2, 3, 0, 0, 1, 2, 3, 0, 0, 3, 3, 0, 2, 3],
],
),
)
self.assert_eq((kser1 - kser2 / kser3).sort_index(), expected)
else:
assert_eq((kser1 - kser2 / kser3).sort_index(), (pser1 - pser2 / pser3).sort_index())
assert_eq((kser1 + kser2 * kser3).sort_index(), (pser1 + pser2 * pser3).sort_index())
def test_mod(self):
pser = pd.Series([100, None, -300, None, 500, -700])
pser_other = pd.Series([-150] * 6)
kser = ks.from_pandas(pser)
kser_other = ks.from_pandas(pser_other)
self.assert_eq(kser.mod(kser_other).sort_index(), pser.mod(pser_other))
self.assert_eq(kser.mod(kser_other).sort_index(), pser.mod(pser_other))
self.assert_eq(kser.mod(kser_other).sort_index(), pser.mod(pser_other))
def test_rmod(self):
pser = pd.Series([100, None, -300, None, 500, -700])
pser_other = pd.Series([-150] * 6)
kser = ks.from_pandas(pser)
kser_other = ks.from_pandas(pser_other)
self.assert_eq(kser.rmod(kser_other).sort_index(), pser.rmod(pser_other))
self.assert_eq(kser.rmod(kser_other).sort_index(), pser.rmod(pser_other))
self.assert_eq(kser.rmod(kser_other).sort_index(), pser.rmod(pser_other))
def test_getitem_boolean_series(self):
pdf1 = pd.DataFrame(
{"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]}, index=[20, 10, 30, 0, 50]
)
pdf2 = pd.DataFrame(
{"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]},
index=[0, 30, 10, 20, 50],
)
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1[pdf2.A > -3].sort_index(), kdf1[kdf2.A > -3].sort_index())
self.assert_eq(pdf1.A[pdf2.A > -3].sort_index(), kdf1.A[kdf2.A > -3].sort_index())
self.assert_eq(
(pdf1.A + 1)[pdf2.A > -3].sort_index(), (kdf1.A + 1)[kdf2.A > -3].sort_index()
)
def test_loc_getitem_boolean_series(self):
pdf1 = pd.DataFrame(
{"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]}, index=[20, 10, 30, 0, 50]
)
pdf2 = pd.DataFrame(
{"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]},
index=[20, 10, 30, 0, 50],
)
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.loc[pdf2.A > -3].sort_index(), kdf1.loc[kdf2.A > -3].sort_index())
self.assert_eq(pdf1.A.loc[pdf2.A > -3].sort_index(), kdf1.A.loc[kdf2.A > -3].sort_index())
self.assert_eq(
(pdf1.A + 1).loc[pdf2.A > -3].sort_index(), (kdf1.A + 1).loc[kdf2.A > -3].sort_index()
)
def test_bitwise(self):
pser1 = pd.Series([True, False, True, False, np.nan, np.nan, True, False, np.nan])
pser2 = pd.Series([True, False, False, True, True, False, np.nan, np.nan, np.nan])
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(pser1 | pser2, (kser1 | kser2).sort_index())
self.assert_eq(pser1 & pser2, (kser1 & kser2).sort_index())
pser1 = pd.Series([True, False, np.nan], index=list("ABC"))
pser2 = pd.Series([False, True, np.nan], index=list("DEF"))
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(pser1 | pser2, (kser1 | kser2).sort_index())
self.assert_eq(pser1 & pser2, (kser1 & kser2).sort_index())
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_bitwise_extension_dtype(self):
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=False)
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
pser1 = pd.Series(
[True, False, True, False, np.nan, np.nan, True, False, np.nan], dtype="boolean"
)
pser2 = pd.Series(
[True, False, False, True, True, False, np.nan, np.nan, np.nan], dtype="boolean"
)
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
assert_eq((kser1 | kser2).sort_index(), pser1 | pser2)
assert_eq((kser1 & kser2).sort_index(), pser1 & pser2)
pser1 = pd.Series([True, False, np.nan], index=list("ABC"), dtype="boolean")
pser2 = pd.Series([False, True, np.nan], index=list("DEF"), dtype="boolean")
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
# a pandas bug?
# assert_eq((kser1 | kser2).sort_index(), pser1 | pser2)
# assert_eq((kser1 & kser2).sort_index(), pser1 & pser2)
assert_eq(
(kser1 | kser2).sort_index(),
pd.Series([True, None, None, None, True, None], index=list("ABCDEF"), dtype="boolean"),
)
assert_eq(
(kser1 & kser2).sort_index(),
pd.Series(
[None, False, None, False, None, None], index=list("ABCDEF"), dtype="boolean"
),
)
def test_concat_column_axis(self):
pdf1 = pd.DataFrame({"A": [0, 2, 4], "B": [1, 3, 5]}, index=[1, 2, 3])
pdf1.columns.names = ["AB"]
pdf2 = pd.DataFrame({"C": [1, 2, 3], "D": [4, 5, 6]}, index=[1, 3, 5])
pdf2.columns.names = ["CD"]
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
kdf3 = kdf1.copy()
kdf4 = kdf2.copy()
pdf3 = pdf1.copy()
pdf4 = pdf2.copy()
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")], names=["X", "AB"])
pdf3.columns = columns
kdf3.columns = columns
columns = pd.MultiIndex.from_tuples([("X", "C"), ("X", "D")], names=["Y", "CD"])
pdf4.columns = columns
kdf4.columns = columns
pdf5 = pd.DataFrame({"A": [0, 2, 4], "B": [1, 3, 5]}, index=[1, 2, 3])
pdf6 = pd.DataFrame({"C": [1, 2, 3]}, index=[1, 3, 5])
kdf5 = ks.from_pandas(pdf5)
kdf6 = ks.from_pandas(pdf6)
ignore_indexes = [True, False]
joins = ["inner", "outer"]
objs = [
([kdf1.A, kdf2.C], [pdf1.A, pdf2.C]),
# TODO: ([kdf1, kdf2.C], [pdf1, pdf2.C]),
([kdf1.A, kdf2], [pdf1.A, pdf2]),
([kdf1.A, kdf2.C], [pdf1.A, pdf2.C]),
([kdf3[("X", "A")], kdf4[("X", "C")]], [pdf3[("X", "A")], pdf4[("X", "C")]]),
([kdf3, kdf4[("X", "C")]], [pdf3, pdf4[("X", "C")]]),
([kdf3[("X", "A")], kdf4], [pdf3[("X", "A")], pdf4]),
([kdf3, kdf4], [pdf3, pdf4]),
([kdf5, kdf6], [pdf5, pdf6]),
([kdf6, kdf5], [pdf6, pdf5]),
]
for ignore_index, join in product(ignore_indexes, joins):
for i, (kdfs, pdfs) in enumerate(objs):
with self.subTest(ignore_index=ignore_index, join=join, pdfs=pdfs, pair=i):
actual = ks.concat(kdfs, axis=1, ignore_index=ignore_index, join=join)
expected = pd.concat(pdfs, axis=1, ignore_index=ignore_index, join=join)
self.assert_eq(
repr(actual.sort_values(list(actual.columns)).reset_index(drop=True)),
repr(expected.sort_values(list(expected.columns)).reset_index(drop=True)),
)
def test_combine_first(self):
pser1 = pd.Series({"falcon": 330.0, "eagle": 160.0})
pser2 = pd.Series({"falcon": 345.0, "eagle": 200.0, "duck": 30.0})
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
with self.assertRaisesRegex(
ValueError, "`combine_first` only allows `Series` for parameter `other`"
):
kser1.combine_first(50)
kser1.name = ("X", "A")
kser2.name = ("Y", "B")
pser1.name = ("X", "A")
pser2.name = ("Y", "B")
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
# MultiIndex
midx1 = pd.MultiIndex(
[["lama", "cow", "falcon", "koala"], ["speed", "weight", "length", "power"]],
[[0, 3, 1, 1, 1, 2, 2, 2], [0, 2, 0, 3, 2, 0, 1, 3]],
)
midx2 = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser1 = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1], index=midx1)
pser2 = pd.Series([-45, 200, -1.2, 30, -250, 1.5, 320, 1, -0.3], index=midx2)
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
# Series come from same DataFrame
pdf = pd.DataFrame(
{
"A": {"falcon": 330.0, "eagle": 160.0},
"B": {"falcon": 345.0, "eagle": 200.0, "duck": 30.0},
}
)
pser1 = pdf.A
pser2 = pdf.B
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
kser1.name = ("X", "A")
kser2.name = ("Y", "B")
pser1.name = ("X", "A")
pser2.name = ("Y", "B")
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
kdf = ks.from_pandas(pdf)
pser = pd.Series([4, 5, 6])
kser = ks.from_pandas(pser)
kdf.insert(1, "y", kser)
pdf.insert(1, "y", pser)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
#
# DataFrame with Index different from inserting Series'
#
pdf = pd.DataFrame([1, 2, 3], index=[10, 20, 30])
kdf = ks.from_pandas(pdf)
pser = pd.Series([4, 5, 6])
kser = ks.from_pandas(pser)
kdf.insert(1, "y", kser)
pdf.insert(1, "y", pser)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
#
# DataFrame with Multi-index columns
#
pdf = pd.DataFrame({("x", "a"): [1, 2, 3]})
kdf = ks.from_pandas(pdf)
pser = pd.Series([4, 5, 6])
kser = ks.from_pandas(pser)
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
kdf = ks.from_pandas(pdf)
kdf.insert(0, "a", kser)
pdf.insert(0, "a", pser)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf.insert(0, ("b", "c", ""), kser)
pdf.insert(0, ("b", "c", ""), pser)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_compare(self):
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
pser1 = pd.Series(["b", "c", np.nan, "g", np.nan])
pser2 = pd.Series(["a", "c", np.nan, np.nan, "h"])
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
pser1.compare(pser2).sort_index(), kser1.compare(kser2).sort_index(),
)
# `keep_shape=True`
self.assert_eq(
pser1.compare(pser2, keep_shape=True).sort_index(),
kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True`
self.assert_eq(
pser1.compare(pser2, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True`
self.assert_eq(
pser1.compare(pser2, keep_shape=True, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
# MultiIndex
pser1.index = pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
)
pser2.index = pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
)
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
pser1.compare(pser2).sort_index(), kser1.compare(kser2).sort_index(),
)
# `keep_shape=True` with MultiIndex
self.assert_eq(
pser1.compare(pser2, keep_shape=True).sort_index(),
kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True` with MultiIndex
self.assert_eq(
pser1.compare(pser2, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True` with MultiIndex
self.assert_eq(
pser1.compare(pser2, keep_shape=True, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
else:
kser1 = ks.Series(["b", "c", np.nan, "g", np.nan])
kser2 = ks.Series(["a", "c", np.nan, np.nan, "h"])
expected = ks.DataFrame(
[["b", "a"], ["g", None], [None, "h"]], index=[0, 3, 4], columns=["self", "other"]
)
self.assert_eq(expected, kser1.compare(kser2).sort_index())
# `keep_shape=True`
expected = ks.DataFrame(
[["b", "a"], [None, None], [None, None], ["g", None], [None, "h"]],
index=[0, 1, 2, 3, 4],
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], ["g", None], [None, "h"]], index=[0, 3, 4], columns=["self", "other"]
)
self.assert_eq(
expected, kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], ["c", "c"], [None, None], ["g", None], [None, "h"]],
index=[0, 1, 2, 3, 4],
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
# MultiIndex
kser1 = ks.Series(
["b", "c", np.nan, "g", np.nan],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
),
)
kser2 = ks.Series(
["a", "c", np.nan, np.nan, "h"],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
),
)
expected = ks.DataFrame(
[["b", "a"], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples([("a", "x"), ("q", "l"), ("x", "k")]),
columns=["self", "other"],
)
self.assert_eq(expected, kser1.compare(kser2).sort_index())
# `keep_shape=True`
expected = ks.DataFrame(
[["b", "a"], [None, None], [None, None], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("q", "l"), ("x", "k")]
),
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples([("a", "x"), ("q", "l"), ("x", "k")]),
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], ["c", "c"], [None, None], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("q", "l"), ("x", "k")]
),
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
# Different Index
with self.assertRaisesRegex(
ValueError, "Can only compare identically-labeled Series objects"
):
kser1 = ks.Series([1, 2, 3, 4, 5], index=pd.Index([1, 2, 3, 4, 5]),)
kser2 = ks.Series([2, 2, 3, 4, 1], index=pd.Index([5, 4, 3, 2, 1]),)
kser1.compare(kser2)
# Different MultiIndex
with self.assertRaisesRegex(
ValueError, "Can only compare identically-labeled Series objects"
):
kser1 = ks.Series(
[1, 2, 3, 4, 5],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
),
)
kser2 = ks.Series(
[2, 2, 3, 4, 1],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "a"), ("x", "k"), ("q", "l")]
),
)
kser1.compare(kser2)
def test_different_columns(self):
kdf1 = self.kdf1
kdf4 = self.kdf4
pdf1 = self.pdf1
pdf4 = self.pdf4
self.assert_eq((kdf1 + kdf4).sort_index(), (pdf1 + pdf4).sort_index(), almost=True)
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf1.columns = columns
pdf1.columns = columns
columns = pd.MultiIndex.from_tuples([("z", "e"), ("z", "f")])
kdf4.columns = columns
pdf4.columns = columns
self.assert_eq((kdf1 + kdf4).sort_index(), (pdf1 + pdf4).sort_index(), almost=True)
def test_assignment_series(self):
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kser = kdf.a
pser = pdf.a
kdf["a"] = self.kdf2.a
pdf["a"] = self.pdf2.a
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kser, pser)
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kser = kdf.a
pser = pdf.a
kdf["a"] = self.kdf2.b
pdf["a"] = self.pdf2.b
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kser, pser)
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf["c"] = self.kdf2.a
pdf["c"] = self.pdf2.a
self.assert_eq(kdf.sort_index(), pdf.sort_index())
# Multi-index columns
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf.columns = columns
pdf.columns = columns
kdf[("y", "c")] = self.kdf2.a
pdf[("y", "c")] = self.pdf2.a
self.assert_eq(kdf.sort_index(), pdf.sort_index())
pdf = pd.DataFrame({"a": [1, 2, 3], "Koalas": [0, 1, 2]}).set_index("Koalas", drop=False)
kdf = ks.from_pandas(pdf)
kdf.index.name = None
kdf["NEW"] = ks.Series([100, 200, 300])
pdf.index.name = None
pdf["NEW"] = pd.Series([100, 200, 300])
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_assignment_frame(self):
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kser = kdf.a
pser = pdf.a
kdf[["a", "b"]] = self.kdf1
pdf[["a", "b"]] = self.pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kser, pser)
# 'c' does not exist in `kdf`.
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kser = kdf.a
pser = pdf.a
kdf[["b", "c"]] = self.kdf1
pdf[["b", "c"]] = self.pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kser, pser)
# 'c' and 'd' do not exist in `kdf`.
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf[["c", "d"]] = self.kdf1
pdf[["c", "d"]] = self.pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf.columns = columns
pdf.columns = columns
kdf[[("y", "c"), ("z", "d")]] = self.kdf1
pdf[[("y", "c"), ("z", "d")]] = self.pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf1 = ks.from_pandas(self.pdf1)
pdf1 = self.pdf1
kdf1.columns = columns
pdf1.columns = columns
kdf[["c", "d"]] = kdf1
pdf[["c", "d"]] = pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_assignment_series_chain(self):
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf["a"] = self.kdf1.a
pdf["a"] = self.pdf1.a
kdf["a"] = self.kdf2.b
pdf["a"] = self.pdf2.b
kdf["d"] = self.kdf3.c
pdf["d"] = self.pdf3.c
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_assignment_frame_chain(self):
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf[["a", "b"]] = self.kdf1
pdf[["a", "b"]] = self.pdf1
kdf[["e", "f"]] = self.kdf3
pdf[["e", "f"]] = self.pdf3
kdf[["b", "c"]] = self.kdf2
pdf[["b", "c"]] = self.pdf2
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_multi_index_arithmetic(self):
kdf5 = self.kdf5
kdf6 = self.kdf6
pdf5 = self.pdf5
pdf6 = self.pdf6
# Series
self.assert_eq((kdf5.c - kdf6.e).sort_index(), (pdf5.c - pdf6.e).sort_index())
self.assert_eq((kdf5["c"] / kdf6["e"]).sort_index(), (pdf5["c"] / pdf6["e"]).sort_index())
# DataFrame
self.assert_eq((kdf5 + kdf6).sort_index(), (pdf5 + pdf6).sort_index(), almost=True)
def test_multi_index_assignment_series(self):
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf["x"] = self.kdf6.e
pdf["x"] = self.pdf6.e
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf["e"] = self.kdf6.e
pdf["e"] = self.pdf6.e
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf["c"] = self.kdf6.e
pdf["c"] = self.pdf6.e
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_multi_index_assignment_frame(self):
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf[["c"]] = self.kdf5
pdf[["c"]] = self.pdf5
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf[["x"]] = self.kdf5
pdf[["x"]] = self.pdf5
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf6)
pdf = self.pdf6
kdf[["x", "y"]] = self.kdf6
pdf[["x", "y"]] = self.pdf6
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_frame_loc_setitem(self):
pdf_orig = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
kdf_orig = ks.DataFrame(pdf_orig)
pdf = pdf_orig.copy()
kdf = kdf_orig.copy()
pser1 = pdf.max_speed
pser2 = pdf.shield
kser1 = kdf.max_speed
kser2 = kdf.shield
another_kdf = ks.DataFrame(pdf_orig)
kdf.loc[["viper", "sidewinder"], ["shield"]] = -another_kdf.max_speed
pdf.loc[["viper", "sidewinder"], ["shield"]] = -pdf.max_speed
self.assert_eq(kdf, pdf)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
pdf = pdf_orig.copy()
kdf = kdf_orig.copy()
pser1 = pdf.max_speed
pser2 = pdf.shield
kser1 = kdf.max_speed
kser2 = kdf.shield
kdf.loc[another_kdf.max_speed < 5, ["shield"]] = -kdf.max_speed
pdf.loc[pdf.max_speed < 5, ["shield"]] = -pdf.max_speed
self.assert_eq(kdf, pdf)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
pdf = pdf_orig.copy()
kdf = kdf_orig.copy()
pser1 = pdf.max_speed
pser2 = pdf.shield
kser1 = kdf.max_speed
kser2 = kdf.shield
kdf.loc[another_kdf.max_speed < 5, ["shield"]] = -another_kdf.max_speed
pdf.loc[pdf.max_speed < 5, ["shield"]] = -pdf.max_speed
self.assert_eq(kdf, pdf)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
def test_frame_iloc_setitem(self):
pdf = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
kdf = ks.DataFrame(pdf)
another_kdf = ks.DataFrame(pdf)
kdf.iloc[[0, 1, 2], 1] = -another_kdf.max_speed
pdf.iloc[[0, 1, 2], 1] = -pdf.max_speed
self.assert_eq(kdf, pdf)
# TODO: matching the behavior with pandas 1.2 and uncomment below test
# with self.assertRaisesRegex(
# ValueError,
# "shape mismatch: value array of shape (3,) could not be broadcast to indexing "
# "result of shape (2,1)",
# ):
# kdf.iloc[[1, 2], [1]] = -another_kdf.max_speed
kdf.iloc[[0, 1, 2], 1] = 10 * another_kdf.max_speed
pdf.iloc[[0, 1, 2], 1] = 10 * pdf.max_speed
self.assert_eq(kdf, pdf)
# TODO: matching the behavior with pandas 1.2 and uncomment below test
# with self.assertRaisesRegex(
# ValueError,
# "shape mismatch: value array of shape (3,) could not be broadcast to indexing "
# "result of shape (1,)",
# ):
# kdf.iloc[[0], 1] = 10 * another_kdf.max_speed
def test_series_loc_setitem(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
pser_another = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
kser_another = ks.from_pandas(pser_another)
kser.loc[kser % 2 == 1] = -kser_another
pser.loc[pser % 2 == 1] = -pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
kser.loc[kser_another % 2 == 1] = -kser
pser.loc[pser_another % 2 == 1] = -pser
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
kser.loc[kser_another % 2 == 1] = -kser
pser.loc[pser_another % 2 == 1] = -pser
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
kser.loc[kser_another % 2 == 1] = -kser_another
pser.loc[pser_another % 2 == 1] = -pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
kser.loc[["viper", "sidewinder"]] = -kser_another
pser.loc[["viper", "sidewinder"]] = -pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
kser.loc[kser_another % 2 == 1] = 10
pser.loc[pser_another % 2 == 1] = 10
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
def test_series_iloc_setitem(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
pser1 = pser + 1
kser1 = kser + 1
pser_another = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
kser_another = ks.from_pandas(pser_another)
kser.iloc[[0, 1, 2]] = -kser_another
pser.iloc[[0, 1, 2]] = -pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
# TODO: matching the behavior with pandas 1.2 and uncomment below test.
# with self.assertRaisesRegex(
# ValueError,
# "cannot set using a list-like indexer with a different length than the value",
# ):
# kser.iloc[[1, 2]] = -kser_another
kser.iloc[[0, 1, 2]] = 10 * kser_another
pser.iloc[[0, 1, 2]] = 10 * pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
# with self.assertRaisesRegex(
# ValueError,
# "cannot set using a list-like indexer with a different length than the value",
# ):
# kser.iloc[[0]] = 10 * kser_another
kser1.iloc[[0, 1, 2]] = -kser_another
pser1.iloc[[0, 1, 2]] = -pser_another
self.assert_eq(kser1, pser1)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
# with self.assertRaisesRegex(
# ValueError,
# "cannot set using a list-like indexer with a different length than the value",
# ):
# kser1.iloc[[1, 2]] = -kser_another
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
piloc = pser.iloc
kiloc = kser.iloc
kiloc[[0, 1, 2]] = -kser_another
piloc[[0, 1, 2]] = -pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
# TODO: matching the behavior with pandas 1.2 and uncomment below test.
# with self.assertRaisesRegex(
# ValueError,
# "cannot set using a list-like indexer with a different length than the value",
# ):
# kiloc[[1, 2]] = -kser_another
kiloc[[0, 1, 2]] = 10 * kser_another
piloc[[0, 1, 2]] = 10 * pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
# with self.assertRaisesRegex(
# ValueError,
# "cannot set using a list-like indexer with a different length than the value",
# ):
# kiloc[[0]] = 10 * kser_another
def test_update(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [10, 20, 30]})
kdf = ks.from_pandas(pdf)
pser = pdf.x
kser = kdf.x
pser.update(pd.Series([4, 5, 6]))
kser.update(ks.Series([4, 5, 6]))
self.assert_eq(kser.sort_index(), pser.sort_index())
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_where(self):
pdf1 = pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame({"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]})
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.where(pdf2 > 100), kdf1.where(kdf2 > 100).sort_index())
pdf1 = pd.DataFrame({"A": [-1, -2, -3, -4, -5], "B": [-100, -200, -300, -400, -500]})
pdf2 = pd.DataFrame({"A": [-10, -20, -30, -40, -50], "B": [-5, -4, -3, -2, -1]})
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.where(pdf2 < -250), kdf1.where(kdf2 < -250).sort_index())
# multi-index columns
pdf1 = pd.DataFrame({("X", "A"): [0, 1, 2, 3, 4], ("X", "B"): [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame(
{("X", "A"): [0, -1, -2, -3, -4], ("X", "B"): [-100, -200, -300, -400, -500]}
)
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.where(pdf2 > 100), kdf1.where(kdf2 > 100).sort_index())
def test_mask(self):
pdf1 = pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame({"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]})
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.mask(pdf2 < 100), kdf1.mask(kdf2 < 100).sort_index())
pdf1 = pd.DataFrame({"A": [-1, -2, -3, -4, -5], "B": [-100, -200, -300, -400, -500]})
pdf2 = pd.DataFrame({"A": [-10, -20, -30, -40, -50], "B": [-5, -4, -3, -2, -1]})
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.mask(pdf2 > -250), kdf1.mask(kdf2 > -250).sort_index())
# multi-index columns
pdf1 = pd.DataFrame({("X", "A"): [0, 1, 2, 3, 4], ("X", "B"): [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame(
{("X", "A"): [0, -1, -2, -3, -4], ("X", "B"): [-100, -200, -300, -400, -500]}
)
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.mask(pdf2 < 100), kdf1.mask(kdf2 < 100).sort_index())
def test_multi_index_column_assignment_frame(self):
pdf = pd.DataFrame({"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0]})
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y")])
kdf = ks.DataFrame(pdf)
kdf["c"] = ks.Series([10, 20, 30, 20])
pdf["c"] = pd.Series([10, 20, 30, 20])
kdf[("d", "x")] = ks.Series([100, 200, 300, 200], name="1")
pdf[("d", "x")] = pd.Series([100, 200, 300, 200], name="1")
kdf[("d", "y")] = ks.Series([1000, 2000, 3000, 2000], name=("1", "2"))
pdf[("d", "y")] = pd.Series([1000, 2000, 3000, 2000], name=("1", "2"))
kdf["e"] = ks.Series([10000, 20000, 30000, 20000], name=("1", "2", "3"))
pdf["e"] = pd.Series([10000, 20000, 30000, 20000], name=("1", "2", "3"))
kdf[[("f", "x"), ("f", "y")]] = ks.DataFrame(
{"1": [100000, 200000, 300000, 200000], "2": [1000000, 2000000, 3000000, 2000000]}
)
pdf[[("f", "x"), ("f", "y")]] = pd.DataFrame(
{"1": [100000, 200000, 300000, 200000], "2": [1000000, 2000000, 3000000, 2000000]}
)
self.assert_eq(repr(kdf.sort_index()), repr(pdf))
with self.assertRaisesRegex(KeyError, "Key length \\(3\\) exceeds index depth \\(2\\)"):
kdf[("1", "2", "3")] = ks.Series([100, 200, 300, 200])
def test_series_dot(self):
pser = pd.Series([90, 91, 85], index=[2, 4, 1])
kser = ks.from_pandas(pser)
pser_other = pd.Series([90, 91, 85], index=[2, 4, 1])
kser_other = ks.from_pandas(pser_other)
self.assert_eq(kser.dot(kser_other), pser.dot(pser_other))
kser_other = ks.Series([90, 91, 85], index=[1, 2, 4])
pser_other = pd.Series([90, 91, 85], index=[1, 2, 4])
self.assert_eq(kser.dot(kser_other), pser.dot(pser_other))
# length of index is different
kser_other = ks.Series([90, 91, 85, 100], index=[2, 4, 1, 0])
with self.assertRaisesRegex(ValueError, "matrices are not aligned"):
kser.dot(kser_other)
# for MultiIndex
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
kser = ks.from_pandas(pser)
pser_other = pd.Series([-450, 20, 12, -30, -250, 15, -320, 100, 3], index=midx)
kser_other = ks.from_pandas(pser_other)
self.assert_eq(kser.dot(kser_other), pser.dot(pser_other))
pser = pd.Series([0, 1, 2, 3])
kser = ks.from_pandas(pser)
# DataFrame "other" without Index/MultiIndex as columns
pdf = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]])
kdf = ks.from_pandas(pdf)
self.assert_eq(kser.dot(kdf), pser.dot(pdf))
# DataFrame "other" with Index as columns
pdf.columns = pd.Index(["x", "y"])
kdf = ks.from_pandas(pdf)
self.assert_eq(kser.dot(kdf), pser.dot(pdf))
pdf.columns = pd.Index(["x", "y"], name="cols_name")
kdf = ks.from_pandas(pdf)
self.assert_eq(kser.dot(kdf), pser.dot(pdf))
pdf = pdf.reindex([1, 0, 2, 3])
kdf = ks.from_pandas(pdf)
self.assert_eq(kser.dot(kdf), pser.dot(pdf))
# DataFrame "other" with MultiIndex as columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y")])
kdf = ks.from_pandas(pdf)
self.assert_eq(kser.dot(kdf), pser.dot(pdf))
pdf.columns = pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y")], names=["cols_name1", "cols_name2"]
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kser.dot(kdf), pser.dot(pdf))
kser = ks.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}).b
pser = kser.to_pandas()
kdf = ks.DataFrame({"c": [7, 8, 9]})
pdf = kdf.to_pandas()
self.assert_eq(kser.dot(kdf), pser.dot(pdf))
def test_frame_dot(self):
pdf = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
kdf = ks.from_pandas(pdf)
pser = pd.Series([1, 1, 2, 1])
kser = ks.from_pandas(pser)
self.assert_eq(kdf.dot(kser), pdf.dot(pser))
# Index reorder
pser = pser.reindex([1, 0, 2, 3])
kser = ks.from_pandas(pser)
self.assert_eq(kdf.dot(kser), pdf.dot(pser))
# ser with name
pser.name = "ser"
kser = ks.from_pandas(pser)
self.assert_eq(kdf.dot(kser), pdf.dot(pser))
# df with MultiIndex as column (ser with MultiIndex)
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pser = pd.Series([1, 1, 2, 1], index=pidx)
pdf = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]], columns=pidx)
kdf = ks.from_pandas(pdf)
kser = ks.from_pandas(pser)
self.assert_eq(kdf.dot(kser), pdf.dot(pser))
# df with Index as column (ser with Index)
pidx = pd.Index([1, 2, 3, 4], name="number")
pser = pd.Series([1, 1, 2, 1], index=pidx)
pdf = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]], columns=pidx)
kdf = ks.from_pandas(pdf)
kser = ks.from_pandas(pser)
self.assert_eq(kdf.dot(kser), pdf.dot(pser))
# df with Index
pdf.index = pd.Index(["x", "y"], name="char")
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.dot(kser), pdf.dot(pser))
# df with MultiIndex
pdf.index = pd.MultiIndex.from_arrays([[1, 1], ["red", "blue"]], names=("number", "color"))
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.dot(kser), pdf.dot(pser))
pdf = pd.DataFrame([[1, 2], [3, 4]])
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.dot(kdf[0]), pdf.dot(pdf[0]))
self.assert_eq(kdf.dot(kdf[0] * 10), pdf.dot(pdf[0] * 10))
self.assert_eq((kdf + 1).dot(kdf[0] * 10), (pdf + 1).dot(pdf[0] * 10))
def test_to_series_comparison(self):
kidx1 = ks.Index([1, 2, 3, 4, 5])
kidx2 = ks.Index([1, 2, 3, 4, 5])
self.assert_eq((kidx1.to_series() == kidx2.to_series()).all(), True)
kidx1.name = "koalas"
kidx2.name = "koalas"
self.assert_eq((kidx1.to_series() == kidx2.to_series()).all(), True)
def test_series_repeat(self):
pser1 = pd.Series(["a", "b", "c"], name="a")
pser2 = pd.Series([10, 20, 30], name="rep")  # completion (api: pandas.Series)
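# --- Illustrative sketch (assumption; requires a working Spark/Koalas environment): every test
# in the class above depends on set_option("compute.ops_on_diff_frames", True). By default
# Koalas refuses to combine two different DataFrames/Series, because doing so forces an
# implicit -- and potentially expensive -- join on their indexes. A minimal standalone example:
from databricks import koalas as ks
from databricks.koalas.config import set_option, reset_option
kdf_left = ks.DataFrame({"x": [1, 2, 3]})
kdf_right = ks.DataFrame({"x": [10, 20, 30]})
set_option("compute.ops_on_diff_frames", True)
try:
    print((kdf_left.x + kdf_right.x).sort_index())  # aligned on the index via a join, as in the tests above
finally:
    reset_option("compute.ops_on_diff_frames")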
"""
Tests for live trading.
"""
from unittest import TestCase
from datetime import time
from collections import defaultdict
import pandas as pd
import numpy as np
# fix to allow zip_longest on Python 2.X and 3.X
try: # Python 3
from itertools import zip_longest
except ImportError: # Python 2
from itertools import izip_longest as zip_longest
from functools import partial
import os
from math import fabs
from mock import patch, sentinel, Mock, MagicMock
from testfixtures import tempdir
from ib.ext.Contract import Contract
from ib.ext.Order import Order
from ib.ext.Execution import Execution
from ib.ext.OrderState import OrderState
import alpaca_trade_api.rest as apca
from zipline.algorithm import TradingAlgorithm
from zipline.algorithm_live import LiveTradingAlgorithm, LiveAlgorithmExecutor
from zipline.data.data_portal_live import DataPortalLive
from zipline.gens.realtimeclock import (RealtimeClock,
SESSION_START,
BEFORE_TRADING_START_BAR)
from zipline.finance.order import Order as ZPOrder
from zipline.finance.blotter_live import BlotterLive
from zipline.gens.sim_engine import MinuteSimulationClock
from zipline.gens.brokers.broker import Broker
from zipline.gens.brokers.ib_broker import IBBroker, TWSConnection
from zipline.gens.brokers.alpaca_broker import ALPACABroker
from zipline.testing.fixtures import WithSimParams
from zipline.finance.execution import (StopLimitOrder,
MarketOrder,
StopOrder,
LimitOrder)
from zipline.finance.order import ORDER_STATUS
from zipline.finance.transaction import Transaction
from zipline.utils.calendars import get_calendar
from zipline.utils.calendars.trading_calendar import days_at_time
from zipline.utils.serialization_utils import load_context, store_context
from zipline.testing.fixtures import (ZiplineTestCase,
WithTradingEnvironment,
WithDataPortal)
from zipline.errors import CannotOrderDelistedAsset
class TestRealtimeClock(TestCase):
@classmethod
def setUpClass(cls):
cls.nyse_calendar = get_calendar("NYSE")
cls.sessions = cls.nyse_calendar.sessions_in_range(
pd.Timestamp("2017-04-20"),
pd.Timestamp("2017-04-20")
)
trading_o_and_c = cls.nyse_calendar.schedule.ix[cls.sessions]
cls.opens = trading_o_and_c['market_open']
cls.closes = trading_o_and_c['market_close']
def setUp(self):
self.internal_clock = None
self.events = defaultdict(list)
def advance_clock(self, x):
"""Mock function for sleep. Advances the internal clock by 1 min"""
# The internal clock advance time must be 1 minute to match
# MinutesSimulationClock's update frequency
self.internal_clock += pd.Timedelta('1 min')  # completion (api: pandas.Timedelta)
import unittest
import pandas as pd
import pandas.util.testing as pdtest
import tia.rlab.table as tbl
class TestTable(unittest.TestCase):
def setUp(self):
self.df1 = df1 = pd.DataFrame({'A': [.55, .65], 'B': [1234., -5678.]}, index=['I1', 'I2'])
# Frame with a MultiIndex for both columns and rows
cols = pd.MultiIndex.from_arrays([['LEFT', 'LEFT', 'RIGHT', 'RIGHT'], ['A', 'B', 'A', 'B']])
idx = pd.MultiIndex.from_arrays([['TOP', 'BOTTOM'], ['I1', 'I2']])
self.mdf1 = pd.DataFrame([[.55, 1234., .55, 1234.], [.65, -5678., .65, -5678.]], columns=cols, index=idx)
def test_span_iter(self):
s = pd.Series([1, 1, 1, 3, 2, 2])
items = list(tbl.span_iter(s))
self.assertEqual(items, [(0, 2), (4, 5)])
# reverse and ensure it does not break it
s = s[::-1]
items = list(tbl.span_iter(s))
self.assertEqual(items, [(0, 2), (4, 5)])
def test_level_iter(self):
l1 = ['L_11', 'L_12']
l2 = ['L_21', 'L_22']
l3 = ['L_31', 'L_32']
midx = pd.MultiIndex.from_arrays([l1, l2, l3], names=['1', '2', '3'])
actual = list(tbl.level_iter(midx))
expected = [(0, 0, 'L_11'), (0, 1, 'L_12'), (1, 0, 'L_21'), (1, 1, 'L_22'), (2, 0, 'L_31'), (2, 1, 'L_32')]
self.assertEqual(actual, expected)
actual = list(tbl.level_iter(midx, levels=[0, 2]))
expected = [(0, 0, 'L_11'), (0, 1, 'L_12'), (2, 0, 'L_31'), (2, 1, 'L_32')]
self.assertEqual(actual, expected)
actual = list(tbl.level_iter(midx, levels=0))
expected = [(0, 0, 'L_11'), (0, 1, 'L_12')]
self.assertEqual(actual, expected)
def test_region_formatter_iloc(self):
tf = tbl.TableFormatter(self.df1)
region = tf.cells
region.apply_format(lambda x: 'A')
expected = pd.DataFrame([['A', 'A'], ['A', 'A']], index=[1, 2], columns=[1, 2])
pdtest.assert_frame_equal(tf.cells.formatted_values, expected)
#
# Use the location
#
region = region.iloc[:, 1]
region.apply_format(lambda x: 'B')
expected = pd.DataFrame([['A', 'B'], ['A', 'B']], index=[1, 2], columns=[1, 2])
pdtest.assert_frame_equal(tf.cells.formatted_values, expected)
# Get single cell
region = region.iloc[1]
region.apply_format(lambda x: 'D')
expected = pd.DataFrame([['A', 'B'], ['A', 'D']], index=[1, 2], columns=[1, 2])  # completion (api: pandas.DataFrame)
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# this definition exposes all python module imports that should be available in all subsequent commands
import json
import numpy as np
import pandas as pd
import datetime as dt
import stumpy
# ...
# global constants
MODEL_DIRECTORY = "/srv/app/model/data/"
# In[36]:
# this cell is not executed from MLTK and should only be used for staging data into the notebook environment
def stage(name):
with open("data/"+name+".csv", 'r') as f:
df = pd.read_csv(f)
with open("data/"+name+".json", 'r') as f:
param = json.load(f)
return df, param
# In[38]:
# initialize your model
# available inputs: data and parameters
# returns the model object which will be used as a reference to call fit, apply and summary subsequently
def init(df,param):
model = {}
return model
# In[40]:
# train your model
# returns a fit info json object and may modify the model object
def fit(model,df,param):
# model.fit()
info = {"message": "model created"}
return info
# In[42]:
# apply your model
# returns the calculated results
def apply(model,df,param):
m = 24
m = int(param.get('options', {}).get('params', {}).get('m', m))
target = param['target_variables'][0]
mp = stumpy.stump(df[target], m)
result = pd.DataFrame(mp[:, 0], columns=['matrix_profile'])
return | pd.concat([df, result], axis=1) | pandas.concat |
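# In[ ]:
# A minimal local smoke test of the init/fit/apply contract above (not part of
# the MLTK interface). The column name "value", the window length and the
# parameter layout are assumptions chosen purely for illustration.
def _local_smoke_test():
    rng = np.random.default_rng(0)
    demo_df = pd.DataFrame(
        {"value": np.sin(np.linspace(0, 20, 200)) + rng.normal(0, 0.1, 200)}
    )
    demo_param = {"target_variables": ["value"], "options": {"params": {"m": 24}}}
    model = init(demo_df, demo_param)
    print(fit(model, demo_df, demo_param))
    # apply() should append a 'matrix_profile' column to the input frame
    print(apply(model, demo_df, demo_param).head())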
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0.0, np.nan, 2.0], index=index)
tm.assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=float)
tm.assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0, np.nan, 2], index=index, dtype=float)
tm.assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([True, np.nan, False], index=index, dtype=object)
tm.assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype="M8[ns]")
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3,), dtype=float).harden_mask()
result = pd.Series(data)
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range("20090415", "20090519", freq="B")
data = {k: 1 for k in rng}
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
@pytest.mark.parametrize(
"input",
[
[1, 2, 3],
(1, 2, 3),
list(range(3)),
pd.Categorical(["a", "b", "a"]),
(i for i in range(3)),
map(lambda x: x, range(3)),
],
)
def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
msg = "Length of passed values is 3, index implies 4"
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
def test_constructor_numpy_scalar(self):
# GH 19342
# construction with a numpy scalar
# should not raise
result = Series(np.array(100), index=np.arange(4), dtype="int64")
expected = Series(100, index=np.arange(4), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
msg = "Length of passed values is 1, index implies 3"
with pytest.raises(ValueError, match=msg):
Series(["foo"], index=["a", "b", "c"])
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8")
assert s.dtype == np.dtype("i8")
s = Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8")
assert s.dtype == np.dtype("f8")
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.0], np.array([1.0])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
# changes to origin of copy does not affect the copy
x[0] = 2.0
assert not x.equals(y)
assert x[0] == 2.0
assert y[0] == 1.0
@pytest.mark.parametrize(
"index",
[
pd.date_range("20170101", periods=3, tz="US/Eastern"),
pd.date_range("20170101", periods=3),
pd.timedelta_range("1 day", periods=3),
pd.period_range("2012Q1", periods=3, freq="Q"),
pd.Index(list("abc")),
pd.Int64Index([1, 2, 3]),
pd.RangeIndex(0, 3),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_limit_copies(self, index):
# GH 17449
# limit copies of input
s = pd.Series(index)
# we make 1 copy; this is just a smoke test here
assert s._mgr.blocks[0].values is not index
def test_constructor_pass_none(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(None, index=range(5))
assert s.dtype == np.float64
s = Series(None, index=range(5), dtype=object)
assert s.dtype == np.object_
# GH 7431
# inference on the index
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
tm.assert_series_equal(s, expected)
def test_constructor_pass_nan_nat(self):
# GH 13467
exp = Series([np.nan, np.nan], dtype=np.float64)
assert exp.dtype == np.float64
tm.assert_series_equal(Series([np.nan, np.nan]), exp)
tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp)
exp = Series([pd.NaT, pd.NaT])
assert exp.dtype == "datetime64[ns]"
tm.assert_series_equal(Series([pd.NaT, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, pd.NaT])), exp)
tm.assert_series_equal(Series([pd.NaT, np.nan]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, np.nan])), exp)
tm.assert_series_equal(Series([np.nan, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([np.nan, pd.NaT])), exp)
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Series(["a", "b", "c"], dtype=float)
def test_constructor_unsigned_dtype_overflow(self, uint_dtype):
# see gh-15832
msg = "Trying to coerce negative values to unsigned integers"
with pytest.raises(OverflowError, match=msg):
Series([-1], dtype=uint_dtype)
def test_constructor_coerce_float_fail(self, any_int_dtype):
# see gh-15832
msg = "Trying to coerce float values to integers"
with pytest.raises(ValueError, match=msg):
Series([1, 2, 3.5], dtype=any_int_dtype)
def test_constructor_coerce_float_valid(self, float_dtype):
s = Series([1, 2, 3.5], dtype=float_dtype)
expected = Series([1, 2, 3.5]).astype(float_dtype)
tm.assert_series_equal(s, expected)
def test_constructor_dtype_no_cast(self):
# see gh-1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
assert s[1] == 5
def test_constructor_datelike_coercion(self):
# GH 9477
# incorrectly inferring on datetimelike-looking data when object dtype is
# specified
s = Series([Timestamp("20130101"), "NOV"], dtype=object)
assert s.iloc[0] == Timestamp("20130101")
assert s.iloc[1] == "NOV"
assert s.dtype == object
# the dtype was being reset on the slicing and re-inferred to datetime
# even though the blocks are mixed
belly = "216 3T19".split()
wing1 = "2T15 4H19".split()
wing2 = "416 4T20".split()
mat = pd.to_datetime("2016-01-22 2019-09-07".split())
df = pd.DataFrame({"wing1": wing1, "wing2": wing2, "mat": mat}, index=belly)
result = df.loc["3T19"]
assert result.dtype == object
result = df.loc["216"]
assert result.dtype == object
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [
np.array([None, None, None, None, datetime.now(), None]),
np.array([None, None, datetime.now(), None]),
]:
result = Series(arr)
assert result.dtype == "M8[ns]"
def test_constructor_dtype_datetime64(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
assert isna(s).all()
# in theory this should be all nulls, but since
# we are not specifying a dtype, it is ambiguous
s = Series(iNaT, index=range(5))
assert not isna(s).all()
s = Series(np.nan, dtype="M8[ns]", index=range(5))
assert isna(s).all()
s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
s = Series([datetime(2001, 1, 2, 0, 0), np.nan], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
assert s.dtype == "M8[ns]"
s.iloc[0] = np.nan
assert s.dtype == "M8[ns]"
# GH3414 related
expected = Series(
[datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)],
dtype="datetime64[ns]",
)
result = Series(Series(dates).astype(np.int64) / 1000000, dtype="M8[ms]")
tm.assert_series_equal(result, expected)
result = Series(dates, dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
expected = Series(
[pd.NaT, datetime(2013, 1, 2), datetime(2013, 1, 3)], dtype="datetime64[ns]"
)
result = Series([np.nan] + dates[1:], dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
dts = Series(dates, dtype="datetime64[ns]")
# valid astype
dts.astype("int64")
# invalid casting
msg = r"cannot astype a datetimelike from \[datetime64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
dts.astype("int32")
# ints are ok
# we test with np.int64 to get similar results on
# windows / 32-bit platforms
result = Series(dts, dtype=np.int64)
expected = Series(dts.astype(np.int64))
tm.assert_series_equal(result, expected)
# invalid dates can be held as object
result = Series([datetime(2, 1, 1)])
assert result[0] == datetime(2, 1, 1, 0, 0)
result = Series([datetime(3000, 1, 1)])
assert result[0] == datetime(3000, 1, 1, 0, 0)
# don't mix types
result = Series([Timestamp("20130101"), 1], index=["a", "b"])
assert result["a"] == Timestamp("20130101")
assert result["b"] == 1
# GH6529
# coerce datetime64 non-ns properly
dates = date_range("01-Jan-2015", "01-Dec-2015", freq="M")
values2 = dates.view(np.ndarray).astype("datetime64[ns]")
expected = Series(values2, index=dates)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, dates)
tm.assert_series_equal(result, expected)
# GH 13876
# coerce non-ns values to object properly
expected = Series(values2, index=dates, dtype=object)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, index=dates, dtype=object)
tm.assert_series_equal(result, expected)
# leave datetime.date alone
dates2 = np.array([d.date() for d in dates.to_pydatetime()], dtype=object)
series1 = Series(dates2, dates)
tm.assert_numpy_array_equal(series1.values, dates2)
assert series1.dtype == object
# these will correctly infer a datetime
s = Series([None, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([np.nan, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, None, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, np.nan, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range("20130101", periods=3)
assert Series(dr).iloc[0].tz is None
dr = date_range("20130101", periods=3, tz="UTC")
assert str(Series(dr).iloc[0].tz) == "UTC"
dr = date_range("20130101", periods=3, tz="US/Eastern")
assert str(Series(dr).iloc[0].tz) == "US/Eastern"
# non-convertible
s = Series([1479596223000, -1479590, pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a NaT it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a nan it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
assert s.dtype == "object"
assert s[2] is np.nan
assert "NaN" in str(s)
def test_constructor_with_datetime_tz(self):
# 8260
# support datetime64 with tz
dr = date_range("20130101", periods=3, tz="US/Eastern")
s = Series(dr)
assert s.dtype.name == "datetime64[ns, US/Eastern]"
assert s.dtype == "datetime64[ns, US/Eastern]"
assert is_datetime64tz_dtype(s.dtype)
assert "datetime64[ns, US/Eastern]" in str(s)
# export
result = s.values
assert isinstance(result, np.ndarray)
assert result.dtype == "datetime64[ns]"
exp = pd.DatetimeIndex(result)
exp = exp.tz_localize("UTC").tz_convert(tz=s.dt.tz)
tm.assert_index_equal(dr, exp)
# indexing
result = s.iloc[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[Series([True, True, False], index=s.index)]
tm.assert_series_equal(result, s[0:2])
result = s.iloc[0:1]
tm.assert_series_equal(result, Series(dr[0:1]))
# concat
result = pd.concat([s.iloc[0:1], s.iloc[1:]])
tm.assert_series_equal(result, s)
# short str
assert "datetime64[ns, US/Eastern]" in str(s)
# formatting with NaT
result = s.shift()
assert "datetime64[ns, US/Eastern]" in str(result)
assert "NaT" in str(result)
# long str
t = Series(date_range("20130101", periods=1000, tz="US/Eastern"))
assert "datetime64[ns, US/Eastern]" in str(t)
result = pd.DatetimeIndex(s, freq="infer")
tm.assert_index_equal(result, dr)
# inference
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
]
)
assert s.dtype == "datetime64[ns, US/Pacific]"
assert lib.infer_dtype(s, skipna=True) == "datetime64"
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Eastern"),
]
)
assert s.dtype == "object"
assert lib.infer_dtype(s, skipna=True) == "datetime"
# with all NaT
s = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]")
expected = Series(pd.DatetimeIndex(["NaT", "NaT"], tz="US/Eastern"))
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_construction_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units
# gh-19223
dtype = f"{dtype}[{unit}]"
arr = np.array([1, 2, 3], dtype=arr_dtype)
s = Series(arr)
result = s.astype(dtype)
expected = Series(arr.astype(dtype))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg", ["2013-01-01 00:00:00", pd.NaT, np.nan, None])
def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg):
# GH 17415: With naive string
result = Series([arg], dtype="datetime64[ns, CET]")
expected = Series(pd.Timestamp(arg)).dt.tz_localize("CET")
tm.assert_series_equal(result, expected)
def test_constructor_datetime64_bigendian(self):
# GH#30976
ms = np.datetime64(1, "ms")
arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]")
result = Series(arr)
expected = Series([Timestamp(ms)])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("interval_constructor", [IntervalIndex, IntervalArray])
def test_construction_interval(self, interval_constructor):
# construction from interval & array of intervals
intervals = interval_constructor.from_breaks(np.arange(3), closed="right")
result = Series(intervals)
assert result.dtype == "interval[int64]"
tm.assert_index_equal(Index(result.values), Index(intervals))
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_interval(self, data_constructor):
# GH 23563: consistent closed results in interval dtype
data = [pd.Interval(0, 1), pd.Interval(0, 2), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(IntervalArray(data))
assert result.dtype == "interval[float64]"
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_interval_mixed_closed(self, data_constructor):
# GH 23563: mixed closed results in object dtype (not interval dtype)
data = [pd.Interval(0, 1, closed="both"), pd.Interval(0, 2, closed="neither")]
result = Series(data_constructor(data))
assert result.dtype == object
assert result.tolist() == data
def test_construction_consistency(self):
# make sure that we are not re-localizing upon construction
# GH 14928
s = Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
result = Series(s, dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.dt.tz_convert("UTC"), dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.values, dtype=s.dtype)
tm.assert_series_equal(result, s)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_period(self, data_constructor):
data = [pd.Period("2000", "D"), pd.Period("2001", "D"), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(period_array(data))
tm.assert_series_equal(result, expected)
assert result.dtype == "Period[D]"
def test_constructor_period_incompatible_frequency(self):
data = [pd.Period("2000", "D"), pd.Period("2001", "A")]
result = pd.Series(data)
assert result.dtype == object
assert result.tolist() == data
def test_constructor_periodindex(self):
# GH7932
# converting a PeriodIndex when put in a Series
pi = period_range("20130101", periods=5, freq="D")
s = Series(pi)
assert s.dtype == "Period[D]"
expected = Series(pi.astype(object))
tm.assert_series_equal(s, expected)
def test_constructor_dict(self):
d = {"a": 0.0, "b": 1.0, "c": 2.0}
result = Series(d, index=["b", "c", "d", "a"])
expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
tm.assert_series_equal(result, expected)
pidx = tm.makePeriodIndex(100)
d = {pidx[0]: 0, pidx[1]: 1}
result = Series(d, index=pidx)
expected = Series(np.nan, pidx, dtype=np.float64)
expected.iloc[0] = 0
expected.iloc[1] = 1
tm.assert_series_equal(result, expected)
def test_constructor_dict_list_value_explicit_dtype(self):
# GH 18625
d = {"a": [[2], [3], [4]]}
result = Series(d, index=["a"], dtype="object")
expected = Series(d, index=["a"])
tm.assert_series_equal(result, expected)
def test_constructor_dict_order(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6, else
# order by value
d = {"b": 1, "a": 0, "c": 2}
result = Series(d)
expected = Series([1, 0, 2], index=list("bac"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_constructor_dict_extension(self, data, dtype):
d = {"a": data}
result = Series(d, index=["a"])
expected = Series(data, index=["a"], dtype=dtype)
assert result.dtype == dtype
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float("nan")])
def test_constructor_dict_nan_key(self, value):
# GH 18480
d = {1: "a", value: "b", float("nan"): "c", 4: "d"}
result = Series(d).sort_values()
expected = Series(["a", "b", "c", "d"], index=[1, value, np.nan, 4])
tm.assert_series_equal(result, expected)
# MultiIndex:
d = {(1, 1): "a", (2, np.nan): "b", (3, value): "c"}
result = Series(d).sort_values()
expected = Series(
["a", "b", "c"], index=Index([(1, 1), (2, np.nan), (3, value)])
)
tm.assert_series_equal(result, expected)
def test_constructor_dict_datetime64_index(self):
# GH 9456
dates_as_str = ["1984-02-19", "1988-11-06", "1989-12-03", "1990-03-15"]
values = [42544017.198965244, 1234565, 40512335.181958228, -1]
def create_data(constructor):
return dict(zip((constructor(x) for x in dates_as_str), values))
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, "%Y-%m-%d"))
data_Timestamp = create_data(Timestamp)
expected = Series(values, (Timestamp(x) for x in dates_as_str))
result_datetime64 = Series(data_datetime64)
result_datetime = Series(data_datetime)
result_Timestamp = Series(data_Timestamp)
tm.assert_series_equal(result_datetime64, expected)
tm.assert_series_equal(result_datetime, expected)
tm.assert_series_equal(result_Timestamp, expected)
def test_constructor_dict_tuple_indexer(self):
# GH 12948
data = {(1, 1, None): -1.0}
result = Series(data)
expected = Series(
-1.0, index=MultiIndex(levels=[[1], [1], [np.nan]], codes=[[0], [0], [-1]])
)
tm.assert_series_equal(result, expected)
def test_constructor_mapping(self, non_dict_mapping_subclass):
# GH 29788
ndm = non_dict_mapping_subclass({3: "three"})
result = Series(ndm)
expected = Series(["three"], index=[3])
tm.assert_series_equal(result, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
assert list(s) == data
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
assert tuple(s) == data
def test_constructor_dict_of_tuples(self):
data = {(1, 2): 3, (None, 5): 6}
result = Series(data).sort_values()
expected = Series([3, 6], index=MultiIndex.from_tuples([(1, 2), (None, 5)]))
tm.assert_series_equal(result, expected)
def test_constructor_set(self):
values = {1, 2, 3, 4, 5}
with pytest.raises(TypeError, match="'set' type is unordered"):
Series(values)
values = frozenset(values)
with pytest.raises(TypeError, match="'frozenset' type is unordered"):
Series(values)
# https://github.com/pandas-dev/pandas/issues/22698
@pytest.mark.filterwarnings("ignore:elementwise comparison:FutureWarning")
def test_fromDict(self):
data = {"a": 0, "b": 1, "c": 2, "d": 3}
series = Series(data)
tm.assert_is_sorted(series.index)
data = {"a": 0, "b": "1", "c": "2", "d": datetime.now()}
series = Series(data)
assert series.dtype == np.object_
data = {"a": 0, "b": "1", "c": "2", "d": "3"}
series = Series(data)
assert series.dtype == np.object_
data = {"a": "0", "b": "1"}
series = Series(data, dtype=float)
assert series.dtype == np.float64
def test_fromValue(self, datetime_series):
nans = Series(np.NaN, index=datetime_series.index, dtype=np.float64)
assert nans.dtype == np.float_
assert len(nans) == len(datetime_series)
strings = Series("foo", index=datetime_series.index)
assert strings.dtype == np.object_
assert len(strings) == len(datetime_series)
d = datetime.now()
dates = Series(d, index=datetime_series.index)
assert dates.dtype == "M8[ns]"
assert len(dates) == len(datetime_series)
# GH12336
# Test construction of categorical series from value
categorical = Series(0, index=datetime_series.index, dtype="category")
expected = Series(0, index=datetime_series.index).astype("category")
assert categorical.dtype == "category"
assert len(categorical) == len(datetime_series)
tm.assert_series_equal(categorical, expected)
def test_constructor_dtype_timedelta64(self):
# basic
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1)])
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1), timedelta(days=2), np.timedelta64(1, "s")])
assert td.dtype == "timedelta64[ns]"
# mixed with NaT
td = Series([timedelta(days=1), NaT], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1), np.nan], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
td = Series([np.timedelta64(300000000), pd.NaT], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
# improved inference
# GH5689
td = Series([np.timedelta64(300000000), NaT])
assert td.dtype == "timedelta64[ns]"
# because iNaT is int, not coerced to timedelta
td = Series([np.timedelta64(300000000), iNaT])
assert td.dtype == "object"
td = Series([np.timedelta64(300000000), np.nan])
assert td.dtype == "timedelta64[ns]"
td = Series([pd.NaT, np.timedelta64(300000000)])
assert td.dtype == "timedelta64[ns]"
td = Series([np.timedelta64(1, "s")])
assert td.dtype == "timedelta64[ns]"
# these are frequency conversion astypes
# for t in ['s', 'D', 'us', 'ms']:
# with pytest.raises(TypeError):
# td.astype('m8[%s]' % t)
# valid astype
td.astype("int64")
# invalid casting
msg = r"cannot astype a timedelta from \[timedelta64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
td.astype("int32")
# this is an invalid casting
msg = "Could not convert object to NumPy timedelta"
with pytest.raises(ValueError, match=msg):
Series([timedelta(days=1), "foo"], dtype="m8[ns]")
# leave as object here
td = Series([timedelta(days=i) for i in range(3)] + ["foo"])
assert td.dtype == "object"
# these will correctly infer a timedelta
s = Series([None, pd.NaT, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([np.nan, pd.NaT, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([pd.NaT, None, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([pd.NaT, np.nan, "1 Day"])
assert s.dtype == "timedelta64[ns]"
# GH 16406
def test_constructor_mixed_tz(self):
s = Series([Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")])
expected = Series(
[Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(s, expected)
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype="M8[ns]")
val = series[3]
assert isna(val)
series[2] = val
assert isna(series[2])
def test_NaT_cast(self):
# GH10747
result = Series([np.nan]).astype("M8[ns]")
expected = Series([NaT])
tm.assert_series_equal(result, expected)
def test_constructor_name_hashable(self):
for n in [777, 777.0, "name", datetime(2001, 11, 11), (1,), "\u05D0"]:
for data in [[1, 2, 3], np.ones(3), {"a": 0, "b": 1}]:
s = Series(data, name=n)
assert s.name == n
def test_constructor_name_unhashable(self):
msg = r"Series\.name must be a hashable type"
for n in [["name_list"], np.ones(2), {1: 2}]:
for data in [["name_list"], np.ones(2), {1: 2}]:
with pytest.raises(TypeError, match=msg):
Series(data, name=n)
def test_auto_conversion(self):
series = Series(list(date_range("1/1/2000", periods=10)))
assert series.dtype == "M8[ns]"
def test_convert_non_ns(self):
# convert from a numpy array of non-ns timedelta64
arr = np.array([1, 2, 3], dtype="timedelta64[s]")
s = Series(arr)
expected = Series(pd.timedelta_range("00:00:01", periods=3, freq="s"))
tm.assert_series_equal(s, expected)
# convert from a numpy array of non-ns datetime64
# note that creating a numpy datetime64 is in LOCAL time!!!!
# seems to work for M8[D], but not for M8[s]
# TODO: is the above comment still accurate/needed?
arr = np.array(
["2013-01-01", "2013-01-02", "2013-01-03"], dtype="datetime64[D]"
)
ser = Series(arr)
expected = Series(date_range("20130101", periods=3, freq="D"))
| tm.assert_series_equal(ser, expected) | pandas._testing.assert_series_equal |
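# A few standalone one-liners restating the inference rules exercised above
# (plain pandas, illustrative values, safe to evaluate in isolation):
#   pd.Series([None, pd.NaT, "2013-08-05 15:30:00.000001"]).dtype  # datetime64[ns]
#   pd.Series([None, pd.NaT, "1 Day"]).dtype                       # timedelta64[ns]
#   pd.Series([pd.Interval(0, 1), pd.Interval(0, 2), None]).dtype  # interval[float64]
#   pd.Series({"b": 1, "a": 0, "c": 2}).index.tolist()             # ['b', 'a', 'c']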
import argparse
import itertools
from collections import defaultdict
from glob import glob
from shutil import copy2
import multiprocessing as mp
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from qpputils import dataparser as dp
from Timer import Timer
from crossval import InterTopicCrossValidation
# Define the Font for the plots
plt.rcParams.update({'font.size': 45, 'font.family': 'serif'})
plt.rcParams.update({'font.size': 45, 'font.family': 'serif', 'font.weight': 'normal'})
"""The next three lines are used to force matplotlib to use font-Type-1 """
plt.rcParams['ps.useafm'] = True
plt.rcParams['pdf.use14corefonts'] = True
plt.rcParams['text.usetex'] = True
parser = argparse.ArgumentParser(description='Query Prediction Using Reference lists',
usage='python3.6 qpp_ref.py -c CORPUS ... <parameter files>',
epilog='Unless --generate is given, will try loading the files')
parser.add_argument('-c', '--corpus', default=None, help='The corpus to be used', choices=['ROBUST', 'ClueWeb12B'])
parser.add_argument('--generate', action='store_true')
parser.add_argument('--nocache', action='store_false', help='Add this option in order to generate all pkl files')
PREDICTORS_WO_QF = ['clarity', 'wig', 'nqc', 'smv', 'rsd', 'uef/clarity', 'uef/wig', 'uef/nqc']
PRE_RET_PREDICTORS = ['preret/AvgIDF', 'preret/AvgSCQTFIDF', 'preret/AvgVarTFIDF', 'preret/MaxIDF',
'preret/MaxSCQTFIDF', 'preret/MaxVarTFIDF']
PREDICTORS = PRE_RET_PREDICTORS + PREDICTORS_WO_QF
# NUMBER_OF_DOCS = (5, 10, 25, 50, 100, 250, 500, 1000)
SIMILARITY_FUNCTIONS = {'Jac_coefficient': 'jac', 'Top_Docs_overlap': 'sim', 'RBO_EXT_100': 'rbo',
'RBO_FUSED_EXT_100': 'rbof'}
# Filter out filled markers and marker settings that do nothing.
MARKERS = ['+', 'x', '.', '*', 'X', 'v']
LINE_STYLES = ['--', '-', ':', ':']
# MARKERS_STYLE = [''.join(i) for i in itertools.product(LINE_STYLES, MARKERS)]
LAMBDA = np.linspace(start=0, stop=1, num=11)
# MARKERS = ['-^', '-v', '-D', '-x', '-h', '-H', 'p-', 's-', '--v', '--1', '--2', '--D', '--x', '--h', '--H', '^-.',
# '-.v', '1-.', '2-.', '-.D', '-.x', '-.h', '-.H', '3-.', '4-.', 's-.', 'p-.', '+-.', '*-.']
COLORS = ['#2A88AA', '#93BEDB', '#203D78', '#60615C', '#E57270']
# COLORS = ['#1D2735', '#135960', '#2F8F6D', '#8DC05F']
NAMES_DICT = {'rbo': 'Ref-RBO', 'sim': 'Ref-Overlap', 'wig': 'WIG', 'rsd': 'RSD', 'preret/AvgSCQTFIDF': 'AvgSCQ',
'preret/AvgVarTFIDF': 'AvgVar', 'uef/clarity': 'UEF(Clarity)', 'preret/MaxIDF': 'MaxIDF'}
class GenerateResults:
def __init__(self, corpus, corr_measure='pearson', load_from_pkl=True):
self.corpus = corpus
self.__set_paths(corpus)
self.corr_measure = corr_measure
self.results_dirs_dict = self._cp_result_file_to_dirs()
self.load_from_pkl = load_from_pkl
@classmethod
def __set_paths(cls, corpus):
"""This method sets the default paths of the files and the working directories, it assumes the standard naming
convention of the project"""
_corpus_test_dir = dp.ensure_dir(f'~/QppUqvProj/Results/{corpus}/test/')
# AP file for the cross validation process
cls.query_ap_file = dp.ensure_file(f'{_corpus_test_dir}/ref/QLmap1000-title')
# CV folds mapping file
cls.cv_map_file = dp.ensure_file(f'{_corpus_test_dir}/2_folds_30_repetitions.json')
# The data dir for the Graphs
cls.data_dir = dp.ensure_dir(f'~/QppUqvProj/Graphs/{corpus}/data')
# The results base dir for the Graphs
cls.results_dir = dp.ensure_dir(f'~/QppUqvProj/Graphs/{corpus}/referenceLists/title/all_vars/general')
cls.raw_res_base_dir = dp.ensure_dir(
f'~/QppUqvProj/Results/{corpus}/uqvPredictions/referenceLists/title/all_vars/general')
_ap_file = f'~/QppUqvProj/Results/{corpus}/test/basic/QLmap1000'
cls.true_ap_file = dp.ensure_file(_ap_file)
def _cp_result_file_to_dirs(self):
destination_dirs = defaultdict(str)
for lam in LAMBDA:
for sim, pred in itertools.product(SIMILARITY_FUNCTIONS.values(), PREDICTORS):
dest_dir = dp.ensure_dir(f'{self.results_dir}/{sim}/{pred}/lambda-{lam}/predictions')
destination_dirs[sim, pred, f'{lam:.2f}'] = dest_dir
src_dir = dp.ensure_dir(f'{self.raw_res_base_dir}/{sim}/{pred}/predictions')
prediction_files = glob(f'{src_dir}/predictions-*+lambda+{lam}')
for _file in prediction_files:
copy2(_file, dest_dir)
return destination_dirs
def generate_graph_df(self, similarity, predictor):
_dict = defaultdict(list)
def append_to_full_results_dict(result, lambda_param):
_dict['predictor'].append(predictor)
_dict['sim_func'].append(similarity)
_dict['result'].append(result)
_dict['lambda'].append(lambda_param)
for lam in LAMBDA:
lambda_param = f'{lam:.2f}'
result = self._calc_cv_result(similarity, predictor, lambda_param)
append_to_full_results_dict(result, lambda_param)
return pd.DataFrame.from_dict(_dict)
def _calc_cv_result(self, similarity, predictor, lambda_param):
predictions_dir = self.results_dirs_dict.get((similarity, predictor, lambda_param))
cv_obj = InterTopicCrossValidation(k=2, rep=30, folds_map_file=self.cv_map_file, predictions_dir=predictions_dir, load=True,
ap_file=self.query_ap_file, test=self.corr_measure)
mean = cv_obj.calc_test_results()
return mean
def generate_results_df(self, cores=4):
_pkl_file = f'{self.data_dir}/pkl_files/lambda_full_results_df_{self.corpus}_{self.corr_measure}.pkl'
if self.load_from_pkl:
try:
file_to_load = dp.ensure_file(_pkl_file)
full_results_df = pd.read_pickle(file_to_load)
except AssertionError:
print(f'\nFailed to load {_pkl_file}')
print(f'Will generate {_pkl_file} and save')
with mp.Pool(processes=cores) as pool:
result = pool.starmap(self.generate_graph_df,
itertools.product(SIMILARITY_FUNCTIONS.values(), PREDICTORS))
pool.close()
full_results_df = pd.concat(result, axis=0)
full_results_df.to_pickle(_pkl_file)
else:
with mp.Pool(processes=cores) as pool:
result = pool.starmap(self.generate_graph_df,
itertools.product(SIMILARITY_FUNCTIONS.values(), PREDICTORS))
pool.close()
full_results_df = pd.concat(result, axis=0)
full_results_df.to_pickle(_pkl_file)
return full_results_df
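# Hedged usage sketch (comments only): it assumes the ~/QppUqvProj directory
# layout encoded in __set_paths exists on disk and that the corpus name is one
# of the argparse choices above.
#   gen = GenerateResults('ROBUST', corr_measure='pearson', load_from_pkl=True)
#   full_df = gen.generate_results_df(cores=4)
#   plot_graphs(full_df, 'ROBUST')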
def plot_graphs(df: pd.DataFrame, corpus):
# print(df)
df['result'] = pd.to_numeric(df['result'])
df['lambda'] = pd.to_numeric(df['lambda'])
for simi, _df in df.groupby('sim_func'):
fig = plt.figure(figsize=(16.0, 10.0)) # in inches!
print(simi)
print(_df.drop('sim_func', axis=1).set_index('lambda').groupby('predictor')['result'])
mar = 0
for predictor, pdf in _df.drop('sim_func', axis=1).set_index('lambda').groupby('predictor'):
# if predictor in SKIP:
# continue
pdf['result'].plot(legend=True, marker=MARKERS[mar], label=predictor, linewidth=2, markersize=15, mew=5)
plt.legend()
mar += 1
plt.title(f'\\textbf{{{corpus} - {simi}}}')
plt.xlabel('$\\mathbf{\\lambda}$')
plt.ylabel("\\textbf{Pearson}")
# plt.ylabel('Correlation')
# plt.savefig(f'../../plot_now/{corpus}-{simi}.png')
plt.show()
def plot_sim_graph(orig_df: pd.DataFrame, simi, corpus):
corpus_names = {'ClueWeb12B': 'CW12', 'ROBUST': 'ROBUST'}
df = orig_df.set_index('sim_func')
df['result'] = pd.to_numeric(df['result'])
df['lambda'] = | pd.to_numeric(df['lambda']) | pandas.to_numeric |
from __future__ import annotations
import numpy as np
import pandas as pd
from sklearn import datasets
from IMLearn.metrics import mean_square_error
from IMLearn.utils import split_train_test
from IMLearn.model_selection import cross_validate
from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression
from sklearn.linear_model import Lasso
from utils import *
import plotnine as gg
def select_polynomial_degree(n_samples: int = 100, noise: float = 5):
"""
Simulate data from a polynomial model and use cross-validation to select the best fitting degree
Parameters
----------
n_samples: int, default=100
Number of samples to generate
noise: float, default = 5
Noise level to simulate in responses
"""
# Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps Gaussian noise
# and split into training- and testing portions
def f(x):
return (x + 3) * (x + 2) * (x + 1) * (x - 1) * (x - 2)
X = np.linspace(-1.2, 2, n_samples)
y = f(X) + np.random.normal(0, noise, n_samples)
train_X, train_y, test_X, test_y = split_train_test( | pd.DataFrame(X) | pandas.DataFrame |
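# A plain-numpy sketch of the degree-selection idea described in the docstring
# above: np.polyfit stands in for PolynomialFitting and a hand-rolled k-fold
# split stands in for IMLearn's cross_validate. Illustrative only, not the
# course API.
def _select_degree_numpy_sketch(X, y, max_degree=10, k=5, seed=0):
    rng = np.random.default_rng(seed)
    folds = np.array_split(rng.permutation(len(X)), k)
    val_errors = []
    for degree in range(max_degree + 1):
        fold_errs = []
        for i in range(k):
            val_idx = folds[i]
            train_idx = np.concatenate([folds[j] for j in range(k) if j != i])
            coefs = np.polyfit(X[train_idx], y[train_idx], degree)
            fold_errs.append(np.mean((np.polyval(coefs, X[val_idx]) - y[val_idx]) ** 2))
        val_errors.append(np.mean(fold_errs))
    return int(np.argmin(val_errors)), val_errors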
from datetime import datetime, timezone
from ast import literal_eval
from collections import OrderedDict, defaultdict
from functools import partial, partialmethod
from math import ceil, floor, fmod
import numpy as np
import os.path
import pandas as pd
import pyqtgraph as pg
from pyqtgraph import QtCore, QtGui
from .. import definitions as defs
def _get_datasrc(ax, require=True):
if ax.vb.datasrc is not None or not ax.vb.x_indexed:
return ax.vb.datasrc
vbs = [ax.vb for win in defs.windows for ax in win.axs]
for vb in vbs:
if vb.datasrc:
return vb.datasrc
if require:
assert ax.vb.datasrc, 'not possible to plot this primitive without a prior time-range to compare to'
lerp = lambda t,a,b: t*b+(1-t)*a
def _pdtime2index(ax, ts, any_end=False, require_time=False):
if isinstance(ts.iloc[0], pd.Timestamp):
ts = ts.view('int64')
else:
h = np.nanmax(ts.values)
if h < 1e7:
if require_time:
assert False, 'not a time series'
return ts
if h < 1e10: # handle s epochs
ts = ts.astype('float64') * 1e9
elif h < 1e13: # handle ms epochs
ts = ts.astype('float64') * 1e6
elif h < 1e16: # handle us epochs
ts = ts.astype('float64') * 1e3
datasrc = _get_datasrc(ax)
xs = datasrc.x
# try exact match before approximate match
exact = datasrc.index[xs.isin(ts)].to_list()
if len(exact) == len(ts):
return exact
r = []
for i,t in enumerate(ts):
xss = xs.loc[xs>t]
if len(xss) == 0:
t0 = xs.iloc[-1]
if any_end or t0 == t:
r.append(len(xs)-1)
continue
if i > 0:
continue
assert t <= t0, 'must plot this primitive in prior time-range'
i1 = xss.index[0]
i0 = i1-1
if i0 < 0:
i0,i1 = 0,1
t0,t1 = xs.loc[i0], xs.loc[i1]
dt = (t-t0) / (t1-t0)
r.append(lerp(dt, i0, i1))
return r
def _x2t(datasrc, x, ts2str):
if not datasrc:
return '',False
try:
x += 0.5
t,_,_,_,cnt = datasrc.hilo(x, x)
if cnt:
if not datasrc.timebased():
return '%g' % t, False
s = ts2str(t)
if defs.epoch_period >= 23*60*60: # daylight savings, leap seconds, etc
i = s.index(' ')
elif defs.epoch_period >= 59: # consider leap seconds
i = s.rindex(':')
elif defs.epoch_period >= 1:
i = s.index('.') if '.' in s else len(s)
elif defs.epoch_period >= 0.001:
i = -3
else:
i = len(s)
return s[:i],True
except Exception as e:
import traceback
traceback.print_exc()
return '',datasrc.timebased()
def _millisecond_tz_wrap(s):
if len(s) > 6 and s[-6] in '+-' and s[-3] == ':': # +01:00 fmt timezone present?
s = s[:-6]
return (s+'.000000') if '.' not in s else s
def _x2local_t(datasrc, x):
if defs.display_timezone == None:
return _x2utc(datasrc, x)
return _x2t(datasrc, x, lambda t: _millisecond_tz_wrap(datetime.fromtimestamp(t/1e9, tz=defs.display_timezone).isoformat(sep=' ')))
def _openfile(*args):
return open(*args)
def _loadwindata(win):
try: os.mkdir(os.path.expanduser('~/.finplot'))
except: pass
try:
f = os.path.expanduser('~/.finplot/'+win.title.replace('/','-')+'.ini')
settings = [(k.strip(),literal_eval(v.strip())) for line in _openfile(f) for k,d,v in [line.partition('=')] if v]
except:
return
kvs = {k:v for k,v in settings}
vbs = set(ax.vb for ax in win.axs)
zoom_set = False
for vb in vbs:
ds = vb.datasrc
if ds and (vb.linkedView(0) is None or vb.linkedView(0).datasrc is None or vb.master_viewbox):
period_ns = ds.period_ns
if kvs['min_x'] >= ds.x.iloc[0]-period_ns and kvs['max_x'] <= ds.x.iloc[-1]+period_ns:
x0,x1 = ds.x.loc[ds.x>=kvs['min_x']].index[0], ds.x.loc[ds.x<=kvs['max_x']].index[-1]
if x1 == len(ds.x)-1:
x1 += defs.right_margin_candles
x1 += 0.5
zoom_set = vb.update_y_zoom(x0, x1)
return zoom_set
def _savewindata(win):
if not defs.viewrestore:
return
try:
min_x = int(1e100)
max_x = int(-1e100)
for ax in win.axs:
if ax.vb.targetRect().right() < 4: # ignore empty plots
continue
if ax.vb.datasrc is None:
continue
t0,t1,_,_,_ = ax.vb.datasrc.hilo(ax.vb.targetRect().left(), ax.vb.targetRect().right())
min_x = np.nanmin([min_x, t0])
max_x = np.nanmax([max_x, t1])
if np.max(np.abs([min_x, max_x])) < 1e99:
s = 'min_x = %s\nmax_x = %s\n' % (min_x, max_x)
f = os.path.expanduser('~/.finplot/'+win.title.replace('/','-')+'.ini')
try: changed = _openfile(f).read() != s
except: changed = True
if changed:
_openfile(f, 'wt').write(s)
## print('%s saved' % win.title)
except Exception as e:
print('Error saving plot:', e)
from ..classes.fin_window import FinWindow
def _internal_windows_only():
return all(isinstance(win,FinWindow) for win in defs.windows)
def _create_plot(ax=None, **kwargs):
if ax:
return ax
if defs.last_ax:
return defs.last_ax
return create_plot(**kwargs)
def _clear_timers():
for timer in defs.timers:
timer.timeout.disconnect()
defs.timers.clear()
from ..classes.epoch_axis_item import EpochAxisItem
from ..classes.y_axis_item import YAxisItem
from ..classes.fin_cross_hair import FinCrossHair
def _add_timestamp_plot(master, prev_ax, viewbox, index, yscale):
native_win = isinstance(master, pg.GraphicsLayoutWidget)
if native_win and prev_ax is not None:
prev_ax.set_visible(xaxis=False) # hide the whole previous axis
axes = {'bottom': EpochAxisItem(vb=viewbox, orientation='bottom'),
'left': YAxisItem(vb=viewbox, orientation='left')}
if native_win:
ax = pg.PlotItem(viewBox=viewbox, axisItems=axes, name='plot-%i'%index, enableMenu=False)
else:
axw = pg.PlotWidget(viewBox=viewbox, axisItems=axes, name='plot-%i'%index, enableMenu=False)
ax = axw.plotItem
ax.ax_widget = axw
ax.axes['left']['item'].setWidth(defs.y_label_width) # this is to put all graphs on equal footing when texts vary from 0.4 to 2000000
ax.axes['left']['item'].setStyle(tickLength=-5) # some bug, totally inexplicable (why setting the default value again would fix repaint width when the axis scales down)
ax.axes['left']['item'].setZValue(30) # put axis in front instead of behind data
ax.axes['bottom']['item'].setZValue(30)
ax.setLogMode(y=(yscale.scaletype=='log'))
ax.significant_decimals = defs.significant_decimals
ax.significant_eps = defs.significant_eps
ax.crosshair = FinCrossHair(ax, color=defs.cross_hair_color)
ax.hideButtons()
ax.overlay = partial(_ax_overlay, ax)
ax.set_visible = partial(_ax_set_visible, ax)
ax.decouple = partial(_ax_decouple, ax)
ax.disable_x_index = partial(_ax_disable_x_index, ax)
ax.reset = partial(_ax_reset, ax)
ax.prev_ax = prev_ax
ax.win_index = index
if index%2:
viewbox.setBackgroundColor(defs.odd_plot_background)
viewbox.setParent(ax)
return ax
from ..classes.fin_view_box import FinViewBox
from ..classes.y_scale import YScale
def _ax_overlay(ax, scale=0.25, yaxis=False):
'''The scale parameter defines how "high up" on the initial plot this overlay will show.
The yaxis parameter can be one of [False, 'linear', 'log'].'''
yscale = yaxis if yaxis else 'linear'
viewbox = FinViewBox(ax.vb.win, init_steps=ax.vb.init_steps, yscale=YScale(yscale, 1), enableMenu=False)
viewbox.master_viewbox = ax.vb
viewbox.setZValue(-5)
viewbox.setBackgroundColor(ax.vb.state['background'])
ax.vb.setBackgroundColor(None)
viewbox.v_zoom_scale = scale
if hasattr(ax, 'ax_widget'):
ax.ax_widget.scene().addItem(viewbox)
else:
ax.vb.win.centralWidget.scene().addItem(viewbox)
viewbox.setXLink(ax.vb)
def updateView():
viewbox.setGeometry(ax.vb.sceneBoundingRect())
axo = pg.PlotItem(enableMenu=False)
axo.significant_decimals = defs.significant_decimals
axo.significant_eps = defs.significant_eps
axo.vb = viewbox
axo.prev_ax = None
axo.crosshair = None
axo.decouple = partial(_ax_decouple, axo)
axo.disable_x_index = partial(_ax_disable_x_index, axo)
axo.reset = partial(_ax_reset, axo)
axo.hideAxis('left')
axo.hideAxis('right')
axo.hideAxis('bottom')
axo.hideButtons()
viewbox.addItem(axo)
if yaxis and isinstance(axo.vb.win, pg.GraphicsLayoutWidget):
axi = YAxisItem(vb=axo.vb, orientation='right')
axo.axes['right'] = {'item':axi}
axi.linkToView(axo.vb)
row = ax.win_index
for col in range(1, 100):
if axo.vb.win.getItem(row, col) is None:
axo.vb.win.addItem(axi, row=row, col=1)
break
ax.vb.sigResized.connect(updateView)
defs.overlay_axs.append(axo)
updateView()
return axo
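# Illustrative sketch (assumes `ax` was created by _add_timestamp_plot above): overlay a
# secondary viewbox on the lower quarter of the plot with its own log-scaled right axis.
# Items added to the overlay are drawn behind the main plot and share its x-axis.
#
# volume_ax = ax.overlay(scale=0.25, yaxis='log')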
def _ax_set_visible(ax, crosshair=None, xaxis=None, yaxis=None, xgrid=None, ygrid=None):
if crosshair == False:
ax.crosshair.hide()
if xaxis is not None:
ax.getAxis('bottom').setStyle(showValues=xaxis)
if yaxis is not None:
ax.getAxis('left').setStyle(showValues=yaxis)
if xgrid is not None or ygrid is not None:
ax.showGrid(x=xgrid, y=ygrid)
if ax.getAxis('left'):
ax.getAxis('left').setEnabled(False)
if ax.getAxis('bottom'):
ax.getAxis('bottom').setEnabled(False)
def _ax_decouple(ax):
ax.setXLink(None)
if ax.prev_ax:
ax.prev_ax.set_visible(xaxis=True)
def _ax_disable_x_index(ax, decouple=True):
ax.vb.x_indexed = False
if decouple:
_ax_decouple(ax)
def _ax_reset(ax):
if ax.crosshair is not None:
ax.crosshair.hide()
for item in list(ax.items):
ax.removeItem(item)
if ax.vb.master_viewbox and hasattr(item, 'name') and item.name():
legend = ax.vb.master_viewbox.parent().legend
if legend:
legend.removeItem(item)
if ax.legend:
ax.legend.opts['offset'] = None
ax.legend.setParentItem(None)
ax.legend = None
ax.vb.reset()
ax.vb.set_datasrc(None)
if ax.crosshair is not None:
ax.crosshair.show()
from ..classes.fin_legend_item import FinLegendItem
def _create_legend(ax):
if ax.vb.master_viewbox:
ax = ax.vb.master_viewbox.parent()
if ax.legend is None:
ax.legend = FinLegendItem(border_color=defs.legend_border_color, fill_color=defs.legend_fill_color, size=None, offset=(3,2))
ax.legend.setParentItem(ax.vb)
def _update_significants(ax, datasrc, force):
# check if no epsilon set yet
default_dec = 0.99 < ax.significant_decimals/defs.significant_decimals < 1.01
default_eps = 0.99 < ax.significant_eps/defs.significant_eps < 1.01
if force or (default_dec and default_eps):
try:
sd,se = datasrc.calc_significant_decimals()
if sd or se != defs.significant_eps:
if force or default_dec or sd > ax.significant_decimals:
ax.significant_decimals = sd
if force or default_eps or se < ax.significant_eps:
ax.significant_eps = se
except:
pass # datasrc is probably full of NaNs
def _is_standalone(timeser):
# more than 10% gaps or time reversals probably means this is a standalone plot
return timeser.isnull().sum() + (timeser.diff()<=0).sum() > len(timeser)*0.1
def _create_series(a):
return a if isinstance(a, pd.Series) else pd.Series(a)
from ..classes.pandas_data_source import PandasDataSource
def _create_datasrc(ax, *args):
def do_create(args):
if len(args) == 1 and type(args[0]) == PandasDataSource:
return args[0]
if len(args) == 1 and type(args[0]) in (list, tuple):
args = [np.array(args[0])]
if len(args) == 1 and type(args[0]) == np.ndarray:
args = [pd.DataFrame(args[0].T)]
if len(args) == 1 and type(args[0]) == pd.DataFrame:
return PandasDataSource(args[0])
args = [_create_series(a) for a in args]
return PandasDataSource( | pd.concat(args, axis=1) | pandas.concat |
"""
Results containers and post-estimation diagnostics for IV models
"""
from __future__ import annotations
from linearmodels.compat.statsmodels import Summary
import datetime as dt
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
from numpy import array, asarray, c_, diag, empty, isnan, log, ndarray, ones, sqrt
from numpy.linalg import inv
from pandas import DataFrame, Series, concat, to_numeric
from property_cached import cached_property
import scipy.stats as stats
from statsmodels.iolib.summary import SimpleTable, fmt_2cols, fmt_params
from statsmodels.iolib.table import default_txt_fmt
import linearmodels
from linearmodels.iv._utility import annihilate, proj
from linearmodels.iv.data import IVData
from linearmodels.shared.base import _ModelComparison, _SummaryStr
from linearmodels.shared.hypotheses import (
InvalidTestStatistic,
WaldTestStatistic,
quadratic_form_test,
)
from linearmodels.shared.io import _str, add_star, pval_format
from linearmodels.typing import ArrayLike, Float64Array, OptionalArrayLike
def stub_concat(lists: Sequence[Sequence[str]], sep: str = "=") -> List[str]:
col_size = max([max(map(len, stubs)) for stubs in lists])
out: List[str] = []
for stubs in lists:
out.extend(stubs)
out.append(sep * (col_size + 2))
return out[:-1]
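# For example (illustration only):
# stub_concat([["alpha", "b"], ["c"]]) -> ["alpha", "b", "=======", "c"]
# where the separator row is sep repeated (longest stub + 2) times.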
def table_concat(lists: Sequence[List[List[str]]], sep: str = "=") -> List[List[str]]:
col_sizes = []
for table in lists:
size = [[len(item) for item in row] for row in table]
size_arr = array(size)
col_sizes.append(list(asarray(size_arr.max(0))))
col_size = asarray(array(col_sizes).max(axis=0))
sep_cols: List[str] = [sep * (cs + 2) for cs in col_size]
out: List[List[str]] = []
for table in lists:
out.extend(table)
out.append(sep_cols)
return out[:-1]
class _LSModelResultsBase(_SummaryStr):
"""
Results from OLS model estimation
Parameters
----------
results : dict[str, any]
A dictionary of results from the model estimation.
model : _OLS
The model used to estimate parameters.
"""
def __init__(self, results: Dict[str, Any], model: Any) -> None:
self._resid = results["eps"]
self._wresid = results["weps"]
self._params = results["params"]
self._cov = results["cov"]
self.model = model
self._r2 = results["r2"]
self._cov_type = results["cov_type"]
self._rss = results["residual_ss"]
self._tss = results["total_ss"]
self._s2 = results["s2"]
self._debiased = results["debiased"]
self._f_statistic = results["fstat"]
self._vars = results["vars"]
self._cov_config = results["cov_config"]
self._method = results["method"]
self._kappa = results.get("kappa", None)
self._datetime = dt.datetime.now()
self._cov_estimator = results["cov_estimator"]
self._original_index = results["original_index"]
self._fitted = results["fitted"]
self._df_model = results.get("df_model", self._params.shape[0])
@property
def cov_config(self) -> Dict[str, Any]:
"""Parameter values from covariance estimator"""
return self._cov_config
@property
def cov_estimator(self) -> str:
"""Type of covariance estimator used to compute covariance"""
return self._cov_type
@property
def cov(self) -> DataFrame:
"""Estimated covariance of parameters"""
return self._cov
@property
def params(self) -> Series:
"""Estimated parameters"""
return self._params
@cached_property
def resids(self) -> Series:
"""Estimated residuals"""
return self._resid()
@cached_property
def fitted_values(self) -> Series:
"""Fitted values"""
return self._fitted()
@property
def idiosyncratic(self) -> Series:
"""
Idiosyncratic error
Notes
-----
Differs from resids since this is the estimated idiosyncratic shock
from the data. It has the same dimension as the dependent data.
The shape and nature of resids depends on the model estimated. These
estimates only depend on the model estimated through the estimation
of parameters and inclusion of effects, if any.
"""
return self.resids
@cached_property
def wresids(self) -> Series:
"""Weighted estimated residuals"""
return self._wresid()
@property
def nobs(self) -> int:
"""Number of observations"""
return self.model.dependent.shape[0]
@property
def df_resid(self) -> int:
"""Residual degree of freedom"""
return self.nobs - self.df_model
@property
def df_model(self) -> int:
"""Model degree of freedom"""
return int(self._df_model)
@property
def has_constant(self) -> bool:
"""Flag indicating the model includes a constant or equivalent"""
return self.model.has_constant
@property
def rsquared(self) -> float:
"""Coefficient of determination (R**2)"""
return self._r2
@property
def rsquared_adj(self) -> float:
"""Sample-size adjusted coefficient of determination (R**2)"""
n, k, c = self.nobs, self.df_model, int(self.has_constant)
return 1 - ((n - c) / (n - k)) * (1 - self._r2)
@property
def cov_type(self) -> str:
"""Covariance estimator used"""
return self._cov_type
@cached_property
def std_errors(self) -> Series:
"""Estimated parameter standard errors"""
std_errors = sqrt(diag(self.cov))
return Series(std_errors, index=self._vars, name="stderr")
@cached_property
def tstats(self) -> Series:
"""Parameter t-statistics"""
return Series(self._params / self.std_errors, name="tstat")
@cached_property
def pvalues(self) -> Series:
"""
Parameter p-values. Uses t(df_resid) if ``debiased`` is True, else the normal distribution
"""
if self.debiased:
pvals = 2 - 2 * stats.t.cdf(abs(self.tstats), self.df_resid)
else:
pvals = 2 - 2 * stats.norm.cdf(abs(self.tstats))
return Series(pvals, index=self._vars, name="pvalue")
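# Note: 2 - 2 * cdf(|t|) above is just the usual two-sided p-value 2 * (1 - cdf(|t|)).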
@property
def total_ss(self) -> float:
"""Total sum of squares"""
return self._tss
@property
def model_ss(self) -> float:
"""Residual sum of squares"""
return self._tss - self._rss
@property
def resid_ss(self) -> float:
"""Residual sum of squares"""
return self._rss
@property
def s2(self) -> float:
"""Residual variance estimator"""
return self._s2
@property
def debiased(self) -> bool:
"""Flag indicating whether covariance uses a small-sample adjustment"""
return self._debiased
@property
def f_statistic(self) -> WaldTestStatistic:
"""
Model F-statistic
Returns
-------
WaldTestStatistic
Test statistic for null all coefficients excluding constant terms
are zero.
Notes
-----
Despite name, always implemented using a quadratic-form test based on
estimated parameter covariance. Default is to use a chi2 distribution
to compute p-values. If ``debiased`` is True, divides statistic by
number of parameters tested and uses an F-distribution.
This version of the F-statistic directly uses the model covariance
estimator and so is robust against the same specification issues.
"""
return self._f_statistic
@property
def method(self) -> str:
"""Method used to estimate model parameters"""
return self._method
def conf_int(self, level: float = 0.95) -> DataFrame:
"""
Confidence interval construction
Parameters
----------
level : float
Confidence level for interval
Returns
-------
DataFrame
Confidence interval of the form [lower, upper] for each parameters
Notes
-----
Uses a t(df_resid) if ``debiased`` is True, else normal.
"""
ci_quantiles = [(1 - level) / 2, 1 - (1 - level) / 2]
if self._debiased:
q = stats.t.ppf(ci_quantiles, self.df_resid)
else:
q = stats.norm.ppf(ci_quantiles)
q = q[None, :]
ci = asarray(self.params)[:, None] + asarray(self.std_errors)[:, None] * q
return | DataFrame(ci, index=self._vars, columns=["lower", "upper"]) | pandas.DataFrame |
"""
preprocess images and train the lane navigation model
"""
import fnmatch
import os
import pickle
import random
from os import path
from os.path import exists
from os.path import join
import numpy as np
np.set_printoptions(formatter={'float_kind': lambda x: "%.4f" % x})
import pandas as pd
pd.set_option('display.width', 300)
pd.set_option('display.float_format', '{:,.4f}'.format)
pd.set_option('display.max_colwidth', 200)
# tensorflow
import tensorflow
import tensorflow.keras
from tensorflow.keras.models import load_model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Dropout, Flatten, Dense
from tensorflow.keras.optimizers import Adam
# sklearn
from sklearn.model_selection import train_test_split
# imaging
import matplotlib.pyplot as plt
from PIL import Image
from util import constants
from util.image_processing import pan
from util.image_processing import zoom
from util.image_processing import blur
from util.image_processing import adjust_brightness
from util.image_processing import my_imread
from util.image_processing import preprocess_image
def get_angle_from_filename(filename):
return int(filename[-11:-8])
def load_data(data_directory):
dir_list = os.listdir(data_directory)
image_paths = []
steering_angles = []
pattern = "*.png"
for d in dir_list:
if path.isdir(join(data_directory, d)):
filenames = os.listdir(join(data_directory, d))
for filename in filenames:
if fnmatch.fnmatch(filename, pattern):
image_paths.append(os.path.join(data_directory, d, filename))
steering_angles.append(get_angle_from_filename(filename))
return image_paths, steering_angles
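# Illustrative usage (the directory name is hypothetical):
# image_paths, steering_angles = load_data('data/images')
# assert len(image_paths) == len(steering_angles)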
def nvidia_model():
model = Sequential(name="Nvidia_Model")
model.add(Conv2D(24, (5, 5), strides=(2, 2), input_shape=(66, 200, 3), activation='elu'))
model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='elu'))
model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='elu'))
model.add(Conv2D(64, (3, 3), activation='elu'))
# model.add(Dropout(0.2))
model.add(Conv2D(64, (3, 3), activation='elu'))
model.add(Flatten())
model.add(Dropout(0.2))
model.add(Dense(100, activation='elu'))
model.add(Dense(50, activation='elu'))
model.add(Dense(10, activation='elu'))
model.add(Dense(1))
optimizer = Adam(learning_rate=constants.LEARNING_RATE)
# model = tfmot.quantization.keras.quantize_model(model) todo: figure out quantization
model.compile(loss='mse', optimizer=optimizer)
return model
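# Minimal sketch of how the model can be instantiated and inspected
# (layer shapes follow the 66x200x3 input defined above):
# model = nvidia_model()
# model.summary()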
def random_augment(image, steering_angle):
if np.random.rand() < 0.5:
image = pan(image)
if np.random.rand() < 0.5:
image = zoom(image)
if np.random.rand() < 0.5:
image = blur(image)
if np.random.rand() < 0.5:
image = adjust_brightness(image)
# image, steering_angle = random_flip(image, steering_angle) # maybe flip is not helping
return image, steering_angle
def image_data_generator(image_paths, steering_angles, batch_size, is_training):
while True:
batch_images = []
batch_steering_angles = []
for i in range(batch_size):
random_index = random.randint(0, len(image_paths) - 1)
image_path = image_paths[random_index]
image = my_imread(image_paths[random_index])
steering_angle = steering_angles[random_index]
if is_training:
# training: augment image
image, steering_angle = random_augment(image, steering_angle)
image = preprocess_image(image)
batch_images.append(image)
batch_steering_angles.append(steering_angle)
yield np.asarray(batch_images, dtype=np.float32), np.asarray(batch_steering_angles)
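# Sketch of generator-based training; batch size and step counts below are illustrative,
# not taken from this project's constants:
# model = nvidia_model()
# model.fit(image_data_generator(image_paths, steering_angles, batch_size=64, is_training=True),
#           steps_per_epoch=300, epochs=10,
#           validation_data=image_data_generator(image_paths, steering_angles, batch_size=64, is_training=False),
#           validation_steps=200)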
def test_data(image_paths, steering_angles, x_train, x_valid, y_train, y_valid):
image_index = random.randint(0, len(image_paths) - 1)
plt.imshow(Image.open(image_paths[image_index]))
print("image_path: %s" % image_paths[image_index])
print("steering_Angle: %d" % steering_angles[image_index])
df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import quantecon as qe
import requests
import string
"""
Description: Voter analysis for Index Coop DAO Decision Gate 2 Governance Votes
Prereqs:
- Download Snapshot vote reports either manually or using snapshot_report_download.py script
- Download the token's hodler distribution from Etherscan:
- for INDEX token, Download CSV link at https://etherscan.io/token/0x0954906da0Bf32d5479e25f46056d22f08464cab#balances
- this could not be automated because Etherscan requires a recaptcha; it could be automated with a Graph or Etherscan Pro API call
- Install python dependencies listed above
"""
# Parameters: Substitute with the values of the project and Snapshot vote you want to analyze
# Running analysis for all Index Coop Decision Gate 2 votes
vote_urls = {
'BTC2X-FLI': 'https://snapshot.org/#/index/proposal/Qmc2DPHoKnyYxRjQfwbpbBngt5xHZrLBgkywGqZm7hHnEU',
'SMI': 'https://snapshot.org/#/index/proposal/QmYBG5zerdSkC9TGwguy5biCS5h2cg49PQCHCukJqHmfE1',
'TTI': 'https://snapshot.org/#/index/proposal/QmTPVkgfJBB1go2DCFhmacjgSWrLUzTFimdTGdB7d9Q2ao',
'MVI': 'https://snapshot.org/#/index/proposal/QmadsabYMJC96jU2S2kPCSh1suVfDVApGLwrux2WwsHd7x',
'ETH2X-FLI2': 'https://snapshot.org/#/index/proposal/QmYHV2vdTaSubtNJefSoYx82ypsmtzrT7CGUU1EHsXzHC3',
'ETH2X-FLI1': 'https://snapshot.org/#/index/proposal/QmQwQn4k324kMKPjsSX6ZEzjkkKWh1DNfAN2mQ3dd5aP1a'
}
proposal_ids = [url.split('/')[-1] for url in vote_urls.values()]
local_download_folder_path = 'C:/Users/craig/Downloads/'
etherscan_token_hodler_csv_path = 'etherscan_index_hodler_table.csv'
# Remove treasury, vesting and DEX LP wallets. Manually entered from https://etherscan.io/token/0x0954906da0Bf32d5479e25f46056d22f08464cab#balances
wallet_addresses_to_exclude = [
'0xd89c642e52bd9c72bcc0778bcf4de307cc48e75a',
'<KEY>',
'<KEY>',
'0x9467cfadc9de245010df95ec6a585a506a8ad5fc',
'0xe2250424378b6a6dc912f5714cfd308a8d593986',
'0x71f2b246f270c6af49e2e514ca9f362b491fbbe1',
'0x4c11dfd35a4fe079b41d5d9729ed34c00d487712',
'0x66a7d781828b03ee1ae678cd3fe2d595ba3b6000',
'0x0d627ca04a97219f182dab0dc2a23fb4a5b02a9d',
'0x5c29aa6761803bcfda7f683eaa0ff9bddda3649d',
'0xce3c6312385fcf233ab0de574b0cb1a588566c3f',
'0xb93b505ed567982e2b6756177ddd23ab5745f309',
'0xdd111f0fc07f4d89ed6ff96dbab19a61450b8435',
'0x0f58793e8cf39d6b60919ffaf773a7f95a568146',
'0xa73df646512c82550c2b3c0324c4eedee53b400c',
'0xcf19a7c81fcf0e01c927f28a2b551405e58c77e5',
'0x3452a7f30a712e415a0674c0341d44ee9d9786f9',
'0x674bdf20a0f284d710bc40872100128e2d66bd3f',
'0x8f06fba4684b5e0988f215a47775bb611af0f986',
'0x673d140eed36385cb784e279f8759f495c97cf03'
]
# Enter the quorum threshold for the project, get current circulating supply from CoinGecko
# Warning: this provides approximate results based on the current outstanding supply and therefore may not be accurate
# for projects whose supply at the time of historical votes differs significantly from today's (e.g. due to high inflation).
# Historical supply could instead be fed in via database queries or historically accurate wallet balance CSV files
coin_gecko_response = requests.get('https://api.coingecko.com/api/v3/coins/index-cooperative?tickers=true&market_data=true').json()
circulating_supply = coin_gecko_response['market_data']['circulating_supply']
quorum_threshold = 0.15
yes_vote_threshold = 0.6
votes_needed_for_quorum = round(quorum_threshold * circulating_supply, 2)
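# For example, if circulating_supply were 1,000,000 INDEX, a 15% quorum would require
# 150,000 INDEX worth of votes (the actual figure is fetched live from CoinGecko above).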
def compute_gini(series_label:str, series_np_array:np.array):
gini = qe.gini_coefficient(series_np_array)
print(f'{series_label} Gini Coefficient: {gini}')
# Plot Lorenz curve
equality_curve, lorenz_curve = qe.lorenz_curve(series_np_array)
fig, ax = plt.subplots()
ax.plot(equality_curve, lorenz_curve, label=f'Lorenz curve of {series_label}')
ax.plot(equality_curve, equality_curve, label='Lorenz curve, equality')
ax.legend()
plt.show()
# Read Etherscan.io hodler wallet balance csv file
index_hodlers_with_treasury = pd.read_csv(etherscan_token_hodler_csv_path)
index_hodler_df = index_hodlers_with_treasury.loc[~index_hodlers_with_treasury.HolderAddress.isin(wallet_addresses_to_exclude)]
index_hodler_df = index_hodler_df.sort_values('Balance', ascending=False)
index_hodler_df['cumulative_dist'] = index_hodler_df['Balance'].cumsum()
index_hodler_df.index = range(len(index_hodler_df))
index_hodler_df.Balance.describe()
wallet_balance_sum = index_hodler_df['Balance'].sum()
theoretical_minimum_number_of_voters_needed = min(index_hodler_df.index[index_hodler_df['cumulative_dist'] > votes_needed_for_quorum]) + 1
print(f'The minimum number of (all) hodlers needed to pass an initiative is: {theoretical_minimum_number_of_voters_needed}')
theoretical_minimum_number_of_low_balance_voters = len(index_hodler_df) - max(index_hodler_df.index[(wallet_balance_sum-index_hodler_df['cumulative_dist']) > votes_needed_for_quorum])
print(f'The minimum number of (low balance) hodlers needed to pass an initiative if no one of higher rank participates is: {theoretical_minimum_number_of_low_balance_voters} / {len(index_hodler_df)}')
print(f'Global Wealth Gini Coefficient (2019): 0.885, source: https://en.wikipedia.org/wiki/List_of_countries_by_wealth_inequality')
hodler_balance_array = index_hodler_df['Balance'].to_numpy()
compute_gini('All INDEX wallet balances', hodler_balance_array)
index_hodler_df.sort_values('Balance').plot.bar(y='Balance', figsize=(24,12)).xaxis.set_visible(False)
index_hodler_df['percent_of_voting_supply'] = (index_hodler_df['cumulative_dist']/index_hodler_df['Balance'].sum()) * 100
index_hodler_df['percentile'] = (index_hodler_df.index/len(index_hodler_df)) * 100
index_hodler_df['HolderAddress'] = index_hodler_df['HolderAddress'].astype(str)
top_hodler_percentile_thresholds = [0.1, 1.0, 10.0] # assumed definition; this list was missing from the original source
number_of_wallets_in_threshold = [min(index_hodler_df.index[index_hodler_df['percentile'] > percentile]) for percentile in top_hodler_percentile_thresholds]
max_percentile_thresholds = [0.1, 1.0, 10.0, 100.0]
percentile_members = dict()
for i in range(0, len(max_percentile_thresholds)):
percentile = max_percentile_thresholds[i]
prior_percentile_range_max = max_percentile_thresholds[i-1] if i > 0 else 0.0
percentile_range = f'{str(prior_percentile_range_max)}-{max_percentile_thresholds[i]}%'
total_n_members = len(index_hodler_df.loc[index_hodler_df['percentile'] < percentile])
range_member_df = index_hodler_df.loc[(index_hodler_df['percentile'] < percentile) & (index_hodler_df['percentile'] >= prior_percentile_range_max)]
range_n_members = len(range_member_df)
member_list = range_member_df.HolderAddress.tolist()
percentile_members[percentile_range] = {
'percentile_val': percentile,
'total_n_members': total_n_members,
'range_n_members': range_n_members,
'member_list': member_list
}
voter_df = pd.DataFrame()
voter_df['address'] = index_hodler_df['HolderAddress']
analysis_results = dict()
def run_vote_analysis(vote_key:str):
analysis_results[vote_key] = dict()
vote_url = vote_urls[vote_key]
proposal_id = vote_url.split('/')[-1]
vote_df = | pd.read_csv(f'{local_download_folder_path}snapshot-report-{proposal_id}.csv') | pandas.read_csv |
# Reading an xlsx file and performing a matrix multiplication across it.
import pandas as pd
inputFileName='./ICC-Test-Championship.xlsx'
outputFileName='./result.csv'
dataIndiaEngland = | pd.read_excel(inputFileName, sheet_name='India-England-Forecast') | pandas.read_excel |
# Copyright 2019 <NAME>, Inc. and the University of Edinburgh. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import sys
import pandas as pd
from sumeval.metrics.rouge import RougeCalculator
from sumeval.metrics.bleu import BLEUCalculator
from models import SumEvaluator
from utils import Config
if __name__ == "__main__":
if len(sys.argv) < 5:
print("Config file(s) are missing")
print("Usage: {} <prepare_conf> <train_conf> <aggregate_conf> <generate_conf>")
sys.exit(1)
p_conf = Config(sys.argv[1])
t_conf = Config(sys.argv[2])
a_conf = Config(sys.argv[3])
g_conf = Config(sys.argv[4])
assert p_conf.conf_type == "prepare"
assert t_conf.conf_type == "train"
assert a_conf.conf_type == "aggregate"
assert g_conf.conf_type == "generate"
# Basepath
if "BASEPATH" not in os.environ:
basepath = "."
else:
basepath = os.environ["BASEPATH"]
# TODO(Yoshi): YELP dataset hard coded
data_dirpath = os.path.join(basepath,
"data",
"{}".format(p_conf.conf_name))
agg_test_filepath = os.path.join(data_dirpath,
"test{}.csv".format(a_conf.get_agg_name()))
# output/yelp-default_op2text_small_beam_agg.csv
agg_pred_filepath = os.path.join(basepath,
"output",
"{}_op2text_{}_{}_{}_agg.csv".format(p_conf.conf_name,
t_conf.conf_name,
a_conf.conf_name,
g_conf.conf_name))
true_df = pd.read_csv(agg_test_filepath)
pred_df = pd.read_csv(agg_pred_filepath)
# if gold_summary is missing, take it from another file
if "gold_summary" not in true_df:
print("WARNING: Missing gold_summary. Borrow it from another file.")
ref_df = pd.read_csv(os.path.join(data_dirpath,
"test_8_10_all_all_300_6.csv"))
true_df = pd.merge(true_df, ref_df[["eid", "gold_summary"]])
merge_df = pd.merge(true_df[["eid", "gold_summary", "input_text"]], pred_df)
# sumeval evaluator
evaluator = SumEvaluator(metrics=t_conf["metrics"],
stopwords=False,
lang="en")
# Generation evaluation
eval_df = evaluator.eval(merge_df["gold_summary"].tolist(),
merge_df["pred"].tolist())
eval_df.mean(axis=0).to_csv(agg_pred_filepath.replace(".csv", "_eval.csv"),
index=False)
eval_df.mean(axis=0).to_csv(agg_pred_filepath.replace(".csv", ".eval"))
eval_df.to_csv(agg_pred_filepath.replace(".csv", "_all.csv"))
| pd.concat([merge_df[["eid", "input_text"]], eval_df], axis=1) | pandas.concat |
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL0", "CL1"])
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')
])
weights2 = pd.DataFrame(vals, index=widx, columns=["CO0", "CO1"])
weights = {"CL": weights1, "CO": weights2}
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15, 0.1, 0.15],
[0.075, 0.45, 0.075, 0.25],
[-0.5, 0.2, pd.np.NaN, pd.np.NaN]],
index=weights["CL"].index.levels[0],
columns=['CL0', 'CL1', 'CO0', 'CO1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_instr_rets_key_error():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5')])
irets = pd.Series([0.02, 0.01, 0.012], index=idx)
vals = [1, 1/2, 1/2, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(KeyError):
util.calc_rets(irets, weights)
def test_calc_rets_nan_instr_rets():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([pd.np.NaN, pd.np.NaN, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([pd.np.NaN, pd.np.NaN, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_weight():
# see https://github.com/matthewgilbert/mapping/issues/8
# missing weight for return
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
rets = pd.Series([0.02, -0.03, 0.06], index=idx)
vals = [1, 1]
widx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
# extra instrument
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights1 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLH5'), # extra day for no weight instrument
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5')
])
rets = pd.Series([0.02, -0.03, 0.06, 0.05, 0.01], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights1)
# leading / trailing returns
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights2 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-05'), 'CLF5')])
rets = pd.Series([0.02, -0.03, 0.06, 0.05], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights2)
def test_to_notional_empty():
instrs = pd.Series()
prices = pd.Series()
multipliers = pd.Series()
res_exp = pd.Series()
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_same_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_extra_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2, 13.1], index=['CLZ6', 'COZ6',
'GCZ6', 'extra'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_missing_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, pd.np.NaN],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_different_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
res_exp = pd.Series([-30.20, 2 * 30.5 / 1.32, 10.2 * 0.8],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
assert_series_equal(res, res_exp)
def test_to_notional_duplicates():
instrs = pd.Series([1, 1], index=['A', 'A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37, 200.37], index=['A', 'A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100, 100], index=['A', 'A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = | pd.Series(['USD', 'USD'], index=['A', 'A']) | pandas.Series |
"""
(C) Copyright 2019 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on Jun 21, 2017
"""
from __future__ import division
import warnings
import networkx as nx
import numpy as np
import pandas as pd
import scipy.stats as stats
from ..utils.stat_utils import robust_lookup
# TODO: support categorical (non-numeric) data predecessors.
COVARIATE = "covariate"
HIDDEN = "hidden"
TREATMENT = "treatment"
OUTCOME = "outcome"
CENSOR = "censor"
EFFECT_MODIFIER = "effect_modifier"
VALID_VAR_TYPES = {COVARIATE, HIDDEN, TREATMENT, OUTCOME, CENSOR, EFFECT_MODIFIER}
CATEGORICAL = "categorical"
SURVIVAL = "survival"
CONTINUOUS = "continuous"
PROBABILITY = "probability"
DEFAULT_LINK_TYPE = "linear"
BASELINE_SURVIVAL_PARAM = 1.0
class CausalSimulator3(object):
TREATMENT_METHODS = {"random": lambda x, p, snr, params: CausalSimulator3._treatment_random(x, p),
"odds_ratio": lambda x, p, snr, params: CausalSimulator3._treatment_odds_ratio(x, p, snr),
"quantile_gauss_fit": lambda x, p, snr, params: CausalSimulator3._treatment_quantile_gauss_fit(
x, p, snr),
"logistic": lambda x, p, snr, params: CausalSimulator3._treatment_logistic_dichotomous(x, p,
params=params),
"gaussian": lambda x, p, snr, params: CausalSimulator3._treatment_gaussian_dichotomous(x, p,
snr)}
# G for general - applicable to all types of variables
G_LINKING_METHODS = {"linear": lambda x, beta=None: CausalSimulator3._linear_link(x, beta),
"affine": lambda x, beta=None: CausalSimulator3._affine_link(x, beta),
"exp": lambda x, beta=None: CausalSimulator3._exp_linking(x, beta),
"log": lambda x, beta=None: CausalSimulator3._log_linking(x, beta),
"poly": lambda x, beta=None: CausalSimulator3._poly_linking(x, beta)}
# O for outcome - outcome specific linking
O_LINKING_METHODS = {
"marginal_structural_model": lambda x, t, m, beta=None: CausalSimulator3._marginal_structural_model_link(
x, t, m, beta=beta),
None: lambda x, beta=None: x
}
def __init__(self, topology, var_types, prob_categories, link_types, snr, treatment_importances,
treatment_methods="gaussian", outcome_types=CATEGORICAL, effect_sizes=None,
survival_distribution="expon", survival_baseline=1, params=None):
"""
Constructor
Args:
topology (np.ndarray): A boolean adjacency matrix for variables (including covariates, treatment and outcome
variables of the model).
Every row is a binary vector for a variable, where v[i, j] = 1 iff j is a parent of i
var_types (Sequence[str]): Vector the size of variables stating every variable to be "covariate",
"hidden", "outcome", "treatment", "censor".
**Notes**: if type(pd.Series) variable names will be var_types.index, otherwise,
if no-key-vector - var names will be just range(num-of-variables).
prob_categories (Sequence[float|None]): vector the size of the number of variables.
if prob_categories[i] = None -> than variable i is considered continuous.
otherwise -> prob_categories[i] should be a list (or any iterable) which
size specifies number of categories variable i has, and it contains
multinomial probabilities for those categories (i.e. list non negative and
sums to 1).
link_types (str|Sequence[str]): Either a single string or a sequence of strings (the size of the number
of variables) specifying the link function relating each variable to its covariate parents
snr (float|Sequence[float]): Signal to noise ratio (use 1.0 to eliminate noise in the system).
May be a vector the size of number of variables for stating different snr
values for different variables.
treatment_importances (float|Sequence[float]): The effect of treatment on the outcome. A float between 0
and 1.0 stating how much weight the treatment variable have
vs. the other parents of an outcome variable.
*To support multi-treatment* - place a list the size of the
number of treatment variables (as stated in var_types).
The matching between treatment variable and its importance
will be according to the order of the treatment variables
and the order of the list. If all treatments variables has
the same importance - pass the float value.
treatment_methods (str|Sequence[str]): method for creating treatment assignment and propensities, can be
one of {"random", "gaussian", "logistic"}.
*To support multi-treatment* - place a list the size of the number of
treatment variables. The matching between treatment variable and its
creation method will be according to the order of the treatment
variables and the order of the list. If all treatment variables has the
same type - pass the str value.
outcome_types (str|Sequence[str]): outcome can either be 'survival' or 'binary'.
*To support multi-outcome* - place a list the size of the number of outcome
variables (as stated in var_types). The matching between outcome variable and
its type will be according to the order of the outcome variables and the order
of the list. If all outcome variables has the same type - pass the str value.
effect_sizes (float|Sequence[float|None]|None): The wanted mean effect size between two counterfactuals.
If None - The mean effect size will not be adjusted, but will be
whatever generated.
If float - The mean effect size will be adjusted to be approximately
the given number (considering the noise)
*To support multi-outcome* - a list the size the number of the outcome
variables (as stated in var_types). The matching between outcome
variable and its effect size will be according to the order of the
outcome variables and the order of the list.
survival_distribution (Sequence[str] or str): The distribution family from which to generate the outcome
values of outcome variables that their corresponding outcome_types is
"survival".
Default value is exponent distribution.
The same survival distribution will be used for the corresponding
censoring variable as well.
*To support multi-outcome* - place a list the size of the number of
outcome variables of type "survival" (as stated in outcome_types). The
matching between survival outcome variable and its survival distribution
will be according to the order of the outcome variables and the order of
the list. If all outcome variables has the same survival distribution -
pass the str value (if present).
*Ignore if no outcome variable is of type survival*
survival_baseline (Sequence[float] or float): The survival baseline from the CoxPH model that will be the
basics for the parameters of the corresponding survival_distribution.
The same survival baseline will be used for the corresponding censoring
variable as well (if present).
Default value is 1 (no multiplicative meaning for baseline value).
*To support multi-outcome* - place a list the size of the number of
outcome variables of type "survival" (as stated in outcome_types). The
matching between survival outcome variable and its survival distribution
will be according to the order of the outcome variables and the order of
the list. If all outcome variables has the same survival distribution -
pass the str value.
*Ignore if no outcome variable is of type survival*
params (dict | None): Various parameters related to the generation process (e.g. the slope for
sigmoid-based functions etc.).
The form of: {var_name: {param_name: param_value, ...}, ...}
"""
# Find the indices of each type of variable:
var_types = pd.Series(var_types)
self.var_names = var_types.index.to_series().reset_index(drop=True)
self.var_types = var_types
self.treatment_indices = var_types[var_types == TREATMENT].index
self.outcome_indices = var_types[var_types == OUTCOME].index
self.covariate_indices = var_types[(var_types == COVARIATE) | (var_types == HIDDEN)].index
self.hidden_indices = var_types[var_types == HIDDEN].index
self.censor_indices = var_types[var_types == CENSOR].index
self.effmod_indices = var_types[var_types == EFFECT_MODIFIER].index
self.linking_coefs = {} # will accumulate the generated coefficients. {var: Series(coef, predecessors)}
# COMPLETE topology INTO A SQUARE ADJACENCY MATRIX:
# # let M be number of total variables, H number of variables to generate and L=M-H number of variables in a
# # given baseline dataset (that generated variables can be based on). Given Topology matrix can have either a
# # shape of MxM or HxM - in the latter case the matrix is completed into MxM by adding zero rows (since L
# # given variables would not be re-genreated anyway, they will be consider independent variables).
# if topology.shape[0] != topology.shape[1]:
# rows, cols = topology.shape
# if cols > rows:
# null_submatrix = np.zeros((cols - rows, cols), dtype=bool)
# topology = np.row_stack((topology, null_submatrix))
# else:
# raise ValueError("Topology matrix has {rows} rows and {cols} columns. This is not supported since"
# "T[i,j] = 1 iff j is parent of i. ")
if topology.shape[0] != len(var_types):
raise ValueError("Number of variables in topology graph do not correspond to the number of variables states"
" in the variable types")
self.m = len(var_types) # number of variables
# Create a graph out of matrix topology:
self.topology = topology
self.graph_topology = nx.from_numpy_matrix(topology.transpose(), create_using=nx.DiGraph()) # type:nx.DiGraph
self.graph_topology = nx.relabel_nodes(self.graph_topology,
dict(list(zip(list(range(self.m)), self.var_names))))
# check that outcome variable is not dependant on more than 1 treatment variable
for i in self.outcome_indices:
predecessors = list(self.graph_topology.predecessors(i))
treatment_predecessors = self.treatment_indices.intersection(predecessors)
if len(treatment_predecessors) > 1: # outcome variable is dependent on more than one treatment
raise ValueError(
"Outcome {outcome} should have only one treatment affecting it. The current topology has outcome"
" variable dependant on {n_parent_treat} treatment parents which are: "
"{treatment_parents}".format(outcome=i, n_parent_treat=len(treatment_predecessors),
treatment_parents=treatment_predecessors))
elif len(treatment_predecessors) == 0: # outcome variable has no treatment affecting it
warnings.warn("Outcome variable {} has no treatment effecting it".format(i), UserWarning)
# check that outcome variable is dependant on most 1 censor variable
for i in self.outcome_indices:
predecessors = list(self.graph_topology.predecessors(i))
censor_predecessors = self.censor_indices.intersection(predecessors)
if len(censor_predecessors) > 1: # outcome variable is dependent on more than one censor variable
raise ValueError(
"Outcome {outcome} should have at most one censor variable affecting it. The current topology has "
"outcome variable dependant on {n_parent_cens} treatment parents which are: "
"{cens_parents}".format(outcome=i, n_parent_cens=len(censor_predecessors),
cens_parents=censor_predecessors))
# check that effect modifier is independent on treatment and affects only the outcome:
for i in self.effmod_indices:
successors = list(self.graph_topology.successors(i)) # materialize the iterator so len() works
if len(successors) == 0 or self.outcome_indices.intersection(successors).size < 1:
raise ValueError("Effect modifier variable {name} must affect an outcome variable".format(name=i))
ancestors = nx.ancestors(self.graph_topology, i)
if self.treatment_indices.intersection(ancestors).size > 0:
raise ValueError("Effect modifier variable {name} must not be affected by "
"treatment variable (which is one of {ances})".format(name=i, ances=ancestors))
# convert scalars to vectors if necessary.
self.prob_categories = self._convert_scalars_to_vectors(x=prob_categories, default_value=None,
x_type="prob_categories")
self.prob_categories = self.prob_categories.map(lambda x: pd.Series(x) if x is not None else x)
if self.prob_categories.isnull().all():
warnings.warn("Got all Nones in prob_categories. If simulation has Treatment variables in it, "
"this will throw an exception, as treatment variables must be categorical", UserWarning)
# Check that all treatment variables are categorical:
for i in self.treatment_indices:
if self.prob_categories[i] is None:
raise ValueError("Only categorical treatment is currently supported. However, treatment variable {t} "
"is not categorical. Please specify corresponding category_probabilities".format(t=i))
self.snr = self._convert_scalars_to_vectors(x=snr, default_value=1, x_type="snr")
self.link_types = self._convert_scalars_to_vectors(x=link_types, default_value=DEFAULT_LINK_TYPE,
x_type="link_type")
# if not all([x in self.VALID_LINK_TYPES for x in self.link_types]):
all_linking_types = list(self.G_LINKING_METHODS.keys()) + list(self.O_LINKING_METHODS.keys())
if not self.link_types.isin(all_linking_types).all():
raise ValueError("link type must be one of {}, "
"got {} instead.".format(list(all_linking_types),
list(set(link_types) - set(all_linking_types))))
self.treatment_methods = self._map_properties_to_variables(values=treatment_methods,
keys=self.treatment_indices, var_type="treatment",
value_type="methods")
# if not all([x in TREATMENT_METHODS.keys() for x in self.treatment_methods.values()]):
if not self.treatment_methods.isin(list(self.TREATMENT_METHODS.keys())).all():
raise ValueError("link type must be one of {}, "
"got {} instead.".format(list(self.TREATMENT_METHODS.keys()),
list(
set(treatment_methods) - set(self.TREATMENT_METHODS.keys()))))
self.treatment_importances = self._map_properties_to_variables(values=treatment_importances,
keys=self.treatment_indices,
var_type="treatment", value_type="importance")
self.outcome_types = self._map_properties_to_variables(values=outcome_types, keys=self.outcome_indices,
var_type="outcome", value_type="type")
for i in self.outcome_indices:
if self.outcome_types[i] is CONTINUOUS and self.prob_categories[i] is not None:
raise ValueError("Continuous outcome must be associated with None category probability. "
"This was not the case in variable {outcome_var}. "
"Might lead to undefined behaviour.".format(outcome_var=i))
if self.outcome_types[i] is CATEGORICAL and self.prob_categories[i] is None:
raise ValueError("Categorical outcome must be associated with category probability. However, None was"
"associated with variable {outcome_var}".format(outcome_var=i))
self.effect_sizes = self._map_properties_to_variables(values=effect_sizes, keys=self.outcome_indices,
var_type="outcome", value_type="effect size")
# map survival_related properties to survival outcome and their corresponding censor variables.
survival_outcome_variables = self.outcome_types[self.outcome_types.eq("survival")].index
self.survival_distribution = self._map_properties_to_variables(values=survival_distribution,
keys=survival_outcome_variables,
var_type="outcome",
value_type="survival_distribution")
self.survival_distribution[self.survival_distribution.isnull()] = "expon" # default is exponent distribution
self.survival_baseline = self._map_properties_to_variables(values=survival_baseline,
keys=survival_outcome_variables, var_type="outcome",
value_type="survival_baseline")
self.survival_baseline[self.survival_baseline.isnull()] = np.abs(np.random.normal(
loc=0.0, scale=1.0, size=self.survival_baseline.isnull().sum()))
for i in survival_outcome_variables:
topology_predecessors = list(self.graph_topology.predecessors(i))
censor_predecessors = self.censor_indices.intersection(topology_predecessors)
if len(censor_predecessors) > 0:
censor_predecessors = censor_predecessors[0]
# match between the outcome value and it's matching censor variable:
self.survival_distribution[censor_predecessors] = self.survival_distribution[i]
self.survival_baseline[censor_predecessors] = self.survival_baseline[i]
# self.params = params if params is not None else dict(zip(self.var_names, [None] * self.var_names.size))
self.params = params if params is not None else {}
# ### Initializing helper functions ### #
def _convert_scalars_to_vectors(self, x, default_value, x_type):
"""
Converts scalars (e.g. float, int, str, etc.) into vectors. Mapping between variable names to the desired value.
In context: If arguments given to the class init are scalar (i.e. float, int, str, etc.), converts them into
vector shape - mapping every variable to the given value
Args:
x (Any): the value wished to map to the variables.
if supplied with some sequence (e.g. list, array, Series, etc.) it will map the sequence to
variable names. if supplied with a scalar - it will duplicate the single value to all vars.
default_value (str|float|int|None): in case x=None (no value is supplied), map default_value to all vars
x_type (str): The type of value that currently being processed (e.g. the variable name in the python code),
so in case there is an error, it can display the python-variable that caused the error.
Returns:
x (pd.Series): A Series mapping between variable name and a some wanted value.
Raises:
ValueError: If a sequence is given, but its length doesn't match the number of variables in topology.
"""
if np.isscalar(x) or x is None: # a scalar, not a sequence
if x is None: # put default value
x = pd.Series(data=default_value, index=self.var_names)
else: # a scalar is given, map it to all variables
x = pd.Series(data=x, index=self.var_names)
else:
# a sequence has been provided:
if len(x) != self.m:
raise ValueError("{x_type} should have same size as number of variables."
"Got {emp} instead of {sup}".format(x_type=x_type, emp=len(x), sup=self.m))
if isinstance(x, pd.Series) and x.index.difference(self.var_names).empty:
# if supplied with a Series which has it own indexing, and it matches the the topology variables, then
# keep it as is.
x = x
else:
# either a simpler sequence or a Series with bad indexing, map to variable names.
x = pd.Series(data=x, index=self.var_names)
return x
@staticmethod
def _map_properties_to_variables(values, keys, var_type, value_type):
"""
Maps between covariate variables properties to these properties.
Args:
values (Any): some property of some variable (e.g. 0.7 for treatment_importance or
"binary" for outcome_type)
keys (Sequence[Any]): The names indices to map the given properties (values) (e.g. treatment_indices)
var_type (str {"covariate", "hidden", "treatment", "outcome", "censor"}): The type of variable the
properties being mapped to (e.g. "treatment", "outcome", "covariate")
value_type (str): The name type that the property belongs to. (e.g. the variable name in the python code),
so in case there's an error, it can display the python-variable that caused the error.
Returns:
res (pd.Series): A map between the given keys (some covariate variable names indices) to the given values
Raises:
ValueError: When a Sequence is given as values (e.g. list of properties) but it does not match the length
of the keys.
Warnings:
UserWarning: If a values is a dict, it can may not be touched, unless its keys' do not match the variable
names. A warning is issued.
Examples:
Where effect_sizes is a Sequence or a float, outcome_indices are the indices names of the outcome variables
in the graph. the variable type discussed is "outcome" (since it is effect-size). The python variable name
is effect_size, thus the value_type is effect_size.
map_properties_to_variables(values=effect_sizes, keys=self.outcome_indices, var_type="outcome",
value_type="effect size")
"""
if np.isscalar(values) or values is None:
# values is a single value (i.e. int ot string), map its value to all given treatment variables:
res = dict(list(zip(keys, [values] * len(keys))))
else:
# some sequence provided
if len(keys) != len(values):
raise ValueError("The number of {var_t} variables: {n_keys} does not match the size of the list "
"depicting the {val_t} of creating each {var_t} variable: "
"{n_vals}".format(var_t=var_type, n_keys=len(keys),
val_t=value_type, n_vals=len(values)))
# values = values.values() if isinstance(values, dict) else values
if isinstance(values, dict):
# if given property is given by a dictionary, make sure this dict keys matches to the indices it
# suppose to map to:
res = values
if list(values.keys()) != keys:
warnings.warn("{var_t} {val_t} was given as dictionary but its keys ({val}) does not match the "
"{var_t} indices provided in topology ({keys}). You may expect "
"undefined behaviour".format(var_t=var_type, val_t=value_type,
val=list(values.keys()), keys=keys), UserWarning)
else:
res = dict(list(zip(keys, values)))
res = pd.Series(res, dtype=np.dtype(object))
res = res.infer_objects()
return res
# ### Main functionality ### #
def generate_data(self, X_given=None, num_samples=None, random_seed=None):
"""
Generates tables of dataset given the object's initial parameters.
Args:
num_samples (int): Number of samples that will be in the dataset.
X_given (pd.DataFrame): A baseline dataset to generate from. This dataset may contain only some of variables
stated in the initialized topology. The rest of the dataset (variables which are
stated in the topology and not in this dataset) will be generated.
**Notes**: The data given will not be overwritten and will be taken as is. It is
the user's responsibility to ensure that the given table has no dependent variables since
they will not be re-generated according to the graph.
random_seed (int): A seed for the pseudo-random-number-generator in order to reproduce results.
Returns:
(pd.DataFrame, pd.DataFrame, pd.DataFrame): 3-element tuple containing:
- **X** (*pd.DataFrame*): A (num_samples x num_covariates) matrix of all covariates
(including treatments and outcomes) over samples.
- **propensities** (*pd.DataFrame*): A (num_samples x num_treatments) matrix (or vector) of propensity
values of every treatment.
- **counterfactuals** (*pd.DataFrame*): A (num_samples x num_outcomes) matrix -
"""
if random_seed is not None:
np.random.seed(random_seed)
if num_samples is None and X_given is None:
raise ValueError("Must supply either a dataset (X) or number of samples to generate")
if num_samples is not None and X_given is not None:
warnings.warn("Got both number of samples (num_samples) and a baseline dataset (X_given). "
"Number of samples will be ignored and only X_given will be used.", UserWarning)
if X_given is None:
num_samples = num_samples
patients_index = list(range(num_samples))
else:
num_samples = X_given.index.size
patients_index = X_given.index
# generate latent continuous covariates - every variable is guaranteed to have a population variance of 1.0
# X_latent = pd.DataFrame(index=patients_index, columns=self.var_types.index)
X = pd.DataFrame(index=patients_index, columns=self.var_types.index)
if X_given is not None: # if a dataset is given, integrate it to the current dataset being build.
X.loc[:, X_given.columns] = X_given
for col in X_given.columns:
X.loc[:, col] = X[col].astype(X_given.dtypes[col]) # insist on keeping original types.
propensities = pd.DataFrame(index=patients_index,
columns=pd.MultiIndex.from_tuples([(i, j) for i in self.treatment_indices
for j in self.prob_categories[i].index]))
cf_columns = []
for outcome in self.outcome_indices:
predecessors = list(self.graph_topology.predecessors(outcome))
treatment_predecessor = self.treatment_indices.intersection(predecessors)
if not treatment_predecessor.empty:
treatment_predecessor = treatment_predecessor[0]
for j in self.prob_categories[treatment_predecessor].index:
cf_columns.append((outcome, j))
else:
cf_columns.append((outcome, "null"))
counterfactuals = pd.DataFrame(index=patients_index, columns=pd.MultiIndex.from_tuples(cf_columns))
# create the variables according to their topological order to avoid creating variables before their
# dependencies are created:
for i in nx.topological_sort(self.graph_topology):
# i = self.var_names[i] # get the name corresponding to the i'th location in topology
if X.loc[:, i].notnull().any():
# current column has non-NAN values meaning it has some data in it so it will not be overwritten
continue
var_type = self.var_types[i]
X_parents = X.loc[:, self.topology[self.var_names[self.var_names == i].index[0], :]]
if var_type == COVARIATE or var_type == HIDDEN or var_type == EFFECT_MODIFIER:
X_signal, beta = self.generate_covariate_col(X_parents=X_parents, link_type=self.link_types[i],
snr=self.snr[i], prob_category=self.prob_categories[i],
num_samples=num_samples, var_name=i)
elif var_type == TREATMENT:
X_signal, propensity, beta = self.generate_treatment_col(X_parents=X_parents,
link_type=self.link_types[i],
snr=self.snr[i],
method=self.treatment_methods[i],
prob_category=self.prob_categories[i],
var_name=i)
propensities[i] = propensity
elif var_type == OUTCOME:
X_signal, cf, beta = self.generate_outcome_col(X_parents=X_parents, link_type=self.link_types[i],
snr=self.snr[i], prob_category=self.prob_categories[i],
effect_size=self.effect_sizes[i],
outcome_type=self.outcome_types[i],
survival_distribution=self.survival_distribution.get(i),
survival_baseline=self.survival_baseline.get(i),
var_name=i)
counterfactuals[i] = cf
# print 'mean treatment effect: %0.3f' % (np.mean(cf1 - cf0))
elif var_type == CENSOR:
outcome_successor = self.outcome_indices.intersection(self.graph_topology.successors(i))[0]
treatment_predecessor = self.treatment_indices.intersection(self.graph_topology.predecessors(i))
treatment_predecessor = treatment_predecessor[0] if len(treatment_predecessor) > 0 else None
X_signal, beta = self.generate_censor_col(X_parents=X_parents, link_type=self.link_types[i],
snr=self.snr[i], prob_category=self.prob_categories[i],
outcome_type=self.outcome_types[outcome_successor],
treatment_importance=self.treatment_importances.
get(treatment_predecessor),
survival_distribution=self.survival_distribution.get(i),
survival_baseline=self.survival_baseline.get(i),
var_name=i)
else:
raise ValueError("{c_type} is not supported type of variable. "
"Supported types are {s_types}".format(c_type=var_type, s_types=VALID_VAR_TYPES))
X.loc[:, i] = X_signal
self.linking_coefs[i] = beta
# print X_latent.var(axis=0, ddof=1)
# print X.var(axis=0, ddof=1)
return X, propensities, counterfactuals
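# Illustrative usage sketch (not part of the original module): assuming `sim` is an
# already-configured CausalSimulator3 instance, data generation could look roughly like:
#     X, propensities, counterfactuals = sim.generate_data(num_samples=1000, random_seed=0)
#     # X holds all generated variables, propensities the per-treatment-category
#     # probabilities, and counterfactuals the outcome under each treatment category.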
def generate_covariate_col(self, X_parents, link_type, snr, prob_category, num_samples, var_name=None):
"""
Generates a single signal (covariate) column
Args:
X_parents (pd.DataFrame): Sub-dataset containing only the relevant columns (features which are topological
parents to the current covariate being created)
link_type (str): How the parents variables (parents covariate columns) influence the current generated
column. What relation is there between them.
snr (float): Signal to noise ratio that controls the amount of noise to add (value of 1.0 will not generate
noise)
prob_category (pd.Series|None): A vector which length states the number of classes (number of discrete
values) and every value is fractional - the probability of the corresponding
class.
**Notes**: vector must sum to 1. If None, the covariate column is left
untouched (i.e. continuous)
num_samples (int): number of samples to generate
var_name (int|str): The name of the variable currently being generated. Optional.
Returns:
(pd.Series, pd.Series): 2-element tuple containing:
- **X_final** (*pd.Series*): The final (i.e. noised and discretize [if needed]) covariate column.
- **beta** (*pd.Series*): The coefficients used to generate the current variable from its predecessors.
Raises:
ValueError: if the given link_type is not a valid link_type. (Supported link types are placed in
self.G_LINKING_METHODS)
"""
# if variable has no parents - just sample from normal Gaussian distribution:
if X_parents.empty:
X_new = pd.Series(np.random.normal(loc=0.0, scale=1.0, size=num_samples), index=X_parents.index)
beta = pd.Series(dtype=np.float64)
else:
# generate covariate column based on the parents' variables
linking_method = self.G_LINKING_METHODS.get(link_type)
if linking_method is None:
raise KeyError("link type must be one of {},got {} instead.".format(list(self.G_LINKING_METHODS.keys()),
link_type))
beta = self.linking_coefs.get(var_name)
X_new, beta = linking_method(X_parents, beta=beta)
# noise the sample
X_noised_cont, _, _ = self._noise_col(X_new, snr=snr)
# discretize variables if required:
X_final = self._discretize_col(X_noised_cont, prob_category)
return X_final, beta
def generate_treatment_col(self, X_parents, link_type, snr, prob_category, method="logistic", var_name=None):
"""
Generates a single treatment variable column.
Args:
X_parents (pd.DataFrame): Sub-dataset containing only the relevant columns (features which are topological
parents to the current covariate being created)
link_type (str): How the parents variables (parents covariate columns) influence the current generated
column. What relation is there between them.
snr (float): Signal to noise ratio that controls the amount of noise to add (value of 1.0 will not generate
noise)
prob_category (pd.Series|None): A k-length distribution vector over k-1 treatments with the probability
of being untreated in prob_category[0] (prob_category.iloc[0]) and all
other k-1 probabilities correspond to the k-1 treatments.
**Notes**: vector must sum to 1. If None - the covariate column is left
untouched (i.e. continuous)
method (str): A type of method to generate the treatment signal and the corresponding propensities.
var_name (int|str): The name of the variable currently being generated. Optional.
Returns:
(pd.Series, pd.DataFrame, pd.Series): 3-element tuple containing:
- **treatment** (*pd.Series*): Treatment assignment to each sample.
- **propensity** (*pd.DataFrame*): The marginal conditional probability of treatment given covariates.
A DataFrame shaped (num_samples x num_of_possible_treatment_categories).
- **beta** (*pd.Series*): The coefficients used to generate the current variable from its predecessors.
Raises:
ValueError: if prob_category is None (treatment must be categorical)
ValueError: If prob_category is not a legitimate probability vector (non negative, sums to 1)
"""
# Check input validity:
if prob_category is None:
raise ValueError("Treatment variable must be categorical, therefore it must have a legitimate distribution "
"over its possible values. Got None instead.")
CausalSimulator3._check_for_legitimate_probabilities(prob_category)
# generate only the continuous signal since it is later processed (therefore prob_category = None)
x_continuous, beta = self.generate_covariate_col(X_parents=X_parents, link_type=link_type, snr=snr,
prob_category=None, num_samples=X_parents.index.size,
var_name=var_name)
generation_method = self.TREATMENT_METHODS.get(method)
if generation_method is None:
raise KeyError("The given method {method} is not supported, "
"only {valid_methods}.".format(valid_methods=list(self.TREATMENT_METHODS.keys()),
method=method))
else:
params = self.params.get(var_name, {})
propensity, treatment = generation_method(x_continuous, prob_category, snr=snr, params=params)
return treatment.astype(int), propensity.astype(float), beta
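# Minimal sketch of the prob_category convention used above (values are hypothetical):
#     prob_category = pd.Series([0.5, 0.3, 0.2])
#     # i.e. P(untreated) = 0.5 plus two treatment categories with probabilities 0.3 and 0.2;
#     # the vector must sum to 1, otherwise a ValueError is raised (see docstring above).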
def generate_outcome_col(self, X_parents, link_type, snr, prob_category, outcome_type, treatment_importance=None,
effect_size=None, survival_distribution=None, survival_baseline=None, var_name=None):
"""
Generates a single outcome variable column.
Args:
X_parents (pd.DataFrame): Sub-dataset containing only the relevant columns (features which are topological
parents to the current covariate being created)
link_type (str): How the parents variables (parents covariate columns) influence the current generated
column. What relation is there between them.
treatment_importance (float): The effect power of the treatment on the current generated outcome variable,
as opposed to other variables that may influence it.
snr (float): Signal to noise ratio that controls the amount of noise to add (value of 1.0 will not generate
noise)
prob_category (pd.Series|None): A k-length distribution vector over k-1 treatments with the probability
of being untreated in prob_category[0] (prob_category.iloc[0]) and all
other k-1 probabilities correspond to the k-1 treatments.
**Notes**: vector must sum to 1. If None - the covariate column is left
untouched (i.e. continuous)
effect_size (float): wanted mean effect size.
outcome_type (str): Type of outcome variable. Either categorical (and continuous) or survival
survival_distribution (str): The type of the distribution of which to sample the survival time from.
relevant only if outcome_type is "survival"
survival_baseline: The baseline value of the Cox PH model. Relevant only if outcome_type is "survival"
var_name (int|str): The name of the variable currently being generated. Optional.
Returns:
(pd.Series, pd.DataFrame, pd.DataFrame): 3-element tuple containing:
- **x_outcome** (*pd.Series*): Outcome assignment for each sample.
- **cf** (*pd.DataFrame*): Holding the counterfactuals for every possible treatment category of the
outcome's treatment predecessor variable.
- **beta** (*pd.DataFrame*): The coefficients used to generate the current variable from its predecessors.
Raises:
ValueError: if the given link_type is not a valid link_type. (Supported link types are placed in
self.G_LINKING_METHODS)
ValueError: if prob_category is neither None nor a legitimate distribution vector.
"""
# drop censor indices as they do not affect the actual values of the outcome, only the masking later:
X_parents = X_parents.drop(self.censor_indices, axis='columns') # type: pd.DataFrame
if X_parents.columns.size == 0:
raise ValueError("Outcome variable cannot be independent variable (i.e. have no parent in graph topology)")
# get effect modifiers:
effect_modifier = self.effmod_indices.intersection(X_parents.columns)
X_effmod = X_parents.loc[:, effect_modifier] # type: pd.DataFrame
X_covariates = X_parents.drop(effect_modifier, axis="columns") # type: pd.DataFrame
# get the treatment variable that affect current outcome.
treatment_parent = self.treatment_indices.intersection(X_covariates.columns)
if len(treatment_parent) > 1: # outcome variable is dependent on more than one treatment
raise ValueError(
"Outcome should have only one treatment affecting it. The current topology has outcome"
" variable dependant on {n_parent_treat} treatment parents which are: "
"{treatment_parents}".format(n_parent_treat=len(treatment_parent),
treatment_parents=treatment_parent))
else:
try: # len(treatment_parent) == 1: outcome variable is dependent on exactly one treatment
treatment_parent = treatment_parent[0]
X_treatment = X_covariates.loc[:, treatment_parent] # type: pd.Series
X_covariates = X_covariates.drop(treatment_parent, axis="columns") # type: pd.DataFrame
except IndexError: # len(treatment_parents) == 0 outcome variable is independent of treatment variables
treatment_parent = None
X_treatment = pd.Series(dtype=np.float64)
has_treatment_parent = not X_treatment.empty
treatment_importance = treatment_importance or self.treatment_importances.get(treatment_parent)
original_treatment_categories = X_treatment.unique().astype(int) # before being manipulated
# convexly re-weight variables according to whether the treatment has a different importance than the covariates:
if treatment_importance is not None:
# !knowingly not weighting (especially weighting-down) effect modifiers! (so only re-weighting covariates)
X_treatment *= treatment_importance # how much the treatment affects the outcome
if not X_covariates.columns.empty: # how much non-treatments (regular covariates) affect outcome
X_covariates *= float(float(1 - treatment_importance) / X_covariates.columns.size)
X_parents = | pd.concat([X_covariates, X_effmod, X_treatment], axis="columns", ignore_index=False) | pandas.concat |
import xml.etree.ElementTree as ET
import os
import json
import string
import copy
import re
import pandas as pd
import numpy as np
from datetime import datetime
from nltk.corpus import wordnet
import sys
from nltk import Tree
import spacy
from insert_whitespace import append_text
from config import DATA_PATH, TMP_PATH
path_sample = os.path.join(DATA_PATH, "_sample_doc.json") # ->root/data/original/_sample_doc.json
MEANTIME_PARSING_FOLDER = os.path.join(DATA_PATH, "MEANTIME-prep")
OUT_PATH = os.path.join(TMP_PATH, "output_data")
CONTEXT_RANGE = 250
nlp = spacy.load('en_core_web_sm')
# opens and loads the newsplease-format out of the json file: _sample_doc.json
with open(path_sample, "r") as file:
newsplease_format = json.load(file)
import os
source_path = os.path.join(MEANTIME_PARSING_FOLDER, 'MEANTIME')
result_path = os.path.join(MEANTIME_PARSING_FOLDER, 'test_parsing')
result_path2 = os.path.join(MEANTIME_PARSING_FOLDER, 'test_parsing2')
intra = os.path.join(source_path, 'intra-doc_annotation')
intra_cross = os.path.join(source_path, 'intra_cross-doc_annotation')
meantime_types = {"PRO": "PRODUCT",
"FIN": "FINANCE",
"LOC": "LOCATION",
"ORG": "ORGANIZATION",
"OTH": "OTHER",
"PER": "PERSON",
"GRA": "GRAMMATICAL",
"SPE": "SPEECH_COGNITIVE",
"MIX": "MIXTURE"}
def to_nltk_tree(node):
if node.n_lefts + node.n_rights > 0:
return Tree(node.orth_, [to_nltk_tree(child) for child in node.children])
else:
return node.orth_
def conv_files(path):
doc_files = {}
# coref_dics = {}
entity_mentions = []
event_mentions = []
summary_df = | pd.DataFrame() | pandas.DataFrame |
"""This is a collection of helper functions"""
import pandas as pd
from datetime import datetime as dt
class NewDataFrame(pd.DataFrame):
"""Class that inherits from pandas DataFrame"""
def null_count(self):
"""Method that returns the numbers of null values in a DataFrame"""
return self.isnull().sum()
def list_2_series(self, list):
"""Method takes a list, turns it into a Series,
adds a new column and returns the NewDataFrame"""
new_series = | pd.Series(list) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 07 14:42:32 2021
@author: silviapagliarini
"""
import os
import numpy as np
import pandas as pd
import csv
from pydub import AudioSegment
import scipy.io.wavfile as wav
def opensmile_executable(data, baby_id, classes, args):
"""
Generate a text file of shell commands, executable in a shell, to compute openSMILE features multiple times.
If option labels_creation == True, it also generates a csv file containing number of the sound and label.
INPUT
- path to directory
- type of dataset (can be a single directory, or a dataset keywords): see args.baby_id
OUTPUT
A text file for each directory with the command lines to compute MFCC for each extracted sound in the directory.
"""
f = open(args.data_dir + '/' + 'executable_opensmile_' + baby_id + '.txt', 'w+')
i = 0
while i < len(data):
#name = './build/progsrc/smilextract/SMILExtract -C config/mfcc/MFCC12_0_D_A.conf -I /Users/silviapagliarini/Documents/Datasets/InitialDatasets/singleVoc/single_vocalizations/'
#name = './build/progsrc/smilextract/SMILExtract -C config/mfcc/MFCC12_0_D_A.conf -I /Users/silviapagliarini/Documents/Datasets/completeDataset/'
name = './build/progsrc/smilextract/SMILExtract -C config/mfcc/MFCC12_0_D_A.conf -I /Users/silviapagliarini/Documents/Datasets/subsetSilence/'
#name = './build/progsrc/smilextract/SMILExtract -C config/mfcc/MFCC12_0_D_A.conf -I /Users/silviapagliarini/Documents/Datasets/HumanLabels/exp1'
#name = './build/progsrc/smilextract/SMILExtract -C config/mfcc/MFCC12_0_D_A.conf -I /Users/silviapagliarini/Documents/BabbleNN/interspeech_Wave'
if baby_id == 'AnneModel':
f.write(name + '/' + os.path.basename(data[i]) + ' -csvoutput ' + os.path.basename(data[i])[0:-3] + 'mfcc.csv')
f.write('\n')
else:
#output_dir = '/Users/silviapagliarini/Documents/opensmile/HumanData_analysis/humanVSlena/human'
#output_dir = '/Users/silviapagliarini/Documents/opensmile/HumanData_analysis/completeDataset'
output_dir = '/Users/silviapagliarini/Documents/opensmile/HumanData_analysis/subsetSilence'
os.makedirs(output_dir + '/' + baby_id, exist_ok=True)
for c in range(0,len(classes)):
os.makedirs(output_dir + '/' + baby_id + '/' + classes[c], exist_ok=True)
f.write(name + baby_id[0:4] + '/' + baby_id + '_segments/' + os.path.basename(data[i]) + ' -csvoutput ' + output_dir + '/' + baby_id + '/' + os.path.basename(data[i])[0:-3] + 'mfcc.csv')
#f.write(name + '/' + baby_id + '_segments/' + os.path.basename(data[i]) + ' -csvoutput ' + output_dir + '/' + baby_id + '/' + os.path.basename(data[i])[0:-3] + 'mfcc.csv')
f.write('\n')
i = i + 1
f.close()
if args.labels_creation == True:
# writing the data rows
labels = []
i = 0
while i < len(data):
j = 0
while j < len(classes):
if os.path.basename(data[i]).find(classes[j]) != -1:
labels.append(classes[j])
j = j + 1
i = i + 1
with open(args.data_dir + '/' + 'LENAlabels_' + baby_id + '.csv', 'w') as csvfile:
# creating a csv writer object
csvwriter = csv.writer(csvfile)
# writing the fields
csvwriter.writerow(['ID', 'Label'])
i = 0
while i < len(data):
csvwriter.writerow([str(i), labels[i]])
i = i + 1
print('Done')
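# For reference, each line written above is an openSMILE command of roughly this shape
# (the input/output paths below are placeholders, not actual dataset locations):
#     ./build/progsrc/smilextract/SMILExtract -C config/mfcc/MFCC12_0_D_A.conf \
#         -I <segments_dir>/<sound>.wav -csvoutput <output_dir>/<sound>.mfcc.csv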
def list(args):
"""
Create a list of all the babies in the dataset in order to simplify the following steps of the analysis.
INPUT
- path to directory (subdirectories should be the single family directories).
OUTPUT
- .csv file with name of the baby and age of the baby in days.
"""
listDir = glob2.glob(args.data_dir + '/0*')
with open(args.data_dir + '/baby_list_basic.csv', 'w') as csvfile:
# creating a csv writer object
csvwriter = csv.writer(csvfile)
# writing the fields
csvwriter.writerow(['ID', 'AGE'])
i = 0
while i<len(listDir):
name = os.path.basename(listDir[i])
age = int(name[6])*365 + int(name[8]) * 30 + int(name[10])
csvwriter.writerow([name, age])
i = i + 1
print('Done')
def merge_labels(babies, args):
"""
Create a LENA-like .csv with the human corrections included. When a label has been identified as wrong, it is substituted with the
noise label NOF.
INPUT
- path to directory
- list of babies
OUTPUT
.csv file containing cleaned labels.
"""
for i in range(0,len(babies)):
print(babies[i])
lena = pd.read_csv(args.data_dir + '/' + babies[i] + '_segments.csv')
human = pd.read_csv(args.data_dir + '/' + babies[i] + '_scrubbed_CHNrelabel_lplf_1.csv')
time_stamp_lena_start = lena["startsec"]
time_stamp_lena_end = lena["endsec"]
prominence = human["targetChildProminence"]
lena_labels = lena["segtype"]
CHNSP_pos = np.where(lena_labels == 'CHNSP')[0]
CHNNSP_pos = np.where(lena_labels == 'CHNNSP')[0]
pos = np.append(CHNSP_pos, CHNNSP_pos)
pos = sorted(pos)
for j in range(0, len(pos)):
if i < 2:
if prominence[j] > 2:
lena_labels[pos[j]] = 'NOF'
else:
if prominence[j] == False:
lena_labels[pos[j]] = 'NOF'
with open(args.data_dir + '/new_' + babies[i] + '_segments.csv', 'w') as csvfile:
# creating a csv writer object
csvwriter = csv.writer(csvfile)
# writing the fields
csvwriter.writerow(['segtype', 'startsec', 'endsec'])
i = 0
while i < len(time_stamp_lena_start):
csvwriter.writerow([lena_labels[i], time_stamp_lena_start[i], time_stamp_lena_end[i]])
i = i + 1
print('Done')
if __name__ == '__main__':
import argparse
import glob2
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--option', type=str, choices=['merge', 'list', 'executeOS'])
parser.add_argument('--data_dir', type=str)
parser.add_argument('--output_dir', type=str)
parser.add_argument('--baby_id', type = str)
parser.add_argument('--labels_creation', type = bool, default=False)
uniform_duration_args = parser.add_argument_group('Uniform')
uniform_duration_args.add_argument('--sd', type=int,
help='Expected sound duration in milliseconds', default = 1000)
uniform_duration_args.add_argument('--sr', type=int, help='Expected sampling rate',
default=16000)
args = parser.parse_args()
if args.output_dir != None:
if not os.path.isdir(args.data_dir + '/' + args.output_dir):
os.makedirs(args.data_dir + '/' + args.output_dir)
if args.option == 'executeOS':
# Labels (change only if needed)
# classes = ['B', 'S', 'N', 'MS', 'ME', 'M', 'OAS', 'SLEEP']
classes = ['MAN', 'FAN', 'CHNSP', 'CHNNSP']
#classes = ['CHNNSP']
if args.baby_id == 'initial':
# List of babies
summary = | pd.read_csv(args.data_dir + '/' + 'baby_list.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
class TestTimedeltaSeriesComparisons(object):
def test_compare_timedelta_series(self):
# regression test for GH5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
class TestPeriodSeriesArithmetic(object):
def test_ops_series_timedelta(self):
# GH 13043
ser = pd.Series([pd.Period('2015-01-01', freq='D'),
pd.Period('2015-01-02', freq='D')], name='xxx')
assert ser.dtype == object
expected = pd.Series([pd.Period('2015-01-02', freq='D'),
pd.Period('2015-01-03', freq='D')], name='xxx')
result = ser + pd.Timedelta('1 days')
tm.assert_series_equal(result, expected)
result = pd.Timedelta('1 days') + ser
tm.assert_series_equal(result, expected)
result = ser + pd.tseries.offsets.Day()
tm.assert_series_equal(result, expected)
result = pd.tseries.offsets.Day() + ser
tm.assert_series_equal(result, expected)
def test_ops_series_period(self):
# GH 13043
ser = pd.Series([pd.Period('2015-01-01', freq='D'),
pd.Period('2015-01-02', freq='D')], name='xxx')
assert ser.dtype == object
per = | pd.Period('2015-01-10', freq='D') | pandas.Period |
"""
data hash pandas / numpy objects
"""
import itertools
from typing import Optional
import numpy as np
from pandas._libs import Timestamp
import pandas._libs.hashing as hashing
from pandas.core.dtypes.cast import infer_dtype_from_scalar
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_extension_array_dtype,
is_list_like,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCIndexClass,
ABCMultiIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna
# 16 byte long hashing key
_default_hash_key = "0123456789123456"
def _combine_hash_arrays(arrays, num_items: int):
"""
Parameters
----------
arrays : generator
num_items : int
Should be the same as CPython's tupleobject.c
"""
try:
first = next(arrays)
except StopIteration:
return np.array([], dtype=np.uint64)
arrays = itertools.chain([first], arrays)
mult = np.uint64(1000003)
out = np.zeros_like(first) + np.uint64(0x345678)
for i, a in enumerate(arrays):
inverse_i = num_items - i
out ^= a
out *= mult
mult += np.uint64(82520 + inverse_i + inverse_i)
assert i + 1 == num_items, "Fed in wrong num_items"
out += np.uint64(97531)
return out
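# Usage sketch for the helper above (mirrors how it is driven further below in this module):
#     hashes = (hash_array(a) for a in (np.array([1, 2]), np.array([3, 4])))
#     combined = _combine_hash_arrays(hashes, 2)   # uint64 ndarray of length 2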
def hash_pandas_object(
obj,
index: bool = True,
encoding: str = "utf8",
hash_key: Optional[str] = _default_hash_key,
categorize: bool = True,
):
"""
Return a data hash of the Index/Series/DataFrame.
Parameters
----------
index : bool, default True
Include the index in the hash (if Series/DataFrame).
encoding : str, default 'utf8'
Encoding for data & key when strings.
hash_key : str, default _default_hash_key
Hash_key for string key to encode.
categorize : bool, default True
Whether to first categorize object arrays before hashing. This is more
efficient when the array contains duplicate values.
Returns
-------
Series of uint64, same length as the object
"""
from pandas import Series
if hash_key is None:
hash_key = _default_hash_key
if isinstance(obj, ABCMultiIndex):
return Series(hash_tuples(obj, encoding, hash_key), dtype="uint64", copy=False)
elif isinstance(obj, ABCIndexClass):
h = hash_array(obj.values, encoding, hash_key, categorize).astype(
"uint64", copy=False
)
h = Series(h, index=obj, dtype="uint64", copy=False)
elif isinstance(obj, ABCSeries):
h = hash_array(obj.values, encoding, hash_key, categorize).astype(
"uint64", copy=False
)
if index:
index_iter = (
hash_pandas_object(
obj.index,
index=False,
encoding=encoding,
hash_key=hash_key,
categorize=categorize,
).values
for _ in [None]
)
arrays = itertools.chain([h], index_iter)
h = _combine_hash_arrays(arrays, 2)
h = Series(h, index=obj.index, dtype="uint64", copy=False)
elif isinstance(obj, ABCDataFrame):
hashes = (hash_array(series.values) for _, series in obj.items())
num_items = len(obj.columns)
if index:
index_hash_generator = (
hash_pandas_object(
obj.index,
index=False,
encoding=encoding,
hash_key=hash_key,
categorize=categorize,
).values # noqa
for _ in [None]
)
num_items += 1
# keep `hashes` specifically a generator to keep mypy happy
_hashes = itertools.chain(hashes, index_hash_generator)
hashes = (x for x in _hashes)
h = _combine_hash_arrays(hashes, num_items)
h = Series(h, index=obj.index, dtype="uint64", copy=False)
else:
raise TypeError(f"Unexpected type for hashing {type(obj)}")
return h
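# Usage sketch (the concrete uint64 values depend on hash_key and encoding):
#     hash_pandas_object(pd.Series([1, 2, 3]))               # Series of uint64, length 3
#     hash_pandas_object(pd.Series([1, 2, 3]), index=False)  # hash the values only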
def hash_tuples(vals, encoding="utf8", hash_key: str = _default_hash_key):
"""
Hash an MultiIndex / list-of-tuples efficiently
Parameters
----------
vals : MultiIndex, list-of-tuples, or single tuple
encoding : str, default 'utf8'
hash_key : str, default _default_hash_key
Returns
-------
ndarray of hashed values array
"""
is_tuple = False
if isinstance(vals, tuple):
vals = [vals]
is_tuple = True
elif not is_list_like(vals):
raise TypeError("must be convertible to a list-of-tuples")
from pandas import Categorical, MultiIndex
if not isinstance(vals, ABCMultiIndex):
vals = MultiIndex.from_tuples(vals)
# create a list-of-Categoricals
vals = [
Categorical(vals.codes[level], vals.levels[level], ordered=False, fastpath=True)
for level in range(vals.nlevels)
]
# hash the list-of-ndarrays
hashes = (
_hash_categorical(cat, encoding=encoding, hash_key=hash_key) for cat in vals
)
h = _combine_hash_arrays(hashes, len(vals))
if is_tuple:
h = h[0]
return h
def hash_tuple(val, encoding: str = "utf8", hash_key: str = _default_hash_key):
"""
Hash a single tuple efficiently
Parameters
----------
val : single tuple
encoding : str, default 'utf8'
hash_key : str, default _default_hash_key
Returns
-------
hash
"""
hashes = (_hash_scalar(v, encoding=encoding, hash_key=hash_key) for v in val)
h = _combine_hash_arrays(hashes, len(val))[0]
return h
def _hash_categorical(c, encoding: str, hash_key: str):
"""
Hash a Categorical by hashing its categories, and then mapping the codes
to the hashes
Parameters
----------
c : Categorical
encoding : str
hash_key : str
Returns
-------
ndarray of hashed values array, same size as len(c)
"""
# Convert ExtensionArrays to ndarrays
values = np.asarray(c.categories.values)
hashed = hash_array(values, encoding, hash_key, categorize=False)
# we have uint64, as we don't directly support missing values
# we don't want to use take_nd which will coerce to float
# instead, directly construct the result with a
# max(np.uint64) as the missing value indicator
#
# TODO: GH 15362
mask = c.isna()
if len(hashed):
result = hashed.take(c.codes)
else:
result = np.zeros(len(mask), dtype="uint64")
if mask.any():
result[mask] = np.iinfo(np.uint64).max
return result
def hash_array(
vals,
encoding: str = "utf8",
hash_key: str = _default_hash_key,
categorize: bool = True,
):
"""
Given a 1d array, return an array of deterministic integers.
Parameters
----------
vals : ndarray, Categorical
encoding : str, default 'utf8'
Encoding for data & key when strings.
hash_key : str, default _default_hash_key
Hash_key for string key to encode.
categorize : bool, default True
Whether to first categorize object arrays before hashing. This is more
efficient when the array contains duplicate values.
Returns
-------
1d uint64 numpy array of hash values, same length as the vals
"""
if not hasattr(vals, "dtype"):
raise TypeError("must pass a ndarray-like")
dtype = vals.dtype
# For categoricals, we hash the categories, then remap the codes to the
# hash values. (This check is above the complex check so that we don't ask
# numpy if categorical is a subdtype of complex, as it will choke).
if is_categorical_dtype(dtype):
return _hash_categorical(vals, encoding, hash_key)
elif | is_extension_array_dtype(dtype) | pandas.core.dtypes.common.is_extension_array_dtype |
"""
Module containing metrics for the centralized version of hay_checker.
Some functions parameters are unused, they have been kept like this to allow
easier code evolution.
"""
import numpy as np
import pandas as pd
from sklearn.metrics import mutual_info_score
from haychecker.chc import task
def _completeness_todo(columns, df):
"""
Returns what to compute for each column in dict form, given the metric parameters.
:param columns:
:type columns: list
:param df:
:type df: DataFrame
:return: Dict containing what to run (pandas functions names or named lambdas) for each column.
:rtype: dict
"""
todo = dict()
if columns is None:
columns = list(df.columns)
for col in columns:
todo[col] = ["count"]
return todo
def completeness(columns=None, df=None):
"""
If a df is passed, the completeness metric will be run and results returned
as a list of scores, otherwise an instance of the Task class containing this
metric wil be returned, to be later run (possibly after adding to it other tasks/metrics).
:param columns: Columns on which to run the metric, None to run the completeness
metric on the whole table.
:type columns: list
:param df: Dataframe on which to run the metric, None to have this function return a Task instance containing
this metric to be run later.
:type df: DataFrame
:return: Either a list of scores or a Task instance containing this metric (with these parameters) to be
run later.
:rtype: list/Task
"""
# make a dict representing the parameters
params = {"metric": "completeness"}
if not (columns is None):
params["columns"] = columns
t = task.Task([params])
if df is None:
return t
else:
return t.run(df)[0]["scores"]
def _deduplication_todo(columns, df):
"""
Returns what to compute for each column in dict form, given the metric parameters.
:param columns:
:type columns: list
:param df:
:type df: DataFrame
:return: Dict containing what to run (pandas functions names or named lambdas) for each column.
:rtype: dict
"""
todo = dict()
for col in columns:
todo[col] = ["nunique"]
return todo
def deduplication(columns=None, df=None):
"""
If a df is passed, the deduplication metric will be run and result returned
as a list of scores, otherwise an instance of the Task class containing this
metric will be returned, to be later run (possibly after adding to it other tasks/metrics).
:param columns: Columns on which to run the metric, None to run the deduplication
metric on the whole table.
:type columns: list
:param df: Dataframe on which to run the metric, None to have this function return a Task instance containing
this metric to be run later.
:type df: DataFrame
:return: Either a list of scores or a Task instance containing this metric (with these parameters) to be
run later.
:rtype: list/Task
"""
# make a dict representing the parameters
params = {"metric": "deduplication"}
if not (columns is None):
params["columns"] = columns
t = task.Task([params])
if df is None:
return t
else:
return t.run(df)[0]["scores"]
def _contains_date(format):
"""
Check if a format contains tokens related to date.
:param format:
:type format: str
:return: True if format contains tokens related to date, false otherwise.
:rtype: boolean
"""
part_of_date_tokens = "<KEY>"
for token in part_of_date_tokens:
if token in format:
return True
return False
def _to_datetime_cached(s, format):
"""
Transform a series of strings (dates) to datetimes, with a dict
to cache results.
:param s:
:param format:
:return:
"""
dates = {date: pd.to_datetime(date, errors="coerce", format=format) for date in s.dropna().unique()}
dates[np.NaN] = None
return s.map(dates)
def _set_year_month_day(series, year=None, month=None, day=None):
return pd.to_datetime(
{'year': year,
'month': month,
'day': day,
"hour": series.dt.hour,
"minute": series.dt.minute,
"second": series.dt.second
}, errors="coerce")
def _set_hour_minute_second(series, hour=None, minute=None, second=None):
return pd.to_datetime(
{'year': series.dt.year,
'month': series.dt.month,
'day': series.dt.day,
"hour": hour,
"minute": minute,
"second": second
}, errors="coerce")
def _timeliness_todo(columns, value, types, dateFormat=None, timeFormat=None):
"""
Returns what to compute for each column in dict form, given the metric parameters.
:param columns:
:type columns: list
:param value
:type value: str
:param dateFormat:
:type dateFormat: str
:param timeFormat:
:type timeFormat: str
:return: Dict containing what to run (pandas functions names or named lambdas) for each column.
:rtype: dict
"""
assert (dateFormat is None or timeFormat is None) and (
not dateFormat is None or not timeFormat is None), "Pass either a dateFormat or a timeFormat, " \
"not both. "
todo = dict()
if dateFormat:
cvalue = pd.to_datetime(value, format=dateFormat)
for col in columns:
if types[col] == str:
def _timeliness_agg(x):
s = _to_datetime_cached(x, dateFormat)
return (s < cvalue).mean()
_timeliness_agg.__name__ = ("_timeliness_agg_%s_%s_%s_%s" % (col, "dateFormat", dateFormat, value))
todo[col] = [_timeliness_agg]
elif types[col] == pd.Timestamp:
def _timeliness_agg(x):
return (x < cvalue).mean()
_timeliness_agg.__name__ = ("_timeliness_agg_%s_%s_%s_%s" % (col, "dateFormat", dateFormat, value))
todo[col] = [_timeliness_agg]
else:
print(
"Type of a column on which the timeliness metric is run must be either timestamp, "
"or string, if the metric is being run on dateFormat.")
exit()
elif timeFormat:
cvalue = pd.to_datetime(value, format=timeFormat)
# check if value contains a date and not only hours, minutes, seconds
has_date = _contains_date(timeFormat)
if has_date:
for col in columns:
if types[col] == str:
def _timeliness_agg(x):
s = _to_datetime_cached(x, timeFormat)
return (s < cvalue).mean()
_timeliness_agg.__name__ = ("_timeliness_agg_%s_%s_%s_%s" % (col, "timeFormat", timeFormat, value))
todo[col] = [_timeliness_agg]
elif types[col] == pd.Timestamp:
def _timeliness_agg(x):
return (x < cvalue).mean()
_timeliness_agg.__name__ = ("_timeliness_agg_%s_%s_%s_%s" % (col, "timeFormat", timeFormat, value))
todo[col] = [_timeliness_agg]
else:
print(
"Type of a column on which the timeliness metric is run must be either timestamp or string, if "
"the metric is being run on a timeFormat")
exit()
else:
"""
Set year, month, day of the series equal to today, so that the comparison between the series
and the 'value' argument will only be about hours, minutes, seconds
"""
now = pd.to_datetime("now")
year = now.year
month = now.month
day = now.day
cvalue = pd.to_datetime(value, format=timeFormat)
cvalue = pd.Timestamp(second=cvalue.second, hour=cvalue.hour, minute=cvalue.minute, day=day, month=month,
year=year)
for col in columns:
if types[col] == str:
def _timeliness_agg(x):
s = _to_datetime_cached(x, timeFormat)
s = _set_year_month_day(s, year, month, day)
return (s < cvalue).mean()
_timeliness_agg.__name__ = ("_timeliness_agg_%s_%s_%s_%s" % (col, "timeFormat", timeFormat, value))
todo[col] = [_timeliness_agg]
elif types[col] == pd.Timestamp:
def _timeliness_agg(x):
x = _set_year_month_day(x, year, month, day)
return (x < cvalue).mean()
_timeliness_agg.__name__ = ("_timeliness_agg_%s_%s_%s_%s" % (col, "timeFormat", timeFormat, value))
todo[col] = [_timeliness_agg]
else:
print(
"Type of a column on which the timeliness metric is run must be either timestamp or string, if "
"the metric is being run on a timeFormat")
exit()
return todo
def timeliness(columns, value, df=None, dateFormat=None, timeFormat=None):
"""
If a df is passed, the timeliness metric will be run and result returned
as a list of scores, otherwise an instance of the Task class containing this
metric will be returned, to be later run (possibly after adding to it other tasks/metrics).
Use http://strftime.org/ directives to express formats.
:param columns: Columns on which to run the metric, columns of type string will be casted to timestamp
using the dateFormat or timeFormat argument.
:type columns: list
:param value: Value used to run the metric, confronting values in the specified columns against it.
:type value: str
:param dateFormat: Format in which the value (and values in columns, if they are of string type) are; used
to cast columns if they contain dates as strings. Either dateFormat
or timeFormat must be passed, but not both. Use http://strftime.org/ directives to express formats.
:type dateFormat: str
:param timeFormat: Format in which the value (and values in columns, if they are of string type) are; used
to cast columns if they contain dates as strings. Either dateFormat
or timeFormat must be passed, but not both. Use http://strftime.org/ directives to express formats.
:type timeFormat: str
:param df: Dataframe on which to run the metric, None to have this function return a Task instance containing
this metric to be run later.
:type df: DataFrame
:return: Either a list of scores or a Task instance containing this metric (with these parameters) to be
run later.
:rtype: list/Task
"""
assert (dateFormat is None or timeFormat is None) and (
not dateFormat is None or not timeFormat is None), "Pass either a dateFormat or a timeFormat, not both."
# make a dict representing the parameters
params = {"metric": "timeliness", "columns": columns, "value": value}
if dateFormat:
params["dateFormat"] = dateFormat
elif timeFormat:
params["timeFormat"] = timeFormat
t = task.Task([params])
if df is None:
return t
else:
return t.run(df)[0]["scores"]
def _freshness_todo(columns, types, dateFormat=None, timeFormat=None):
"""
Returns what to compute for each column in dict form, given the metric parameters.
:param columns:
:type columns: list
:param types: Dict mapping column names to type.
:type types: dict
:param dateFormat:
:type dateFormat: str
:param timeFormat:
:type timeFormat: str
:return: Dict containing what to run (pandas functions names or named lambdas) for each column.
:rtype: dict
"""
assert (dateFormat is None or timeFormat is None) and (
not dateFormat is None or not timeFormat is None), "Pass either a dateFormat or a timeFormat, " \
"not both. "
todo = dict()
if dateFormat:
now = pd.Timestamp.today()
for col in columns:
if types[col] == str:
def _freshness_agg(s):
s = _to_datetime_cached(s, dateFormat)
s = _set_hour_minute_second(s, 0, 0, 0)
return (now - s).astype("timedelta64[D]").abs().mean()
_freshness_agg.__name__ = ("_freshness_agg_%s_%s_%s" % (col, "dateFormat", dateFormat))
todo[col] = [_freshness_agg]
elif types[col] == pd.Timestamp:
def _freshness_agg(s):
s = _set_hour_minute_second(s, 0, 0, 0)
return (now - s).astype("timedelta64[D]").abs().mean()
_freshness_agg.__name__ = ("_freshness_agg_%s_%s_%s" % (col, "dateFormat", dateFormat))
todo[col] = [_freshness_agg]
else:
print(
"Type of a column on which the freshness metric is run must be either timestamp "
"or string, if the metric is being run on dateFormat.")
exit()
elif timeFormat:
now = pd.Timestamp.now()
# check if value contains a date and not only hours, minutes, seconds
has_date = _contains_date(timeFormat)
if has_date:
"""
If the time format also contains a date it means the user is also interested in comparing years, months, days,
etc.
"""
for col in columns:
if types[col] == str:
def _freshness_agg(s):
s = _to_datetime_cached(s, timeFormat)
return (now - s).astype("timedelta64[s]").abs().mean()
_freshness_agg.__name__ = ("_freshness_agg_%s_%s_%s" % (col, "timeFormat", timeFormat))
todo[col] = [_freshness_agg]
elif types[col] == pd.Timestamp:
def _freshness_agg(s):
return (now - s).astype("timedelta64[s]").abs().mean()
_freshness_agg.__name__ = ("_freshness_agg_%s_%s_%s" % (col, "timeFormat", timeFormat))
todo[col] = [_freshness_agg]
else:
print(
"Type of a column on which the freshness metric is run must be either timestamp "
"or string, if the metric is being run on dateFormat.")
exit()
else:
"""
If the timestamp has no date the user is not interested in differences that consider years, months, days, but
only hours, minutes, seconds.
"""
year = now.year
month = now.month
day = now.day
for col in columns:
if types[col] == str:
def _freshness_agg(s):
s = _to_datetime_cached(s, timeFormat)
s = _set_year_month_day(s, year, month, day)
return (now - s).astype("timedelta64[s]").abs().mean()
_freshness_agg.__name__ = ("_freshness_agg_%s_%s_%s" % (col, "timeFormat", timeFormat))
todo[col] = [_freshness_agg]
elif types[col] == pd.Timestamp:
def _freshness_agg(s):
s = _set_year_month_day(s, year, month, day)
return (now - s).astype("timedelta64[s]").abs().mean()
_freshness_agg.__name__ = ("_freshness_agg_%s_%s_%s" % (col, "timeFormat", timeFormat))
todo[col] = [_freshness_agg]
else:
print(
"Type of a column on which the freshness metric is run must be either timestamp "
"or string, if the metric is being run on dateFormat.")
exit()
return todo
def freshness(columns, df=None, dateFormat=None, timeFormat=None):
"""
If a df is passed, the freshness metric will be run and result returned
as a list of scores, otherwise an instance of the Task class containing this
metric will be returned, to be later run (possibly after adding to it other tasks/metrics).
Use http://strftime.org/ directives to express formats.
:param columns: Columns on which to run the metric, columns of type string will be casted to timestamp
using the dateFormat or timeFormat argument.
:type columns: list
:param dateFormat: Format in which the values in columns are if those columns are of type string; otherwise they must
be of type timestamp. Use this parameter if you are interested in a result in terms of days.
Either dateFormat or timeFormat must be passed, but not both.
Use http://strftime.org/ directives to express formats.
:type dateFormat: str
:param timeFormat: Format in which the values in columns are if those columns are of type string; otherwise they must
be of type timestamp. Use this parameter if you are interested in results in terms of seconds.
Either dateFormat or timeFormat must be passed, but not both.
Use http://strftime.org/ directives to express formats.
:type timeFormat: str
:param df: Dataframe on which to run the metric, None to have this function return a Task instance containing
this metric to be run later.
:type df: DataFrame
:return: Either a list of scores or a Task instance containing this metric (with these parameters) to be
run later.
:rtype: list/Task
"""
# make a dict representing the parameters
params = {"metric": "freshness", "columns": columns}
if dateFormat:
params["dateFormat"] = dateFormat
elif timeFormat:
params["timeFormat"] = timeFormat
t = task.Task([params])
if df is None:
return t
else:
return t.run(df)[0]["scores"]
def _and_conditions_as_columns(conditions, df):
"""
Computes a boolean Series from conditions on the dataframe's columns, representing
which rows pass the conditions.
:param conditions:
:type conditions: list
:param df:
:type df: DataFrame
:return: Boolean series representing what rows are passing the conditions, can be used as indices to
get those rows from the df.
:rtype: Series
"""
# add first condition
cond = conditions[0]
if "casted_to" in cond:
if cond["operator"] == "gt":
result = pd.to_numeric(df[cond["column"]], errors="coerce") > cond["value"]
elif cond["operator"] == "lt":
result = pd.to_numeric(df[cond["column"]], errors="coerce") < cond["value"]
elif cond["operator"] == "eq":
result = | pd.to_numeric(df[cond["column"]], errors="coerce") | pandas.to_numeric |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : <NAME>
# @Contact : <EMAIL>
import numpy as np
import pandas as pd
from pandas import Index
from autoflow import DataManager
from autoflow import datasets
from autoflow.tests.base import LocalResourceTestCase
from autoflow.utils.dict_ import sort_dict
class TestDataManager(LocalResourceTestCase):
def test_instancing1(self):
def do_assert(data_manager, remote=False, stacked=True):
final_column_descriptions = {'id': 'PassengerId',
'target': 'Survived',
'text': ['Name'],
'num': ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare'],
'cat': ['Sex', 'Cabin', 'Embarked'],
'highC_cat': ['Ticket']}
assert sort_dict(data_manager.final_column_descriptions) == sort_dict(final_column_descriptions)
if not remote:
assert sort_dict(data_manager.column_descriptions) == sort_dict({'id': 'PassengerId', 'target': 'Survived', 'text': 'Name'})
else:
assert sort_dict(data_manager.column_descriptions) == sort_dict(final_column_descriptions)
if stacked:
assert np.all( | pd.Series(data_manager.feature_groups) | pandas.Series |
import joblib
from ..config import config
from .. import models
import fasttext
import numpy as np
import pandas as pd
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import MultiLabelBinarizer
from keras import backend as K
from pathlib import Path
import logging
_logger = logging.getLogger(__name__)
def read_corpus_file(corpus_text_file_path):
whole_text = []
with open(corpus_text_file_path, 'r') as corpus_file:
for each in corpus_file.readlines():
whole_text.append(each.strip())
return whole_text
def load_data(data_file_path):
df = pd.read_csv(data_file_path)
return df
def fit_tokenizer(num_words, corpus_text, tokenizer_path):
try:
tokenizer = load_tokenizer(tokenizer_path)
except:
tokenizer = Tokenizer(num_words=num_words, lower=True, filters='"#()*+-/:;<=>?@[\\]^_`{|}~\t\n')
# tokenizer = Tokenizer(num_words=num_words, lower=True)
tokenizer.fit_on_texts(corpus_text)
joblib.dump(tokenizer, tokenizer_path)
return tokenizer
def train_embedding_model(corpus_text_file_path, embedding_dim, embedding_model, embedding_model_file_path):
model_path = Path(embedding_model_file_path)
if not model_path.is_file():
model = fasttext.train_unsupervised(input=corpus_text_file_path,
model=embedding_model,
dim=embedding_dim)
model.save_model(embedding_model_file_path)
return
def build_embedding_matrix(word_index, embedding_dim, embedding_model_path, embedding_matrix_path):
try:
embedding_matrix = load_embedding_matrix(embedding_matrix_path)
except:
embedding_model = fasttext.load_model(embedding_model_path)
embedding_matrix = np.zeros((len(word_index) + 1, embedding_dim))
for word, i in word_index.items():
try:
embedding_matrix[i] = embedding_model.get_word_vector(word)
except:
embedding_matrix[i] = embedding_model.get_word_vector("unknown")
joblib.dump(embedding_matrix, embedding_matrix_path)
return embedding_matrix
def load_tokenizer(tokenizer_path):
return joblib.load(tokenizer_path)
def load_embedding_matrix(embedding_matrix_path):
return joblib.load(embedding_matrix_path)
def text_to_sequence_transformer(text_data, tokenizer):
return tokenizer.texts_to_sequences(text_data)
def padding_sequence_transformer(sequence_text_data, max_sequence_len):
return pad_sequences(sequence_text_data, maxlen=max_sequence_len)
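# Rough end-to-end sketch of the helpers above (variable names and paths are placeholders):
#     tokenizer = fit_tokenizer(num_words=20000, corpus_text=texts, tokenizer_path="tokenizer.pkl")
#     sequences = text_to_sequence_transformer(texts, tokenizer)
#     padded = padding_sequence_transformer(sequences, max_sequence_len=100)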
def save_result(prob_prediction, prediction_file_path):
result_df = | pd.DataFrame(prob_prediction, columns=config.ASPECT_TARGET) | pandas.DataFrame |
"""Auxiliary file for regressions."""
from collections import OrderedDict
from unittest import mock
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
from linearmodels.iv.model import IV2SLS
from linearmodels.iv.model import IVLIML
from statsmodels.regression.linear_model import OLS
from . import data_helper as dhlp
from .data_helper import get_age_control_names
from .data_helper import get_constant_name
from .data_helper import get_education_name
from .data_helper import get_further_exogenous_regressors
from .data_helper import get_log_weekly_wage_name
from .data_helper import get_qob_state_of_birth_interaction_names
from .data_helper import get_qob_yob_interaction_names
from .data_helper import get_quarter_of_birth_dummy_names
from .data_helper import get_region_of_residence_dummies
from .data_helper import get_state_of_birth_dummy_names
from .data_helper import get_year_of_birth_dummy_names
def get_regression_results_educational_variables(educ_vars, cohorts):
results = []
for ev in educ_vars:
for chrt_name, chrt in cohorts:
results.append(
{
"var": ev,
"cohort": chrt_name,
"mean": chrt[ev].mean(),
"ols": smf.ols(
formula=f"DTRND_{ev} ~ DUMMY_QOB_1 + DUMMY_QOB_2 + DUMMY_QOB_3", data=chrt
).fit(),
}
)
return results
def get_results_table_wald_estimates(df):
wage_1st = df.loc[df["QOB"] == 1]["LWKLYWGE"].mean()
wage_other = df.loc[df["QOB"] != 1]["LWKLYWGE"].mean()
wage_diff = wage_1st - wage_other
wage_err = np.sqrt(
np.power(df.loc[df["QOB"] == 1]["LWKLYWGE"].sem(), 2)
+ np.power(df.loc[df["QOB"] != 1]["LWKLYWGE"].sem(), 2)
)
educ_1st = df.loc[df["QOB"] == 1]["EDUC"].mean()
educ_other = df.loc[df["QOB"] != 1]["EDUC"].mean()
educ_diff = educ_1st - educ_other
educ_err = np.sqrt(
np.power(df.loc[df["QOB"] == 1]["EDUC"].sem(), 2)
+ np.power(df.loc[df["QOB"] != 1]["EDUC"].sem(), 2)
)
# wald return to education
df["EDUC_pred"] = smf.ols(formula="EDUC ~ DUMMY_QOB_1", data=df).fit().predict()
wald_rslt = smf.ols(formula="LWKLYWGE ~ EDUC_pred", data=df).fit()
# ols return to education
ols_rslt = smf.ols(formula="LWKLYWGE ~ EDUC", data=df).fit()
return {
"wage_1st": wage_1st,
"wage_other": wage_other,
"wage_diff": wage_diff,
"wage_err": wage_err,
"educ_1st": educ_1st,
"educ_other": educ_other,
"educ_diff": educ_diff,
"educ_err": educ_err,
"wald_est": wald_rslt.params["EDUC_pred"],
"wald_err": wald_rslt.bse["EDUC_pred"],
"ols_est": ols_rslt.params["EDUC"],
"ols_err": ols_rslt.bse["EDUC"],
}
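# Note: with the single binary instrument DUMMY_QOB_1, the 2SLS coefficient computed above
# coincides (up to floating point) with the classic Wald ratio
#     wald_est ~= (wage_1st - wage_other) / (educ_1st - educ_other)
# which is why the returned dict also reports both difference terms and their standard errors.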
def get_regression_results_ols_tsls(df, state_of_birth_dummies=False, race=True):
# add dummies for quarter and year of birth
df = dhlp.add_quarter_of_birth_dummies(df)
df = dhlp.add_year_of_birth_dummies(df)
if state_of_birth_dummies:
df = dhlp.add_state_of_birth_dummies(df)
state_lst = set(df["STATE"])
state_lst.remove(1)
# add AGESQ age squared
df["AGESQ"] = df["AGEQ"].pow(2)
# regression (1) OLS
formula_1 = "LWKLYWGE ~ EDUC + "
formula_1 += " + ".join([f"DUMMY_YOB_{i}" for i in range(0, 9)])
if state_of_birth_dummies:
formula_1 += " + "
formula_1 += " + ".join([f"DUMMY_STATE_{i}" for i in state_lst])
ols_1 = smf.ols(formula=formula_1, data=df).fit()
# regression (2) TSLS
formula_1st_stage_2 = "EDUC ~ "
formula_1st_stage_2 += " + ".join([f"DUMMY_YOB_{i}" for i in range(0, 9)])
formula_1st_stage_2 += " + "
formula_1st_stage_2 += " + ".join(
[f"DUMMY_YOB_{i} : DUMMY_QOB_{j}" for j in range(1, 4) for i in range(0, 10)]
)
if state_of_birth_dummies:
formula_1st_stage_2 += " + "
formula_1st_stage_2 += " + ".join([f"DUMMY_STATE_{i}" for i in state_lst])
formula_1st_stage_2 += " + "
formula_1st_stage_2 += " + ".join(
[f"DUMMY_STATE_{i} : DUMMY_QOB_{j}" for j in range(1, 4) for i in state_lst]
)
df["EDUC_pred_2"] = smf.ols(formula=formula_1st_stage_2, data=df).fit().predict()
formula_2nd_stage_2 = "LWKLYWGE ~ EDUC_pred_2 +"
formula_2nd_stage_2 += " + ".join([f"DUMMY_YOB_{i}" for i in range(0, 9)])
if state_of_birth_dummies:
formula_2nd_stage_2 += " + "
formula_2nd_stage_2 += " + ".join([f"DUMMY_STATE_{i}" for i in state_lst])
tsls_2 = smf.ols(formula=formula_2nd_stage_2, data=df).fit()
# regression (3) OLS
formula_3 = "LWKLYWGE ~ EDUC + AGEQ + AGESQ + "
formula_3 += " + ".join([f"DUMMY_YOB_{i}" for i in range(0, 9)])
if state_of_birth_dummies:
formula_3 += " + "
formula_3 += " + ".join([f"DUMMY_STATE_{i}" for i in state_lst])
ols_3 = smf.ols(formula=formula_3, data=df).fit()
# regression (4) TSLS
formula_1st_stage_4 = "EDUC ~ AGEQ + AGESQ + "
formula_1st_stage_4 += " + ".join([f"DUMMY_YOB_{i}" for i in range(0, 9)])
formula_1st_stage_4 += " + "
formula_1st_stage_4 += " + ".join(
[f"DUMMY_YOB_{i} : DUMMY_QOB_{j}" for j in range(1, 4) for i in range(0, 10)]
)
if state_of_birth_dummies:
formula_1st_stage_4 += " + "
formula_1st_stage_4 += " + ".join([f"DUMMY_STATE_{i}" for i in state_lst])
formula_1st_stage_4 += " + "
formula_1st_stage_4 += " + ".join(
[f"DUMMY_STATE_{i} : DUMMY_QOB_{j}" for j in range(1, 4) for i in state_lst]
)
df["EDUC_pred_4"] = smf.ols(formula=formula_1st_stage_4, data=df).fit().predict()
formula_2nd_stage_4 = "LWKLYWGE ~ EDUC_pred_4 + AGEQ + AGESQ + "
formula_2nd_stage_4 += " + ".join([f"DUMMY_YOB_{i}" for i in range(0, 9)])
if state_of_birth_dummies:
formula_2nd_stage_4 += " + "
formula_2nd_stage_4 += " + ".join([f"DUMMY_STATE_{i}" for i in state_lst])
tsls_4 = smf.ols(formula=formula_2nd_stage_4, data=df).fit()
# regression (5) OLS
formula_5 = "LWKLYWGE ~ EDUC + MARRIED + SMSA + NEWENG + MIDATL + "
formula_5 += "ENOCENT + WNOCENT + SOATL + ESOCENT + WSOCENT + MT + "
formula_5 += " + ".join([f"DUMMY_YOB_{i}" for i in range(0, 9)])
if race:
formula_5 += " + RACE"
if state_of_birth_dummies:
formula_5 += " + "
formula_5 += " + ".join([f"DUMMY_STATE_{i}" for i in state_lst])
ols_5 = smf.ols(formula=formula_5, data=df).fit()
# regression (6) TSLS
formula_1st_stage_6 = "EDUC ~ MARRIED + SMSA + NEWENG + MIDATL + "
formula_1st_stage_6 += "ENOCENT + WNOCENT + SOATL + ESOCENT + WSOCENT + MT + "
formula_1st_stage_6 += " + ".join([f"DUMMY_YOB_{i}" for i in range(0, 9)])
formula_1st_stage_6 += " + "
formula_1st_stage_6 += " + ".join(
[f"DUMMY_YOB_{i} : DUMMY_QOB_{j}" for j in range(1, 4) for i in range(0, 10)]
)
if race:
formula_1st_stage_6 += " + RACE"
if state_of_birth_dummies:
formula_1st_stage_6 += " + "
formula_1st_stage_6 += " + ".join([f"DUMMY_STATE_{i}" for i in state_lst])
formula_1st_stage_6 += " + "
formula_1st_stage_6 += " + ".join(
[f"DUMMY_STATE_{i} : DUMMY_QOB_{j}" for j in range(1, 4) for i in state_lst]
)
df["EDUC_pred_6"] = smf.ols(formula=formula_1st_stage_6, data=df).fit().predict()
formula_2nd_stage_6 = "LWKLYWGE ~ EDUC_pred_6 + MARRIED + SMSA + NEWENG + MIDATL + "
formula_2nd_stage_6 += "ENOCENT + WNOCENT + SOATL + ESOCENT + WSOCENT + MT + "
formula_2nd_stage_6 += " + ".join([f"DUMMY_YOB_{i}" for i in range(0, 9)])
if race:
formula_2nd_stage_6 += " + RACE"
if state_of_birth_dummies:
formula_2nd_stage_6 += " + "
formula_2nd_stage_6 += " + ".join([f"DUMMY_STATE_{i}" for i in state_lst])
tsls_6 = smf.ols(formula=formula_2nd_stage_6, data=df).fit()
# regression (7) OLS
formula_7 = "LWKLYWGE ~ EDUC + AGEQ + AGESQ + MARRIED + SMSA + NEWENG + MIDATL + "
formula_7 += "ENOCENT + WNOCENT + SOATL + ESOCENT + WSOCENT + MT + "
formula_7 += " + ".join([f"DUMMY_YOB_{i}" for i in range(0, 9)])
if race:
formula_7 += " + RACE"
if state_of_birth_dummies:
formula_7 += " + "
formula_7 += " + ".join([f"DUMMY_STATE_{i}" for i in state_lst])
ols_7 = smf.ols(formula=formula_7, data=df).fit()
# regression (8) TSLS
formula_1st_stage_8 = "EDUC ~ AGEQ + AGESQ + MARRIED + SMSA + NEWENG + MIDATL + "
formula_1st_stage_8 += "ENOCENT + WNOCENT + SOATL + ESOCENT + WSOCENT + MT + "
formula_1st_stage_8 += " + ".join([f"DUMMY_YOB_{i}" for i in range(0, 9)])
formula_1st_stage_8 += " + "
formula_1st_stage_8 += " + ".join(
[f"DUMMY_YOB_{i} : DUMMY_QOB_{j}" for j in range(1, 4) for i in range(0, 10)]
)
if race:
formula_1st_stage_8 += " + RACE"
if state_of_birth_dummies:
formula_1st_stage_8 += " + "
formula_1st_stage_8 += " + ".join([f"DUMMY_STATE_{i}" for i in state_lst])
formula_1st_stage_8 += " + "
formula_1st_stage_8 += " + ".join(
[f"DUMMY_STATE_{i} : DUMMY_QOB_{j}" for j in range(1, 4) for i in state_lst]
)
df["EDUC_pred_8"] = smf.ols(formula=formula_1st_stage_8, data=df).fit().predict()
formula_2nd_stage_8 = (
"LWKLYWGE ~ EDUC_pred_8 + AGEQ + AGESQ + MARRIED + SMSA + NEWENG + MIDATL + "
)
formula_2nd_stage_8 += "ENOCENT + WNOCENT + SOATL + ESOCENT + WSOCENT + MT + "
formula_2nd_stage_8 += " + ".join([f"DUMMY_YOB_{i}" for i in range(0, 9)])
if race:
formula_2nd_stage_8 += " + RACE"
if state_of_birth_dummies:
formula_2nd_stage_8 += " + "
formula_2nd_stage_8 += " + ".join([f"DUMMY_STATE_{i}" for i in state_lst])
tsls_8 = smf.ols(formula=formula_2nd_stage_8, data=df).fit()
return OrderedDict(
[
("ols_1", ols_1),
("tsls_2", tsls_2),
("ols_3", ols_3),
("tsls_4", tsls_4),
("ols_5", ols_5),
("tsls_6", tsls_6),
("ols_7", ols_7),
("tsls_8", tsls_8),
]
)
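# Note (added remark) on the manual TSLS blocks above: each second stage re-runs
# OLS on the first-stage fitted values (EDUC_pred_4, EDUC_pred_6, EDUC_pred_8, ...),
# which reproduces the 2SLS point estimates; however, the second-stage OLS standard
# errors are not corrected for the generated regressor, so use a dedicated IV
# routine (e.g. the IV2SLS wrapper below) when the standard errors matter.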
class SmallRegressionResult:
"""Generate small regression results."""
def __init__(self, regressionResult):
"""Initiate."""
self.params = regressionResult.params
self.bse = regressionResult.bse if hasattr(regressionResult, "bse") else None
self.std_errors = (
regressionResult.std_errors if hasattr(regressionResult, "std_errors") else None
)
# wrapper for the IV2SLS method
def IV2SLS_wrapper(dependent, exog, endog, instruments, small_rslt=False):
"""If small_rslt is True, return smaller version of the regression result."""
# try to run the IV2SLS method without mocking the validation
try:
if small_rslt:
rslt = SmallRegressionResult(IV2SLS(dependent, exog, endog, instruments).fit())
else:
rslt = IV2SLS(dependent, exog, endog, instruments).fit()
except ValueError as e:
print(str(e))
# run the IV2SLS method while mocking the validation
with mock.patch("linearmodels.iv.model._IVModelBase._validate_inputs"):
if small_rslt:
rslt = SmallRegressionResult(IV2SLS(dependent, exog, endog, instruments).fit())
else:
rslt = IV2SLS(dependent, exog, endog, instruments).fit()
return rslt
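# Usage sketch for IV2SLS_wrapper (illustrative; the column selections below are
# hypothetical and only show the expected argument layout):
# dependent = df[["LWKLYWGE"]]
# exog = df[[c for c in df.columns if c.startswith("DUMMY_YOB_")]]
# endog = df[["EDUC"]]
# instruments = df[[c for c in df.columns if c.startswith("DUMMY_QOB_")]]
# rslt = IV2SLS_wrapper(dependent, exog, endog, instruments, small_rslt=True)
# rslt.params  # SmallRegressionResult keeps only the params and standard errors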
def IV2SLS_using_ols(dependent, exog, endog, instruments, small_rslt=False):
"""If small_rslt is True, return a smaller version of the regression result."""
# run tsls regression if all required variables are passed, otherwise run ols
if endog is not None and instruments is not None:
# predict the endog, using the results from first stage
endog_pred = pd.Series(
data=OLS(endog=endog, exog=pd.concat((exog, instruments), axis=1)).fit().predict(),
name=f"{endog.columns[0]}",
)
# run the second stage, effect of the predicted endog on dependent controlling for exog
if small_rslt:
rslt = SmallRegressionResult(
OLS(endog=dependent, exog=pd.concat((exog, endog_pred), axis=1)).fit()
)
else:
rslt = OLS(endog=dependent, exog= | pd.concat((exog, endog_pred), axis=1) | pandas.concat |
"""
Import as:
import im.ib.data.load.test.test_s3_data_loader as tsdloa
"""
import pandas as pd
import pytest
import helpers.hunit_test as hunitest
import im.common.data.types as imcodatyp
import im.ib.data.load.ib_s3_data_loader as imidlisdlo
class TestS3IbDataLoader1(hunitest.TestCase):
"""
Test data loading correctness for Ib from S3.
"""
def setUp(self) -> None:
super().setUp()
self._s3_data_loader = imidlisdlo.IbS3DataLoader()
def test_dtypes1(self) -> None:
"""
Test column types of loaded dataframe.
"""
# Load data.
data = self._s3_data_loader.read_data(
exchange="GLOBEX",
symbol="ES",
asset_class=imcodatyp.AssetClass.Futures,
frequency=imcodatyp.Frequency.Daily,
contract_type=imcodatyp.ContractType.Continuous,
currency="USD",
unadjusted=None,
nrows=1,
)
# Get columns types.
types = data.dtypes.to_string()
# Compare with expected.
self.check_string(types, fuzzy_match=True)
def test_read_data1(self) -> None:
"""
Test correctness of minute ES data loading.
"""
# Load data.
data = self._s3_data_loader.read_data(
exchange="GLOBEX",
symbol="ES",
asset_class=imcodatyp.AssetClass.Futures,
frequency=imcodatyp.Frequency.Minutely,
contract_type=imcodatyp.ContractType.Continuous,
currency="USD",
unadjusted=None,
nrows=10,
)
# Transform dataframe to string.
actual_string = hunitest.convert_df_to_string(data)
# Compare with expected.
self.check_string(actual_string, fuzzy_match=True)
def test_read_data2(self) -> None:
"""
Test correctness of daily ES data loading.
"""
# Load data.
data = self._s3_data_loader.read_data(
exchange="GLOBEX",
symbol="ES",
asset_class=imcodatyp.AssetClass.Futures,
frequency=imcodatyp.Frequency.Daily,
contract_type=imcodatyp.ContractType.Continuous,
currency="USD",
unadjusted=True,
nrows=10,
)
# Transform dataframe to string.
actual_string = hunitest.convert_df_to_string(data)
# Compare with expected.
self.check_string(actual_string, fuzzy_match=True)
def test_read_data3(self) -> None:
"""
Test correctness of hourly ES data loading.
"""
# Load data.
data = self._s3_data_loader.read_data(
exchange="GLOBEX",
symbol="ES",
asset_class=imcodatyp.AssetClass.Futures,
frequency=imcodatyp.Frequency.Hourly,
contract_type=imcodatyp.ContractType.Continuous,
currency="USD",
unadjusted=None,
nrows=10,
)
# Transform dataframe to string.
actual_string = hunitest.convert_df_to_string(data)
# Compare with expected.
self.check_string(actual_string, fuzzy_match=True)
def test_read_data_check_date_type(self) -> None:
"""
Check date type of date field if frequency is daily.
"""
# Load data.
data = self._s3_data_loader.read_data(
exchange="GLOBEX",
symbol="ES",
asset_class=imcodatyp.AssetClass.Futures,
frequency=imcodatyp.Frequency.Daily,
contract_type=imcodatyp.ContractType.Continuous,
currency="USD",
unadjusted=True,
nrows=10,
)
# Check if date columns is date type.
self.assertIsInstance(data["date"][0], pd.Timestamp)
def test_read_data_with_start_end_ts(self) -> None:
"""
Test correctness of hourly ES data loading.
"""
# Load data.
data = self._s3_data_loader.read_data(
exchange="GLOBEX",
symbol="ES",
asset_class=imcodatyp.AssetClass.Futures,
frequency=imcodatyp.Frequency.Hourly,
contract_type=imcodatyp.ContractType.Continuous,
currency="USD",
unadjusted=None,
start_ts=pd.to_datetime("2021-03-04 22:00:00-05:00"),
end_ts= | pd.to_datetime("2021-03-05 05:00:00-05:00") | pandas.to_datetime |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import nltk.tokenize
import psycopg2
import pandas as pd
import sys, re
def clean_str(string):
"""
Tokenization/string cleaning for all datasets
Every dataset is lower cased
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
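# Illustrative examples of clean_str (added; the strings are made up):
# clean_str("Hello, World!") -> "hello , world !"
# clean_str("I can't go") -> "i ca n't go"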
def getWordAuthData(PORT, authors, doc, documentTable = 'document', chunk_size = 1000):
df = pd.DataFrame()
conn = None
output = []
i = 1
# nltk.download('punkt')
try:
conn = psycopg2.connect(user="stylometry", password="<PASSWORD>",
database="stylometry_v2", host="localhost", port=PORT)
cur = conn.cursor()
query = "SELECT author_id, doc_content FROM " + str(documentTable) + " WHERE author_id IN ("
flag = False
for auth in authors:
if not flag:
query = query + str(auth)
flag = True
else:
query = query + ", " + str(auth)
query = query + ") AND doc_id <> '" + str(doc) + "' ;"
cur.execute(query)
print("Execution completed")
rows = cur.fetchall()
print("Read completed")
print("Number of rows: %s" % (len(rows)))
for row in rows:
clean_row = clean_str(row[1].decode("utf8"))
tokens = nltk.word_tokenize(clean_row)
chunk1 = []
for x in tokens:
if (i < chunk_size):
chunk1.append(x.encode("utf8"))
i += 1
else:
chunk1.append(x.encode("utf8"))
xx = ' '.join(chunk1)
xx = str(xx)
chunk1 = []
output.append([row[0], xx])
i = 1
if len(chunk1) > 0:
xx = ' '.join(chunk1)
xx = str(xx)
chunk1 = []
output.append([row[0], xx])
i = 1
df = pd.DataFrame(output, columns=["author_id", "doc_content"])
print(df.dtypes)
print("Data Frame created: Shape: %s" % (str(df.shape)))
except psycopg2.Error as e:
if conn:
conn.rollback()
print('Error %s' % e)
sys.exit(1)
finally:
if conn is not None:
conn.close()
return df
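# Usage sketch (illustrative; the port, author ids and doc id are placeholders):
# df_train = getWordAuthData(5432, authors=[101, 102], doc='doc_7',
#                            documentTable='document', chunk_size=1000)
# Each row of df_train holds an author_id and a chunk of at most 1000 tokens.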
def getWordDocData(PORT, doc, documentTable = 'document', chunk_size = 1000):
df = pd.DataFrame()
conn = None
output = []
i = 1
try:
conn = psycopg2.connect(user="stylometry", password="<PASSWORD>",
database="stylometry_v2", host="localhost", port=PORT)
cur = conn.cursor()
query = "SELECT author_id, doc_content FROM " + str(documentTable) + " WHERE doc_id = '" + str(doc) + "' ;"
cur.execute(query)
print("Execution completed")
rows = cur.fetchall()
print("Read completed")
print("Number of rows: %s" % (len(rows)))
for row in rows:
clean_row = clean_str(row[1].decode("utf8"))
tokens = nltk.word_tokenize(clean_row)
chunk1 = []
for x in tokens:
if (i < chunk_size):
chunk1.append(x.encode("utf8"))
i += 1
else:
chunk1.append(x.encode("utf8"))
xx = ' '.join(chunk1)
xx = str(xx)
chunk1 = []
output.append([row[0], xx])
i = 1
if len(chunk1) > 0:
xx = ' '.join(chunk1)
xx = str(xx)
chunk1 = []
output.append([row[0], xx])
i = 1
df = | pd.DataFrame(output, columns=["author_id", "doc_content"]) | pandas.DataFrame |
import json, datetime, requests, time
import schedule
import pytz
import pandas as pd
def convert_datetime_timezone(dt, tz1, tz2):
tz1 = pytz.timezone(tz1)
tz2 = pytz.timezone(tz2)
dt = datetime.datetime.strptime(dt,"%Y/%m/%d %H:%M:%S")
dt = tz1.localize(dt)
dt = dt.astimezone(tz2)
dt = dt.strftime("%Y/%m/%d %H:%M")
return dt
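# Example (illustrative): Buenos Aires is UTC-3 with no DST, so
# convert_datetime_timezone("2021/01/01 12:00:00", "UTC", "America/Argentina/Buenos_Aires")
# returns "2021/01/01 09:00" (note the output format drops the seconds).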
def getFloatVal(value):
try:
return float(value.replace(',','.'))
except Exception:
return None
# DATA BTC
def btc():
btc_data = requests.get('https://api.coindesk.com/v1/bpi/currentprice/USD.json').json()
btc_value = round(btc_data['bpi']['USD']['rate_float'],2)
last_update_btc_iso = btc_data['time']['updatedISO'][:-6].replace('T',' ').replace('-','/')
last_update_btc_arg = convert_datetime_timezone(last_update_btc_iso,'UTC','America/Argentina/Buenos_Aires')
btc_df = pd.DataFrame({
'DATE': [last_update_btc_arg],
'PRICE': [btc_value]
})
try:
file_btc = pd.read_csv('btc.csv')
btc_df.to_csv(r'btc.csv', index=False, header=False, mode='a')
print('Quote saved')
except FileNotFoundError:
btc_df.to_csv(r'btc.csv', index=False, header=True, mode='w')
print('File btc.csv created and quote saved.')
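# The script imports `schedule`; a typical (hypothetical) wiring of these jobs
# would be something like:
# schedule.every(10).minutes.do(btc)
# schedule.every(10).minutes.do(usd)
# while True:
#     schedule.run_pending()
#     time.sleep(1)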
# DATA DOLAR
def usd():
dolar_endpoints = ['dolaroficial','dolarblue','contadoliqui','bbva','santander','nacion','galicia']
for apis in dolar_endpoints:
dolar_data = requests.get('https://api-dolar-argentina.herokuapp.com/api/'+apis).json()
fecha_utc = dolar_data['fecha']
fecha_arg = convert_datetime_timezone(fecha_utc,'UTC','America/Argentina/Buenos_Aires')
usd_compra = dolar_data['compra']
usd_venta = dolar_data['venta']
usd_df = pd.DataFrame({
'DATE': [fecha_arg],
'TYPE': [apis],
'BUY': [usd_compra],
'SELL': [usd_venta]
})
try:
file_usd = | pd.read_csv('dolar.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 17 15:42:42 2018
@author: MichaelEK
"""
import numpy as np
import pandas as pd
from pdsql import mssql
import os
import geopandas as gpd
from shapely.geometry import Point
from hydrolm.lm import LM
from hydrolm import util
from seaborn import regplot
import matplotlib.pyplot as plt
from gistools.vector import sel_sites_poly
plt.ioff()
############################################
### Parameters to modify
recent_date = '2019-01-01'
min_date = '2004-07-01'
min_count = 10
search_dis = 50000
input_sites = None # None or a list of sites
export_dir = r'E:\ecan\shared\projects\gw_regressions'
fig_sub_dir = 'plots_to_manual'
export_summ1 = 'manual_to_manual_2019-07-10.csv'
############################################
### Other Parameters
server = 'edwprod01'
database = 'hydro'
ts_daily_table = 'TSDataNumericDaily'
ts_hourly_table = 'TSDataNumericHourly'
ts_summ_table = 'TSDataNumericDailySumm'
sites_table = 'ExternalSite'
man_datasets = [13]
qual_codes = [200, 400, 500, 520, 600]
############################################
### Extract summary data and determine the appropriate sites to use
man_summ_data = mssql.rd_sql(server, database, ts_summ_table, where_in={'DatasetTypeID': man_datasets}).drop('ModDate', axis=1)
man_summ_data.FromDate = | pd.to_datetime(man_summ_data.FromDate) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 25 15:50:20 2019
workflow for ZWD and PW retrieval after python copy_gipsyx_post_from_geo.py:
1)save_PPP_field_unselected_data_and_errors(field='ZWD')
2)select_PPP_field_thresh_and_combine_save_all(field='ZWD')
3)use mean_ZWD_over_sound_time_and_fit_tstm to obtain the mda (model dataarray)
3*) can't use produce_kappa_ml_with_cats for hour on 5 mins data, dahhh!
can do that with dayofyear, month, season (need to implement it first)
4)save_GNSS_PW_israeli_stations using mda (e.g., season) from 3
5) do homogenization using Homogenization_R.py and run homogenize_pw_dataset
6) for hydro analysis and more run produce_all_GNSS_PW_anomalies
@author: shlomi
"""
import pandas as pd
import numpy as np
from PW_paths import work_yuval
from PW_paths import work_path
from PW_paths import geo_path
from pathlib import Path
from sklearn.linear_model import LinearRegression
from scipy import stats
hydro_path = work_yuval / 'hydro'
garner_path = work_yuval / 'garner'
ims_path = work_yuval / 'IMS_T'
gis_path = work_yuval / 'gis'
sound_path = work_yuval / 'sounding'
climate_path = work_yuval / 'climate'
dem_path = work_yuval / 'AW3D30'
phys_soundings = sound_path / 'bet_dagan_phys_sounding_2007-2019.nc'
tela_zwd = work_yuval / 'gipsyx_results/tela_newocean/TELA_PPP_1996-2019.nc'
jslm_zwd = work_yuval / 'gipsyx_results/jslm_newocean/JSLM_PPP_2001-2019.nc'
alon_zwd = work_yuval / 'gipsyx_results/alon_newocean/ALON_PPP_2005-2019.nc'
tela_zwd_aligned = work_yuval / 'tela_zwd_aligned_with_physical_bet_dagan.nc'
alon_zwd_aligned = work_yuval / 'ALON_zwd_aligned_with_physical_bet_dagan.nc'
jslm_zwd_aligned = work_yuval / 'JSLM_zwd_aligned_with_physical_bet_dagan.nc'
tela_ims = ims_path / '10mins/TEL-AVIV-COAST_178_TD_10mins_filled.nc'
alon_ims = ims_path / '10mins/ASHQELON-PORT_208_TD_10mins_filled.nc'
jslm_ims = ims_path / '10mins/JERUSALEM-CENTRE_23_TD_10mins_filled.nc'
station_on_geo = geo_path / 'Work_Files/PW_yuval/GNSS_stations'
era5_path = work_yuval / 'ERA5'
PW_stations_path = work_yuval / '1minute'
# stations = pd.read_csv('All_gps_stations.txt', header=0, delim_whitespace=True,
# index_col='name')
logs_path = geo_path / 'Python_Projects/PW_from_GPS/log_files'
GNSS = work_yuval / 'GNSS_stations'
cwd = Path().cwd()
gnss_sound_stations_dict = {'acor': '08001', 'mall': '08302'}
# TODO: kappa_ml_with_cats yields smaller k using cats not None, check it...
# TODO: then assemble PW for all the stations.
class LinearRegression_with_stats(LinearRegression):
"""
LinearRegression class based on sklearn's, but it also calculates t-statistics
and p-values for the model coefficients (betas).
Additional attributes available after .fit() are `betasTStat` and `betasPValue`,
one entry per fitted coefficient.
The option to force the intercept to 0 (since usually we include it in X) is
left commented out in __init__ below.
"""
def __init__(self, *args, **kwargs):
# if not "fit_intercept" in kwargs:
# kwargs['fit_intercept'] = False
super().__init__(*args,**kwargs)
def fit(self, X, y=None, verbose=True, **fit_params):
"""A wrapper around sklearn's fit method.
Also computes per-coefficient t-statistics and p-values, stored in the
`betasTStat` and `betasPValue` attributes.
Parameters
----------
X : xarray DataArray, Dataset or other array-like
The training input samples.
y : xarray DataArray, Dataset or other array-like
The target values.
Returns
-------
Returns self.
"""
from scipy import linalg
self = super().fit(X, y, **fit_params)
n, k = X.shape
yHat = np.matrix(self.predict(X)).T
# Change X and Y into numpy matrices. x also has a column of ones added to it.
x = np.hstack((np.ones((n,1)),np.matrix(X)))
y = np.matrix(y).T
# Degrees of freedom.
df = float(n-k-1)
# Sample variance.
sse = np.sum(np.square(yHat - y),axis=0)
self.sampleVariance = sse/df
# Sample variance for x.
self.sampleVarianceX = x.T*x
# Covariance Matrix = [(s^2)(X'X)^-1]^0.5. (sqrtm = matrix square root. ugly)
self.covarianceMatrix = linalg.sqrtm(self.sampleVariance[0,0]*self.sampleVarianceX.I)
# Standard errors for the coefficients: the diagonal elements of the covariance matrix.
self.se = self.covarianceMatrix.diagonal()[1:]
# T statistic for each beta.
self.betasTStat = np.zeros(len(self.se))
for i in range(len(self.se)):
self.betasTStat[i] = self.coef_[i]/self.se[i]
# P-value for each beta. This is a two sided t-test, since the betas can be
# positive or negative.
self.betasPValue = 1 - stats.t.cdf(abs(self.betasTStat),df)
return self
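# Usage sketch (illustrative, synthetic shapes):
# lr = LinearRegression_with_stats()
# lr.fit(X, y)        # X: (n, k) design matrix, y: (n,) target
# lr.coef_            # fitted betas (as in sklearn)
# lr.betasTStat       # t-statistic per coefficient
# lr.betasPValue      # p-value per coefficient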
def compare_different_cats_bet_dagan_tela():
from aux_gps import error_mean_rmse
ds, mda = mean_ZWD_over_sound_time_and_fit_tstm(
plot=False, times=['2013-09', '2020'], cats=None)
ds_hour, mda = mean_ZWD_over_sound_time_and_fit_tstm(
plot=False, times=['2013-09', '2020'], cats=['hour'])
ds_season, mda = mean_ZWD_over_sound_time_and_fit_tstm(
plot=False, times=['2013-09', '2020'], cats=['season'])
ds_hour_season, mda = mean_ZWD_over_sound_time_and_fit_tstm(
plot=False, times=['2013-09', '2020'], cats=['hour', 'season'])
ds = ds.dropna('sound_time')
ds_hour = ds_hour.dropna('sound_time')
ds_season = ds_season.dropna('sound_time')
ds_hour_season = ds_hour_season.dropna('sound_time')
mean_none, rmse_none = error_mean_rmse(ds['tpw_bet_dagan'], ds['tela_pw'])
mean_hour, rmse_hour = error_mean_rmse(
ds_hour['tpw_bet_dagan'], ds_hour['tela_pw'])
mean_season, rmse_season = error_mean_rmse(
ds_season['tpw_bet_dagan'], ds_season['tela_pw'])
mean_hour_season, rmse_hour_season = error_mean_rmse(
ds_hour_season['tpw_bet_dagan'], ds_hour_season['tela_pw'])
hour_mean_per = 100 * (abs(mean_none) - abs(mean_hour)) / abs(mean_none)
hour_rmse_per = 100 * (abs(rmse_none) - abs(rmse_hour)) / abs(rmse_none)
season_mean_per = 100 * (abs(mean_none) - abs(mean_season)) / abs(mean_none)
season_rmse_per = 100 * (abs(rmse_none) - abs(rmse_season)) / abs(rmse_none)
hour_season_mean_per = 100 * (abs(mean_none) - abs(mean_hour_season)) / abs(mean_none)
hour_season_rmse_per = 100 * (abs(rmse_none) - abs(rmse_hour_season)) / abs(rmse_none)
print(
'whole data mean: {:.2f} and rmse: {:.2f}'.format(
mean_none,
rmse_none))
print(
'hour data mean: {:.2f} and rmse: {:.2f}, {:.1f} % and {:.1f} % better than whole data.'.format(
mean_hour, rmse_hour, hour_mean_per, hour_rmse_per))
print(
'season data mean: {:.2f} and rmse: {:.2f}, {:.1f} % and {:.1f} % better than whole data.'.format(
mean_season, rmse_season, season_mean_per, season_rmse_per))
print(
'hour and season data mean: {:.2f} and rmse: {:.2f}, {:.1f} % and {:.1f} % better than whole data.'.format(
mean_hour_season, rmse_hour_season, hour_season_mean_per, hour_season_rmse_per))
return
def PW_trend_analysis(path=work_yuval, anom=False, station='tela'):
import xarray as xr
pw = xr.open_dataset(path / 'GNSS_daily_PW.nc')[station]
if anom:
pw = pw.groupby('time.month') - pw.groupby('time.month').mean('time')
pw_lr = ML_fit_model_to_tmseries(pw, modelname='LR', plot=False, verbose=True)
pw_tsen = ML_fit_model_to_tmseries(pw, modelname='TSEN', plot=False, verbose=True)
return pw_tsen
def produce_gnss_pw_from_uerra(era5_path=era5_path,
glob_str='UERRA_TCWV_*.nc',
pw_path=work_yuval, savepath=None):
from aux_gps import path_glob
import xarray as xr
from aux_gps import save_ncfile
udf = add_UERRA_xy_to_israeli_gps_coords(pw_path, era5_path)
files = path_glob(era5_path, glob_str)
uerra_list = [xr.open_dataset(file) for file in files]
ds_attrs = uerra_list[0].attrs
ds_list = []
for i, uerra in enumerate(uerra_list):
print('proccessing {}'.format(files[i].as_posix().split('/')[-1]))
st_list = []
for station in udf.index:
y = udf.loc[station, 'y']
x = udf.loc[station, 'x']
uerra_st = uerra['tciwv'].isel(y=y, x=x).reset_coords(drop=True)
uerra_st.name = station
uerra_st.attrs = uerra['tciwv'].attrs
uerra_st.attrs['lon'] = udf.loc[station, 'lon']
uerra_st.attrs['lat'] = udf.loc[station, 'lat']
st_list.append(uerra_st)
ds_st = xr.merge(st_list)
ds_list.append(ds_st)
ds = xr.concat(ds_list, 'time')
ds = ds.sortby('time')
ds.attrs = ds_attrs
ds_monthly = ds.resample(time='MS', keep_attrs=True).mean(keep_attrs=True)
if savepath is not None:
filename = 'GNSS_uerra_4xdaily_PW.nc'
save_ncfile(ds, savepath, filename)
filename = 'GNSS_uerra_monthly_PW.nc'
save_ncfile(ds_monthly, savepath, filename)
return ds
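# Usage sketch (illustrative): extract UERRA PWV at the GNSS station pixels and
# save the 4x-daily and monthly files next to the other PW products:
# ds = produce_gnss_pw_from_uerra(savepath=work_yuval)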
def produce_PWV_flux_from_ERA5_UVQ(
path=era5_path,
savepath=None,
pw_path=work_yuval, return_magnitude=False):
import xarray as xr
from aux_gps import calculate_pressure_integral
from aux_gps import calculate_g
from aux_gps import save_ncfile
import numpy as np
ds = xr.load_dataset(era5_path / 'ERA5_UVQ_mm_israel_1979-2020.nc')
ds = ds.sel(expver=1).reset_coords(drop=True)
g = calculate_g(ds['latitude']).mean().item()
qu = calculate_pressure_integral(ds['q'] * ds['u'])
qv = calculate_pressure_integral(ds['q'] * ds['v'])
qu.name = 'qu'
qv.name = 'qv'
# convert to mm/sec units
qu = 100 * qu / (g * 1000)
qv = 100 * qv / (g * 1000)
# add attrs:
qu.attrs['units'] = 'mm/sec'
qv.attrs['units'] = 'mm/sec'
qu_gnss = produce_era5_field_at_gnss_coords(
qu, savepath=None, pw_path=pw_path)
qv_gnss = produce_era5_field_at_gnss_coords(
qv, savepath=None, pw_path=pw_path)
if return_magnitude:
qflux = np.sqrt(qu_gnss**2 + qv_gnss**2)
qflux.attrs['units'] = 'mm/sec'
return qflux
else:
return qu_gnss, qv_gnss
def produce_era5_field_at_gnss_coords(era5_da, savepath=None,
pw_path=work_yuval):
import xarray as xr
from aux_gps import save_ncfile
print('reading ERA5 {} field.'.format(era5_da.name))
gps = produce_geo_gnss_solved_stations(plot=False)
era5_pw_list = []
for station in gps.index:
slat = gps.loc[station, 'lat']
slon = gps.loc[station, 'lon']
da = era5_da.sel(latitude=slat, longitude=slon, method='nearest')
da.name = station
da.attrs['era5_lat'] = da.latitude.values.item()
da.attrs['era5_lon'] = da.longitude.values.item()
da = da.reset_coords(drop=True)
era5_pw_list.append(da)
ds = xr.merge(era5_pw_list)
if savepath is not None:
name = era5_da.name
yrmin = era5_da['time'].dt.year.min().item()
yrmax = era5_da['time'].dt.year.max().item()
filename = 'GNSS_ERA5_{}_{}-{}.nc'.format(name, yrmin, yrmax)
save_ncfile(ds, savepath, filename)
return ds
def produce_gnss_pw_from_era5(era5_path=era5_path,
glob_str='era5_TCWV_israel*.nc',
pw_path=work_yuval, savepath=None):
from aux_gps import path_glob
import xarray as xr
from aux_gps import save_ncfile
filepath = path_glob(era5_path, glob_str)[0]
print('opening ERA5 file {}'.format(filepath.as_posix().split('/')[-1]))
era5_pw = xr.open_dataarray(filepath)
era5_pw = era5_pw.sortby('time')
gps = produce_geo_gnss_solved_stations(plot=False)
era5_pw_list = []
for station in gps.index:
slat = gps.loc[station, 'lat']
slon = gps.loc[station, 'lon']
da = era5_pw.sel(lat=slat, lon=slon, method='nearest')
da.name = station
da.attrs['era5_lat'] = da.lat.values.item()
da.attrs['era5_lon'] = da.lon.values.item()
da = da.reset_coords(drop=True)
era5_pw_list.append(da)
ds_hourly = xr.merge(era5_pw_list)
ds_monthly = ds_hourly.resample(time='MS', keep_attrs=True).mean(keep_attrs=True)
if savepath is not None:
filename = 'GNSS_era5_hourly_PW.nc'
save_ncfile(ds_hourly, savepath, filename)
filename = 'GNSS_era5_monthly_PW.nc'
save_ncfile(ds_monthly, savepath, filename)
return ds_hourly
def plug_in_approx_loc_gnss_stations(log_path=logs_path, file_path=cwd):
from aux_gps import path_glob
import pandas as pd
def plug_loc_to_log_file(logfile, loc):
def replace_field(content_list, string, replacment):
pos = [(i, x) for i, x in enumerate(content_list)
if string in x][0][0]
con = content_list[pos].split(':')
con[-1] = ' {}'.format(replacment)
con = ':'.join(con)
content_list[pos] = con
return content_list
with open(logfile) as f:
content = f.read().splitlines()
repl = [
'X coordinate (m)',
'Y coordinate (m)',
'Z coordinate (m)',
'Latitude (deg)',
'Longitude (deg)',
'Elevation (m)']
location = [loc['X'], loc['Y'], loc['Z'], '+' +
str(loc['lat']), '+' + str(loc['lon']), loc['alt']]
for rep, loca in list(zip(repl, location)):
try:
content = replace_field(content, rep, loca)
except IndexError:
print('did not found {} field...'.format(rep))
pass
with open(logfile, 'w') as f:
for item in content:
f.write('{}\n'.format(item))
print('writing {}'.format(logfile))
return
# load gnss accurate loc:
acc_loc_df = pd.read_csv(file_path / 'israeli_gnss_coords.txt',
delim_whitespace=True)
log_files = path_glob(log_path, '*updated_by_shlomi*.log')
for logfile in log_files:
st_log = logfile.as_posix().split('/')[-1].split('_')[0]
try:
loc = acc_loc_df.loc[st_log, :]
except KeyError:
print('station {} not found in accurate location df, skipping'.format(st_log))
continue
plug_loc_to_log_file(logfile, loc)
print('Done!')
return
def build_df_lat_lon_alt_gnss_stations(gnss_path=GNSS, savepath=None):
from aux_gps import path_glob
import pandas as pd
import pyproj
from pathlib import Path
stations_in_gnss = [x.as_posix().split('/')[-1]
for x in path_glob(GNSS, '*')]
dss = [
load_gipsyx_results(
x,
sample_rate='MS',
plot_fields=None) for x in stations_in_gnss]
# stations_not_found = [x for x in dss if isinstance(x, str)]
# [stations_in_gnss.remove(x) for x in stations_in_gnss if x is None]
dss = [x for x in dss if not isinstance(x, str)]
dss = [x for x in dss if x is not None]
lats = [x.dropna('time').lat[0].values.item() for x in dss]
lons = [x.dropna('time').lon[0].values.item() for x in dss]
alts = [x.dropna('time').alt[0].values.item() for x in dss]
df = pd.DataFrame(lats)
df.index = [x.attrs['station'].lower() for x in dss]
df['lon'] = lons
df['alt'] = alts
df.columns = ['lat', 'lon', 'alt']
ecef = pyproj.Proj(proj='geocent', ellps='WGS84', datum='WGS84')
lla = pyproj.Proj(proj='latlong', ellps='WGS84', datum='WGS84')
X, Y, Z = pyproj.transform(lla, ecef, df['lon'].values, df['lat'].values,
df['alt'].values, radians=False)
df['X'] = X
df['Y'] = Y
df['Z'] = Z
# read station names from log files:
stations_approx = pd.read_fwf(Path().cwd()/'stations_approx_loc.txt',
delim_whitespace=False, skiprows=1, header=None)
stations_approx.columns=['index','X','Y','Z','name', 'extra']
stations_approx['name'] = stations_approx['name'].fillna('') +' ' + stations_approx['extra'].fillna('')
stations_approx.drop('extra', axis=1, inplace=True)
stations_approx = stations_approx.set_index('index')
df['name'] = stations_approx['name']
df.sort_index(inplace=True)
if savepath is not None:
filename = 'israeli_gnss_coords.txt'
df.to_csv(savepath/filename, sep=' ')
return df
def produce_homogeniety_results_xr(ds, alpha=0.05, test='snht', sim=20000):
import pyhomogeneity as hg
import xarray as xr
from aux_gps import homogeneity_test_xr
hg_tests_dict = {
'snht': hg.snht_test,
'pett': hg.pettitt_test,
'b_like': hg.buishand_likelihood_ratio_test,
'b_u': hg.buishand_u_test,
'b_q': hg.buishand_q_test,
'b_range': hg.buishand_range_test}
if test == 'all':
tests = [x for x in hg_tests_dict.keys()]
ds_list = []
for t in tests:
print('running {} test...'.format(t))
rds = ds.map(homogeneity_test_xr, hg_test_func=hg_tests_dict[t],
alpha=alpha, sim=sim, verbose=False)
rds = rds.to_array('station').to_dataset('results')
ds_list.append(rds)
rds = xr.concat(ds_list, 'test')
rds['test'] = tests
rds.attrs['alpha'] = alpha
rds.attrs['sim'] = sim
else:
rds = ds.map(homogeneity_test_xr, hg_test_func=hg_tests_dict[test],
alpha=alpha, sim=sim, verbose=False)
rds = rds.to_array('station').to_dataset('results')
rds.attrs['alpha'] = alpha
rds.attrs['sim'] = sim
# df=rds.to_array('st').to_dataset('results').to_dataframe()
print('Done!')
return rds
def run_error_analysis(station='tela', task='edit30hr'):
station_on_geo = geo_path / 'Work_Files/PW_yuval/GNSS_stations'
if task == 'edit30hr':
path = station_on_geo / station / 'rinex/30hr'
err, df = gipsyx_runs_error_analysis(path, glob_str='*.dr.gz')
elif task == 'run':
path = station_on_geo / station / 'rinex/30hr/results'
err, df = gipsyx_runs_error_analysis(path, glob_str='*.tdp')
return err, df
def gipsyx_runs_error_analysis(path, glob_str='*.tdp'):
from collections import Counter
from aux_gps import get_timedate_and_station_code_from_rinex
from aux_gps import path_glob
import pandas as pd
import logging
def find_errors(content_list, name):
keys = [x for x in content_list if 'KeyError' in x]
vals = [x for x in content_list if 'ValueError' in x]
excpt = [x for x in content_list if 'Exception' in x]
err = [x for x in content_list if 'Error' in x]
trouble = [x for x in content_list if 'Trouble' in x]
problem = [x for x in content_list if 'Problem' in x]
fatal = [x for x in content_list if 'FATAL' in x]
timed = [x for x in content_list if 'Timed' in x]
errors = keys + vals + excpt + err + trouble + problem + fatal + timed
if not errors:
dt, _ = get_timedate_and_station_code_from_rinex(name)
logger.warning('found new error on {} ({})'.format(name, dt.strftime('%Y-%m-%d')))
return errors
logger = logging.getLogger('gipsyx_post_proccesser')
rfns = []
files = path_glob(path, glob_str, True)
for file in files:
# first get all the rinex filenames that gipsyx ran successfully:
rfn = file.as_posix().split('/')[-1][0:12]
rfns.append(rfn)
if files:
logger.info('running error analysis for station {}'.format(rfn[0:4].upper()))
all_errors = []
errors = []
dates = []
rinex = []
files = path_glob(path, '*.err')
for file in files:
rfn = file.as_posix().split('/')[-1][0:12]
# now, skip the error files that were copied but for which a tdp file exists,
# i.e., the gipsyx run was successful:
if rfn in rfns:
continue
else:
dt, _ = get_timedate_and_station_code_from_rinex(rfn)
dates.append(dt)
rinex.append(rfn)
with open(file) as f:
content = f.readlines()
# you may also want to remove whitespace characters like `\n` at
# the end of each line
content = [x.strip() for x in content]
all_errors.append(content)
errors.append(find_errors(content, rfn))
er = [','.join(x) for x in all_errors]
df = pd.DataFrame(data=rinex, index=dates, columns=['rinex'])
df['error'] = er
df = df.sort_index()
total = len(rfns) + len(df)
good = len(rfns)
bad = len(df)
logger.info('total files: {}, successful runs: {}, erroneous runs: {}'.format(
total, good, bad))
logger.info('success percent: {0:.1f}%'.format(100.0 * good / total))
logger.info('error percent: {0:.1f}%'.format(100.0 * bad / total))
# now count the similar errors and sort:
flat_list = [item for sublist in errors for item in sublist]
counted_errors = Counter(flat_list)
errors_sorted = sorted(counted_errors.items(), key=lambda x: x[1],
reverse=True)
return errors_sorted, df
def compare_gipsyx_soundings(sound_path=sound_path, gps_station='acor',
times=['1996', '2019'], var='pw'):
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import matplotlib.dates as mdates
import xarray as xr
from aux_gps import path_glob
# sns.set_style('whitegrid')
# ds = mean_zwd_over_sound_time(
# physical_file, ims_path=ims_path, gps_station='tela',
# times=times)
sound_station = gnss_sound_stations_dict.get(gps_station)
gnss = load_gipsyx_results(plot_fields=None, station=gps_station)
sound_file = path_glob(sound_path, 'station_{}_soundings_ts_tm_tpw*.nc'.format(sound_station))[0]
sds = xr.open_dataset(sound_file)
time_dim = list(set(sds.dims))[0]
sds = sds.rename({time_dim: 'time'})
sds[gps_station] = gnss.WetZ
if var == 'zwd':
k = kappa(sds['Tm'], Tm_input=True)
sds['sound'] = sds.Tpw / k
sds[gps_station] = gnss.WetZ
elif var == 'pw':
linear_model = ml_models_T_from_sounding(times=times,
station=sound_station,
plot=False, models=['LR'])
linear_model = linear_model.sel(name='LR').values.item()
k = kappa_ml(sds['Ts'] - 273.15, model=linear_model, no_error=True)
sds[gps_station] = sds[gps_station] * k
sds['sound'] = sds.Tpw
sds = sds.dropna('time')
sds = sds.sel(time=slice(*times))
df = sds[['sound', gps_station]].to_dataframe()
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
[x.set_xlim([pd.to_datetime(times[0]), pd.to_datetime(times[1])])
for x in axes]
df.columns = ['{} soundings'.format(sound_station), '{} GNSS station'.format(gps_station)]
sns.scatterplot(
data=df,
s=20,
ax=axes[0],
style='x',
linewidth=0,
alpha=0.8)
# axes[0].legend(['Bet_Dagan soundings', 'TELA GPS station'])
df_r = df.iloc[:, 0] - df.iloc[:, 1]
df_r.columns = ['Residual distribution']
sns.scatterplot(
data=df_r,
color='k',
s=20,
ax=axes[1],
linewidth=0,
alpha=0.5)
axes[0].grid(b=True, which='major')
axes[1].grid(b=True, which='major')
if var == 'zwd':
axes[0].set_ylabel('Zenith Wet Delay [cm]')
axes[1].set_ylabel('Residuals [cm]')
elif var == 'pw':
axes[0].set_ylabel('Precipitable Water [mm]')
axes[1].set_ylabel('Residuals [mm]')
# sonde_change_x = pd.to_datetime('2013-08-20')
# axes[1].axvline(sonde_change_x, color='red')
# axes[1].annotate(
# 'changed sonde type from VIZ MK-II to PTU GPS',
# (mdates.date2num(sonde_change_x),
# 10),
# xytext=(
# 15,
# 15),
# textcoords='offset points',
# arrowprops=dict(
# arrowstyle='fancy',
# color='red'),
# color='red')
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0.01)
return sds
def produce_zwd_from_sounding_and_compare_to_gps(phys_sound_file=phys_soundings,
zwd_file=tela_zwd_aligned,
tm=None, plot=True):
"""compare zwd from any gps station (that first has to be aligned to
Bet_dagan station) to that of Bet-Dagan radiosonde station using tm from
either bet dagan or user inserted. by default, using zwd from pw by
inversing Bevis 1992 et al. formula"""
import xarray as xr
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.dates as mdates
station = zwd_file.as_posix().split('/')[-1].split('_')[0]
zwd_and_tpw = xr.open_dataset(zwd_file)
tpw = zwd_and_tpw['Tpw']
pds = get_ts_tm_from_physical(phys_sound_file, plot=False)
if tm is None:
k = kappa(pds['tm'], Tm_input=True)
else:
k = kappa(tm, Tm_input=True)
zwd_sound = tpw / k
zwd_and_tpw['WetZ_from_bet_dagan'] = zwd_sound
radio = zwd_and_tpw['WetZ_from_bet_dagan']
gps = zwd_and_tpw['{}_WetZ'.format(station)]
gps.name = ['WetZ_from_TELA']
if plot:
# sns.set_style("whitegrid")
df = radio.to_dataframe()
df[gps.name] = gps.to_dataframe()
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
[x.set_xlim([pd.to_datetime('2007-12-31'), pd.to_datetime('2019')]) for x in axes]
# radio.plot.line(marker='.', linewidth=0., ax=axes[0])
sns.scatterplot(data=df, s=20, ax=axes[0], style='x', linewidth=0, alpha=0.8)
# gps.plot.line(marker='.', linewidth=0., ax=axes[0])
#sns.scatterplot(data=df, y= 'tela_WetZ', s=10, ax=axes[0])
# axes[0].legend('radiosonde', '{}_gnss_site'.format(station))
df_r = df.iloc[:, 0] - df.iloc[:, 1]
df_r.columns = ['Residuals']
# (radio - gps).plot.line(marker='.', linewidth=0., ax=axes[1])
sns.scatterplot(data=df_r, color = 'k', s=20, ax=axes[1], linewidth=0, alpha=0.5)
axes[0].grid(b=True, which='major')
axes[1].grid(b=True, which='major')
axes[0].set_ylabel('Zenith Wet Delay [cm]')
axes[1].set_ylabel('Residuals [cm]')
axes[0].set_title('Zenith wet delay from Bet-Dagan radiosonde station and TELA GNSS satation')
sonde_change_x = pd.to_datetime('2013-08-20')
axes[1].axvline(sonde_change_x, color='red')
axes[1].annotate('changed sonde type from VIZ MK-II to PTU GPS', (mdates.date2num(sonde_change_x), 15), xytext=(15, 15),
textcoords='offset points', arrowprops=dict(arrowstyle='fancy', color='red'), color='red')
# axes[1].set_aspect(3)
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0)
# plt.figure()
# (radio - gps).plot.hist(bins=100)
return zwd_and_tpw
def fit_ts_tm_produce_ipw_and_compare_TELA(phys_sound_file=phys_soundings,
zwd_file=tela_zwd_aligned,
IMS_file=None,
sound_path=sound_path,
categories=None, model='LR',
times=['2005', '2019'],
**compare_kwargs):
"""categories can be :'bevis', None, 'season' and/or 'hour'. None means
whole dataset ts-tm.
models can be 'LR' or 'TSEN'. compare_kwargs is for
compare_to_sounding2 i.e., times, season, hour, title"""
import xarray as xr
print(compare_kwargs)
if categories == 'bevis':
results = None
compare_kwargs.update({'title': None})
else:
results = ml_models_T_from_sounding(sound_path, categories, model,
physical_file=phys_sound_file,
times=times)
if categories is None:
compare_kwargs.update({'title': 'whole'})
elif categories is not None and categories != 'bevis':
if isinstance(categories, str):
compare_kwargs.update({'title': [categories][0]})
elif isinstance(categories, list):
compare_kwargs.update({'title': 'hour_season'})
zwd_and_tpw = xr.open_dataset(zwd_file)
if times is not None:
zwd_and_tpw = zwd_and_tpw.sel(time=slice(*times))
station = zwd_file.as_posix().split('/')[-1].split('_')[0]
tpw = zwd_and_tpw['Tpw']
if IMS_file is None:
T = xr.open_dataset(ims_path / 'GNSS_5mins_TD_ALL_1996_2019.nc')
T = T['tela']
else:
# load the 10 mins temperature data from IMS:
T = xr.open_dataset(IMS_file)
T = T.to_array(name='t').squeeze(drop=True)
zwd_and_tpw = zwd_and_tpw.rename({'{}_WetZ'.format(
station): 'WetZ', '{}_WetZ_error'.format(station): 'WetZ_error'})
zwd = zwd_and_tpw[['WetZ', 'WetZ_error']]
zwd.attrs['station'] = station
pw_gps = produce_single_station_IPW(zwd, T, mda=results, model_name=model)
compare_to_sounding2(pw_gps['PW'], tpw, station=station, **compare_kwargs)
return pw_gps, tpw
def mean_ZWD_over_sound_time_and_fit_tstm(path=work_yuval,
sound_path=sound_path,
data_type='phys',
ims_path=ims_path,
gps_station='tela',
times=['2007', '2019'], plot=False,
cats=None,
savepath=None):
import xarray as xr
import joblib
from aux_gps import multi_time_coord_slice
from aux_gps import path_glob
from aux_gps import xr_reindex_with_date_range
from sounding_procedures import load_field_from_radiosonde
from sounding_procedures import get_field_from_radiosonde
"""mean the WetZ over the gps station soundings datetimes to get a more
accurate realistic measurement comparison to soundings"""
# tpw = load_field_from_radiosonde(path=sound_path, field='PW', data_type=data_type,
# reduce='max',dim='Height', plot=False)
min_time = get_field_from_radiosonde(path=sound_path, field='min_time', data_type='phys',
reduce=None, plot=False)
max_time = get_field_from_radiosonde(path=sound_path, field='max_time', data_type='phys',
reduce=None, plot=False)
sound_time = get_field_from_radiosonde(path=sound_path, field='sound_time', data_type='phys',
reduce=None, plot=False)
min_time = min_time.dropna('sound_time').values
max_time = max_time.dropna('sound_time').values
# load the zenith wet daley for GPS (e.g.,TELA) station:
file = path_glob(path, 'ZWD_thresh_*.nc')[0]
zwd = xr.open_dataset(file)[gps_station]
zwd_error = xr.open_dataset(file)[gps_station + '_error']
freq = pd.infer_freq(zwd.time.values)
if not freq:
zwd = xr_reindex_with_date_range(zwd)
zwd_error = xr_reindex_with_date_range(zwd_error)
freq = pd.infer_freq(zwd.time.values)
min_time = zwd.time.sel(time=min_time, method='nearest').values
max_time = zwd.time.sel(time=max_time, method='nearest').values
da_group = multi_time_coord_slice(min_time, max_time, freq=freq,
time_dim='time', name='sound_time')
zwd[da_group.name] = da_group
zwd_error[da_group.name] = da_group
ds = zwd.groupby(zwd[da_group.name]).mean(
'time').to_dataset(name='{}'.format(gps_station))
ds['{}_std'.format(gps_station)] = zwd.groupby(
zwd[da_group.name]).std('time')
ds['{}_error'.format(gps_station)] = zwd_error.groupby(
zwd[da_group.name]).mean('time')
ds['sound_time'] = sound_time.dropna('sound_time')
# ds['tpw_bet_dagan'] = tpw
wetz = ds['{}'.format(gps_station)]
wetz_error = ds['{}_error'.format(gps_station)]
# do the same for surface temperature:
file = path_glob(ims_path, 'GNSS_5mins_TD_ALL_*.nc')[0]
td = xr.open_dataset(file)[gps_station].to_dataset(name='ts')
min_time = td.time.sel(time=min_time, method='nearest').values
max_time = td.time.sel(time=max_time, method='nearest').values
freq = pd.infer_freq(td.time.values)
da_group = multi_time_coord_slice(min_time, max_time, freq=freq,
time_dim='time', name='sound_time')
td[da_group.name] = da_group
ts_sound = td.ts.groupby(td[da_group.name]).mean('time')
ts_sound['sound_time'] = sound_time.dropna('sound_time')
ds['{}_ts'.format(gps_station)] = ts_sound
ts_sound = ts_sound.rename({'sound_time': 'time'})
# prepare ts-tm data:
tm = get_field_from_radiosonde(path=sound_path, field='Tm', data_type=data_type,
reduce=None, dim='Height', plot=False)
ts = get_field_from_radiosonde(path=sound_path, field='Ts', data_type=data_type,
reduce=None, dim='Height', plot=False)
tstm = xr.Dataset()
tstm['Tm'] = tm
tstm['Ts'] = ts
tstm = tstm.rename({'sound_time': 'time'})
# select a model:
mda = ml_models_T_from_sounding(categories=cats, models=['LR', 'TSEN'],
physical_file=tstm, plot=plot,
times=times)
# compute the kappa function and multiply by ZWD to get PW(+error):
k, dk = produce_kappa_ml_with_cats(ts_sound, mda=mda, model_name='TSEN')
ds['{}_pw'.format(gps_station)] = k.rename({'time': 'sound_time'}) * wetz
ds['{}_pw_error'.format(gps_station)] = np.sqrt(
wetz_error**2.0 + dk**2.0)
# divide by kappa calculated from bet_dagan ts to get bet_dagan zwd:
k = kappa(tm, Tm_input=True)
# ds['zwd_bet_dagan'] = ds['tpw_bet_dagan'] / k
if savepath is not None:
m = mda.to_dataset('name')
for model in m:
joblib.dump(m[model].item(), savepath/'ts_tm_{}.pkl'.format(model))
print('{} saved to {}.'.format(model, savepath))
return ds, mda
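# Usage sketch (illustrative), mirroring step 3 of the module docstring and the
# call in compare_different_cats_bet_dagan_tela above:
# ds, mda = mean_ZWD_over_sound_time_and_fit_tstm(plot=False,
#                                                 times=['2013-09', '2020'], cats=None)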
def load_mda(path=work_yuval):
import joblib
from aux_gps import path_glob
import xarray as xr
files = path_glob(path, 'ts_tm_*.pkl')
names = [x.as_posix().split('/')[-1].split('.')[0].split('_')[-1] for x in files]
dsl = [joblib.load(x) for x in files]
dsl = [xr.DataArray(x) for x in dsl]
mda = xr.concat(dsl, 'name')
mda['name'] = names
mda.attrs['time_dim'] = 'time'
mda.attrs['LR_whole_stderr_slope'] = 0.006420637318868484
return mda
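# Usage sketch (illustrative): mda = load_mda(work_yuval) reloads the saved Ts-Tm
# models; it can then be passed on like the `mda`/`results` objects used elsewhere
# in this module (e.g. produce_single_station_IPW(zwd, T, mda=mda, model_name='TSEN')).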
#def align_physical_bet_dagan_soundings_pw_to_gps_station_zwd(
# phys_sound_file, ims_path=ims_path, gps_station='tela',
# savepath=work_yuval, model=None):
# """compare the IPW of the physical soundings of bet dagan station to
# the any gps station - using IMS temperature of that gps station"""
# from aux_gps import get_unique_index
# from aux_gps import keep_iqr
# from aux_gps import dim_intersection
# import xarray as xr
# import numpy as np
# filename = '{}_zwd_aligned_with_physical_bet_dagan.nc'.format(gps_station)
# if not (savepath / filename).is_file():
# print('saving {} to {}'.format(filename, savepath))
# # first load physical bet_dagan Tpw, Ts, Tm and dt_range:
# phys = xr.open_dataset(phys_sound_file)
# # clean and merge:
# p_list = [get_unique_index(phys[x], 'sound_time')
# for x in ['Ts', 'Tm', 'Tpw', 'dt_range']]
# phys_ds = xr.merge(p_list)
# phys_ds = keep_iqr(phys_ds, 'sound_time', k=2.0)
# phys_ds = phys_ds.rename({'Ts': 'ts', 'Tm': 'tm'})
# # load the zenith wet daley for GPS (e.g.,TELA) station:
# zwd = load_gipsyx_results(station=gps_station, plot_fields=None)
# # zwd = xr.open_dataset(zwd_file)
# zwd = zwd[['WetZ', 'WetZ_error']]
# # loop over dt_range and average the results on PW:
# wz_list = []
# wz_std = []
# wz_error_list = []
# for i in range(len(phys_ds['dt_range'].sound_time)):
# min_time = phys_ds['dt_range'].isel(sound_time=i).sel(bnd='Min').values
# max_time = phys_ds['dt_range'].isel(sound_time=i).sel(bnd='Max').values
# wetz = zwd['WetZ'].sel(time=slice(min_time, max_time)).mean('time')
# wetz_std = zwd['WetZ'].sel(time=slice(min_time, max_time)).std('time')
# wetz_error = zwd['WetZ_error'].sel(time=slice(min_time, max_time)).mean('time')
# wz_std.append(wetz_std)
# wz_list.append(wetz)
# wz_error_list.append(wetz_error)
# wetz_gps = xr.DataArray(wz_list, dims='sound_time')
# wetz_gps.name = '{}_WetZ'.format(gps_station)
# wetz_gps_error = xr.DataArray(wz_error_list, dims='sound_time')
# wetz_gps_error.name = '{}_WetZ_error'.format(gps_station)
# wetz_gps_std = xr.DataArray(wz_list, dims='sound_time')
# wetz_gps_std.name = '{}_WetZ_std'.format(gps_station)
# wetz_gps['sound_time'] = phys_ds['sound_time']
# wetz_gps_error['sound_time'] = phys_ds['sound_time']
# new_time = dim_intersection([wetz_gps, phys_ds['Tpw']], 'sound_time')
# wetz_gps = wetz_gps.sel(sound_time=new_time)
# tpw_bet_dagan = phys_ds.Tpw.sel(sound_time=new_time)
# zwd_and_tpw = xr.merge([wetz_gps, wetz_gps_error, wetz_gps_std,
# tpw_bet_dagan])
# zwd_and_tpw = zwd_and_tpw.rename({'sound_time': 'time'})
# comp = dict(zlib=True, complevel=9) # best compression
# encoding = {var: comp for var in zwd_and_tpw.data_vars}
# zwd_and_tpw.to_netcdf(savepath / filename, 'w', encoding=encoding)
# print('Done!')
# return
# else:
# print('found file!')
# zwd_and_tpw = xr.open_dataset(savepath / filename)
# wetz = zwd_and_tpw['{}_WetZ'.format(gps_station)]
# wetz_error = zwd_and_tpw['{}_WetZ_error'.format(gps_station)]
# # load the 10 mins temperature data from IMS:
# td = xr.open_dataset(ims_path/'GNSS_5mins_TD_ALL_1996_2019.nc')
# td = td[gps_station]
# td.name = 'Ts'
# # tela_T = tela_T.resample(time='5min').ffill()
# # compute the kappa function and multiply by ZWD to get PW(+error):
# k, dk = kappa_ml(td, model=model, verbose=True)
# kappa = k.to_dataset(name='{}_kappa'.format(gps_station))
# kappa['{}_kappa_error'.format(gps_station)] = dk
# PW = (
# kappa['{}_kappa'.format(gps_station)] *
# wetz).to_dataset(
# name='{}_PW'.format(gps_station)).squeeze(
# drop=True)
# PW['{}_PW_error'.format(gps_station)] = np.sqrt(
# wetz_error**2.0 +
# kappa['{}_kappa_error'.format(gps_station)]**2.0)
# PW['TPW_bet_dagan'] = zwd_and_tpw['Tpw']
# PW = PW.dropna('time')
# return PW
def read_log_files(path, savepath=None, fltr='updated_by_shlomi',
suff='*.log'):
"""read gnss log files for putting them into ocean tides model"""
import pandas as pd
from aux_gps import path_glob
from tabulate import tabulate
def to_fwf(df, fname, showindex=False):
from tabulate import simple_separated_format
tsv = simple_separated_format(" ")
# tsv = 'plain'
content = tabulate(
df.values.tolist(), list(
df.columns), tablefmt=tsv, showindex=showindex, floatfmt='f')
open(fname, "w").write(content)
files = sorted(path_glob(path, glob_str=suff))
record = {}
for file in files:
filename = file.as_posix().split('/')[-1]
if fltr not in filename:
continue
station = filename.split('_')[0]
print('reading station {} log file'.format(station))
with open(file) as f:
content = f.readlines()
content = [x.strip() for x in content]
posnames = ['X', 'Y', 'Z']
pos_list = []
for pos in posnames:
text = [
x for x in content if '{} coordinate (m)'.format(pos) in x][0]
xyz = float(text.split(':')[-1])
pos_list.append(xyz)
text = [x for x in content if 'Site Name' in x][0]
name = text.split(':')[-1]
st_id = [x for x in content if 'Four Character ID' in x][0]
st_id = st_id.split(':')[-1]
record[st_id] = pos_list
pos_list.append(name)
df = pd.DataFrame.from_dict(record, orient='index')
posnames.append('name')
df.columns = posnames
if savepath is not None:
savefilename = 'stations_approx_loc.txt'
show_index = [x + ' ' for x in df.index.tolist()]
to_fwf(df, savepath / savefilename, show_index)
# df.to_csv(savepath / savefilename, sep=' ')
print('{} was saved to {}.'.format(savefilename, savepath))
return df
def analyze_missing_rinex_files(path, savepath=None):
from aux_gps import get_timedate_and_station_code_from_rinex
from aux_gps import datetime_to_rinex_filename
from aux_gps import path_glob
import pandas as pd
dt_list = []
files = path_glob(path, '*.Z')
for file in files:
filename = file.as_posix().split('/')[-1][:-2]
dt, station = get_timedate_and_station_code_from_rinex(filename)
dt_list.append(dt)
dt_list = sorted(dt_list)
true = pd.date_range(dt_list[0], dt_list[-1], freq='1D')
# df = pd.DataFrame(dt_list, columns=['downloaded'], index=true)
dif = true.difference(dt_list)
dts = [datetime_to_rinex_filename(station, x) for x in dif]
df_missing = pd.DataFrame(data=dts, index=dif.strftime('%Y-%m-%d'),
columns=['filenames'])
df_missing.index.name = 'dates'
if savepath is not None:
filename = station + '_missing_rinex_files.txt'
df_missing.to_csv(savepath / filename)
print('{} was saved to {}'.format(filename, savepath))
return df_missing
def proc_1minute(path):
stations = pd.read_csv(path + 'Zstations', header=0,
delim_whitespace=True)
station_names = stations['NAME'].values.tolist()
df_list = []
for st_name in station_names:
print('Proccessing ' + st_name + ' Station...')
df = pd.read_csv(PW_stations_path + st_name, delim_whitespace=True)
df.columns = ['date', 'time', 'PW']
df.index = pd.to_datetime(df['date'] + 'T' + df['time'])
df.drop(columns=['date', 'time'], inplace=True)
df_list.append(df)
df = pd.concat(df_list, axis=1)
print('Concatanting to Xarray...')
# ds = xr.concat([df.to_xarray() for df in df_list], dim="station")
# ds['station'] = station_names
df.columns = station_names
ds = df.to_xarray()
ds = ds.rename({'index': 'time'})
# da = ds.to_array(name='PW').squeeze(drop=True)
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in ds.data_vars}
print('Saving to PW_2007-2016.nc')
ds.to_netcdf(work_path + 'PW_2007-2016.nc', 'w', encoding=encoding)
print('Done!')
# clean the data:
# da = da.where(da >= 0, np.nan)
# da = da.where(da < 100, np.nan)
# plot the data:
ds.to_array(dim='station').plot(x='time', col='station', col_wrap=4)
# hist:
# df=ds.to_dataframe()
sl = (df > 0) & (df < 50)
df[sl].hist(bins=30, grid=False, figsize=(15, 8))
return
def parameter_study_ts_tm_TELA_bet_dagan(tel_aviv_IMS_file, path=work_yuval,
coef=[-3, 3], inter=[-300, 300],
span=10, breakdown=True, plot=True):
import xarray as xr
import numpy as np
from aux_gps import dim_intersection
import matplotlib.pyplot as plt
filename = 'TELA_zwd_aligned_with_physical_bet_dagan.nc'
zwd_and_tpw = xr.open_dataset(path / filename)
wetz = zwd_and_tpw['TELA_WetZ']
tpw = zwd_and_tpw['Tpw']
# load the 10 mins temperature data from IMS:
tela_T = xr.open_dataset(tel_aviv_IMS_file)
coef_space = np.linspace(*coef, span)
intercept_space = np.linspace(*inter, span)
model = np.stack([coef_space, intercept_space], axis=0)
if breakdown:
seasons = ['DJF', 'MAM', 'JJA', 'SON']
hours = [0, 12]
rds_list = []
for season in seasons:
for hour in hours:
print('calculating kappa of season {} and hour {}'.format(season, hour))
T = tela_T.to_array(name='TELA_T').squeeze(drop=True)
T = T.where(T['time.season'] == season).dropna('time')
T = T.where(T['time.hour'] == hour).dropna('time')
k, _ = kappa_ml(T, model=model, no_error=True)
print('building results...')
pw = k * wetz
new_time = dim_intersection([pw, tpw])
pw = pw.sel(time=new_time)
tpw_sel = tpw.sel(time=new_time)
rmse = (tpw_sel - pw)**2.0
rmse = np.sqrt(rmse.mean('time'))
mean_error = (tpw_sel - pw).mean('time')
rmse.name = 'RMSE'
mean_error.name = 'MEAN'
merged = xr.merge([mean_error, rmse])
merged = merged.expand_dims(['season', 'hour'])
merged['season'] = [season]
merged['hour'] = [hour]
rds_list.append(merged.stack(prop=['season', 'hour']))
rds = xr.concat(rds_list, 'prop').unstack('prop')
print('Done!')
else:
print('calculating kappa of for all data!')
T = tela_T.to_array(name='TELA_T').squeeze(drop=True)
k, _ = kappa_ml(T, model=model, no_error=True)
print('building results...')
pw = k * wetz
new_time = dim_intersection([pw, tpw])
pw = pw.sel(time=new_time)
tpw_sel = tpw.sel(time=new_time)
rmse = (tpw_sel - pw)**2.0
rmse = np.sqrt(rmse.mean('time'))
mean_error = (tpw_sel - pw).mean('time')
rmse.name = 'RMSE_all'
mean_error.name = 'MEAN_all'
rds = xr.merge([mean_error, rmse])
print('Done!')
if plot:
if not breakdown:
fig, ax = plt.subplots(2, 1, figsize=(12, 8), sharex=True)
rds.MEAN.plot.pcolormesh(ax=ax[0])
rds.RMSE.plot.pcolormesh(ax=ax[1])
else:
fg_mean = rds.MEAN.plot.pcolormesh(row='hour', col='season',
figsize=(20, 10),
cmap='seismic')
[ax.grid() for ax in fg_mean.fig.axes]
# fg_mean.fig.tight_layout()
# fg_mean.fig.subplots_adjust(right=0.9)
fg_rmse = rds.RMSE.plot.pcolormesh(row='hour', col='season',
figsize=(20, 10))
[ax.grid() for ax in fg_rmse.fig.axes]
# fg_mean.fig.tight_layout()
# fg_rmse.fig.subplots_adjust(right=0.9)
return rds
#def get_geo_data_from_gps_stations(gps_names):
# import requests
# from bs4 import BeautifulSoup as bs
# user = "anonymous"
# passwd = "<PASSWORD>"
# # Make a request to the endpoint using the correct auth values
# auth_values = (user, passwd)
# response = requests.get(url, auth=auth_values)
# soup = bs(response.text, "lxml")
# allLines = soup.text.split('\n')
# X = [x for x in allLines if 'XLR coordinate' in x][0].split()[-1]
# Y = [x for x in allLines if 'Y coordinate' in x][0].split()[-1]
# Z = [x for x in allLines if 'Z coordinate' in x][0].split()[-1]
#
## Convert JSON to dict and print
#print(response.json())
def read_stations_to_dataset(path, group_name='israeli', save=False,
names=None):
import xarray as xr
if names is None:
stations = []
for filename in sorted(path.glob('garner_trop_[!all_stations]*.nc')):
st_name = filename.as_posix().split('/')[-1].split('.')[0].split('_')[-1]
print('Reading station {}'.format(st_name))
da = xr.open_dataarray(filename)
da = da.dropna('time')
stations.append(da)
ds = xr.merge(stations)
if save:
savefile = 'garner_' + group_name + '_stations.nc'
print('saving {} to {}'.format(savefile, path))
ds.to_netcdf(path / savefile, 'w')
print('Done!')
return ds
def filter_stations(path, group_name='israeli', save=False):
"""filter bad values in trop products stations"""
import xarray as xr
from aux_gps import Zscore_xr
filename = 'garner_' + group_name + '_stations.nc'
print('Reading {}.nc from {}'.format(filename, path))
ds = xr.open_dataset(path / filename)
ds['zwd'].attrs['units'] = 'Zenith Wet Delay in cm'
stations = [x for x in ds.data_vars.keys()]
for station in stations:
print('filtering station {}'.format(station))
# first , remove negative values:
ds[station] = ds[station].where(ds[station].sel(zwd='value') > 0)
# get zscore of data and errors:
zscore_val = Zscore_xr(ds[station].sel(zwd='value'), dim='time')
zscore_sig = Zscore_xr(ds[station].sel(zwd='sigma'), dim='time')
# filter for zscore <5 for data and <3 for error:
ds[station] = ds[station].where(np.abs(zscore_val) < 5)
ds[station] = ds[station].where(np.abs(zscore_sig) < 3)
if save:
filename = 'garner_' + group_name + '_stations_filtered.nc'
print('saving {} to {}'.format(filename, path))
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in ds.data_vars}
ds.to_netcdf(path / filename, 'w', encoding=encoding)
print('Done!')
return ds
# def overlap_time_xr(*args, union=False):
# """return the intersection of datetime objects from time field in *args"""
# # caution: for each arg input is xarray with dim:time
# time_list = []
# for ts in args:
# time_list.append(ts.time.values)
# if union:
# union = set.union(*map(set, time_list))
# un = sorted(list(union))
# return un
# else:
# intersection = set.intersection(*map(set, time_list))
# intr = sorted(list(intersection))
# return intr
def produce_pw_statistics(path=work_yuval, resample_to_mm=True, thresh=50,
pw_input=None):
import xarray as xr
from scipy.stats import kurtosis
from scipy.stats import skew
import pandas as pd
if pw_input is None:
pw = xr.load_dataset(path / 'GNSS_PW_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
else:
pw = pw_input
if resample_to_mm:
pw = pw.resample(time='MS').mean()
pd.options.display.float_format = '{:.1f}'.format
mean = pw.mean('time').reset_coords().to_array(
'index').to_dataframe('Mean')
std = pw.std('time').reset_coords().to_array('index').to_dataframe('SD')
median = pw.median('time').reset_coords().to_array(
'index').to_dataframe('Median')
q5 = pw.quantile(0.05, 'time').reset_coords(drop=True).to_array(
'index').to_dataframe('5th')
q95 = pw.quantile(0.95, 'time').reset_coords(drop=True).to_array(
'index').to_dataframe('95th')
maximum = pw.max('time').reset_coords().to_array(
'index').to_dataframe('Maximum')
minimum = pw.min('time').reset_coords().to_array(
'index').to_dataframe('Minimum')
sk = pw.map(skew, nan_policy='omit').to_array(
'index').to_dataframe('Skewness')
kurt = pw.map(kurtosis, nan_policy='omit').to_array(
'index').to_dataframe('Kurtosis')
df = pd.concat([mean, std, median, q5, q95,
maximum, minimum, sk, kurt], axis=1)
cols = []
cols.append('Site ID')
cols += [x for x in df.columns]
df['Site ID'] = df.index.str.upper()
df = df[cols]
df.index.name = ''
return df
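# Example usage (sketch; assumes the homogenized GNSS PW file exists under work_yuval):
#
#     stats_table = produce_pw_statistics(resample_to_mm=True, thresh=50)
#
# The returned frame has one row per station ('Site ID') and one column per statistic
# (Mean, SD, Median, 5th/95th percentiles, Maximum, Minimum, Skewness, Kurtosis).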
def produce_geo_gnss_solved_stations(path=gis_path,
file='israeli_gnss_coords.txt',
add_distance_to_coast=False,
climate_path=None,
plot=True):
import geopandas as gpd
import pandas as pd
from pathlib import Path
from ims_procedures import get_israeli_coast_line
cwd = Path().cwd()
df = pd.read_csv(cwd / file, delim_whitespace=True)
df = df[['lat', 'lon', 'alt', 'name']]
isr = gpd.read_file(path / 'Israel_and_Yosh.shp')
isr.crs = {'init': 'epsg:4326'}
stations = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon,
df.lat),
crs=isr.crs)
if add_distance_to_coast:
isr_coast = get_israeli_coast_line(path=path)
coast_lines = [isr_coast.to_crs(
'epsg:2039').loc[x].geometry for x in isr_coast.index]
for station in stations.index:
point = stations.to_crs('epsg:2039').loc[station, 'geometry']
stations.loc[station, 'distance'] = min(
[x.distance(point) for x in coast_lines]) / 1000.0
# define groups for longterm analysis, north to south, west to east:
coastal_dict = {
key: 0 for (key) in [
'kabr',
'bshm',
'csar',
'tela',
'alon',
'slom',
'nizn']}
highland_dict = {key: 1 for (key) in
['nzrt', 'mrav', 'yosh', 'jslm', 'klhv', 'yrcm', 'ramo']}
eastern_dict = {key: 2 for (key) in
['elro', 'katz', 'drag', 'dsea', 'spir', 'nrif', 'elat']}
groups_dict = {**coastal_dict, **highland_dict, **eastern_dict}
stations['groups_annual'] = pd.Series(groups_dict)
# define groups with climate code
gr1_dict = {
key: 0 for (key) in [
'kabr',
'bshm',
'csar',
'tela',
'alon',
'nzrt',
'mrav',
'yosh',
'jslm',
'elro',
'katz']}
gr2_dict = {key: 1 for (key) in
['slom', 'klhv', 'yrcm', 'drag']}
gr3_dict = {key: 2 for (key) in
['nizn', 'ramo', 'dsea', 'spir', 'nrif', 'elat']}
groups_dict = {**gr1_dict, **gr2_dict, **gr3_dict}
stations['groups_climate'] = pd.Series(groups_dict)
if climate_path is not None:
cc = pd.read_csv(climate_path / 'gnss_station_climate_code.csv',
index_col='station')
stations = stations.join(cc)
# cc, ccc = assign_climate_classification_to_gnss(path=climate_path)
# stations['climate_class'] = cc
# stations['climate_code'] = ccc
if plot:
ax = isr.plot()
stations.plot(ax=ax, column='alt', cmap='Greens',
edgecolor='black', legend=True)
for x, y, label in zip(stations.lon, stations.lat,
stations.index):
ax.annotate(label, xy=(x, y), xytext=(3, 3),
textcoords="offset points")
return stations
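# Example usage (sketch; assumes israeli_gnss_coords.txt sits in the working directory
# and the shapefiles are available under gis_path):
#
#     gdf = produce_geo_gnss_solved_stations(add_distance_to_coast=True, plot=False)
#     coastal = gdf[gdf['groups_annual'] == 0]  # 0=coastal, 1=highland, 2=eastern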
def add_UERRA_xy_to_israeli_gps_coords(path=work_yuval, era5_path=era5_path):
import xarray as xr
from aux_gps import path_glob
from aux_gps import get_nearest_lat_lon_for_xy
import pandas as pd
from aux_gps import calculate_distance_between_two_lat_lon_points
file = path_glob(era5_path, 'UERRA*.nc')[0]
uerra = xr.open_dataset(file)
ulat = uerra['latitude']
ulon = uerra['longitude']
df = produce_geo_gnss_solved_stations(
plot=False, add_distance_to_coast=True)
points = df[['lat', 'lon']].values
xy = get_nearest_lat_lon_for_xy(ulat, ulon, points)
udf = pd.DataFrame(xy, index=df.index, columns=['y', 'x'])
udf['lat'] = [ulat.isel(y=xi, x=yi).item() for (xi, yi) in xy]
udf['lon'] = [ulon.isel(y=xi, x=yi).item() for (xi, yi) in xy]
ddf = calculate_distance_between_two_lat_lon_points(
df['lat'],
df['lon'],
udf['lat'],
udf['lon'],
orig_epsg='4326',
meter_epsg='2039')
ddf /= 1000 # distance in km
udf['distance_to_orig'] = ddf
return udf
def produce_geo_gps_stations(path=gis_path, file='All_gps_stations.txt',
plot=True):
import geopandas as gpd
import xarray as xr
from pathlib import Path
from aux_gps import get_latlonalt_error_from_geocent_error
stations_df = pd.read_csv(file, index_col='name',
delim_whitespace=True)
isr_dem = xr.open_rasterio(path / 'israel_dem.tif')
alt_list = []
for index, row in stations_df.iterrows():
lat = row['lat']
lon = row['lon']
alt = isr_dem.sel(band=1, x=lon, y=lat, method='nearest').values.item()
alt_list.append(float(alt))
stations_df['alt_dem'] = alt_list
isr = gpd.read_file(path / 'israel_demog2012.shp')
isr.crs = {'init': 'epsg:4326'}
stations = gpd.GeoDataFrame(stations_df,
geometry=gpd.points_from_xy(stations_df.lon,
stations_df.lat),
crs=isr.crs)
stations_isr = gpd.sjoin(stations, isr, op='within')
stations_approx = pd.read_csv(Path().cwd()/'stations_approx_loc.txt',
delim_whitespace=True)
lon, lat, alt = get_latlonalt_error_from_geocent_error(
stations_approx['X'].values, stations_approx['Y'].values,
stations_approx['Z'].values)
stations_approx.columns = ['approx_X', 'approx_Y', 'approx_Z']
stations_approx['approx_lat'] = lat
stations_approx['approx_lon'] = lon
stations_approx['approx_alt'] = alt
stations_isr_df = pd.DataFrame(stations_isr.drop(columns=['geometry',
'index_right']))
compare_df = stations_isr_df.join(stations_approx)
alt_list = []
for index, row in compare_df.iterrows():
lat = row['approx_lat']
lon = row['approx_lon']
alt = isr_dem.sel(band=1, x=lon, y=lat, method='nearest').values.item()
alt_list.append(float(alt))
compare_df['approx_alt_dem'] = alt_list
if plot:
ax = isr.plot()
stations_isr.plot(ax=ax, column='alt', cmap='Greens',
edgecolor='black', legend=True)
for x, y, label in zip(stations_isr.lon, stations_isr.lat,
stations_isr.index):
ax.annotate(label, xy=(x, y), xytext=(3, 3),
textcoords="offset points")
return stations_isr
def get_minimum_distance(geo_ims, geo_gps, path, plot=True):
def min_dist(point, gpd2):
gpd2['Dist'] = gpd2.apply(
lambda row: point.distance(
row.geometry), axis=1)
geoseries = gpd2.iloc[gpd2['Dist'].values.argmin()]
geoseries.loc['distance'] = gpd2['Dist'].values.min()
return geoseries
min_list = []
for gps_rows in geo_gps.iterrows():
ims_min_series = min_dist(gps_rows[1]['geometry'], geo_ims)
min_list.append(ims_min_series[['ID', 'name_hebrew', 'name_english',
'lon', 'lat', 'alt', 'starting_date',
'distance']])
geo_df = pd.concat(min_list, axis=1).T
geo_df['lat'] = geo_df['lat'].astype(float)
geo_df['lon'] = geo_df['lon'].astype(float)
geo_df['alt'] = geo_df['alt'].astype(float)
geo_df.index = geo_gps.index
stations_meta = ims_api_get_meta()
# select ims_stations that appear in the geo_df (closest to gps stations):
ims_selected = stations_meta.loc[stations_meta.stationId.isin(
geo_df.ID.values.tolist())]
    # get the channel of temperature measurement of the selected stations:
cid = []
for index, row in geo_df.iterrows():
channel = [irow['TD_channel'] for ind, irow in ims_selected.iterrows()
if irow['stationId'] == row['ID']]
if channel:
cid.append(channel[0])
else:
cid.append(None)
    # store the channel_id in geo_df so that the exact channel can be downloaded
    # later for each IMS station matched to a GPS station:
geo_df['channel_id'] = cid
geo_df['channel_id'] = geo_df['channel_id'].fillna(0).astype(int)
geo_df['ID'] = geo_df.ID.astype(int)
geo_df['distance'] = geo_df.distance.astype(float)
geo_df['starting_date'] = | pd.to_datetime(geo_df.starting_date) | pandas.to_datetime |
# coding=utf-8
import pandas as pd
import numpy as np
import re
from matplotlib.ticker import FuncFormatter
def number_formatter(number, pos=None):
"""Convert a number into a human readable format."""
magnitude = 0
while abs(number) >= 1000:
magnitude += 1
number /= 1000.0
return '%.1f%s' % (number, ['', 'K', 'M', 'B', 'T', 'Q'][magnitude])
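# Informal examples of the scaling above (the suffix index grows by one for every
# factor of 1000): number_formatter(950) -> '950.0', number_formatter(1_234_567) -> '1.2M'.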
def cuenta_tipo_de_dato(df, tipo):
    """
    Counts how many columns of the given data type are found in the data frame read from the csv.
    ==========
    * Args:
         - df: the data frame whose column data types will be counted.
         - tipo: the name of the data type to look for ('numerico' for numeric, or a pandas dtype name such as 'object').
    * Return:
         - int: the number of columns of the requested data type.
    ==========
    Example:
        # To count the numeric columns
        >>conteo_numericos = cuenta_tipo_de_dato(df, 'numerico')
        # To count the text columns
        >>conteo_texto = cuenta_tipo_de_dato(df, 'object')
    """
vars_type = df.dtypes
vars_type = pd.DataFrame(vars_type, columns = ['tipo'])
if tipo == 'numerico':
cantidad_tipo = len(vars_type.loc[vars_type["tipo"] == "int64"])
cantidad_tipo = cantidad_tipo + len(vars_type.loc[vars_type["tipo"] == "float64"])
else:
cantidad_tipo = len(vars_type.loc[vars_type["tipo"] == tipo])
return cantidad_tipo
def cuenta_nulos_por_columnas(df):
"""
Función que realiza una tabla con la cuenta de missing values por columna y obtiene la proporción que estos missing
values representan del total.
==========
* Args:
- df: el data frame al que se le va a realizar el conteo de los nulos por cada columna.
* Return:
- Data Frame: entrega el data frame que indica cuantos elementos nulos fueron encontrados en cada columna.
==========
Ejemplo:
>>faltates_por_columna = cuenta_nulos_por_columnas(df)
"""
valores_nulos = df.isnull().sum()
porcentaje_valores_nulos = 100 * df.isnull().sum() / len(df)
tabla_valores_nulos = pd.concat([valores_nulos, porcentaje_valores_nulos], axis=1)
tabla_valores_nulos_ordenada = tabla_valores_nulos.rename(
columns={0: 'Missing Values', 1: '% del Total'})
tabla_valores_nulos_ordenada = tabla_valores_nulos_ordenada[
tabla_valores_nulos_ordenada.iloc[:, 1] != 0].sort_values(
'% del Total', ascending=False).round(1)
print("El dataframe tiene " + str(df.shape[1]) + " columnas.\n"
"Hay " + str(tabla_valores_nulos_ordenada.shape[0]) +
" columnas que tienen NA's.")
return tabla_valores_nulos_ordenada
def CreaTablaConteoPorcentaje(df, nomColumna, booleanNA):
"""
Esta función crea la tabla con información sobre los conteos y el porcentaje al que corresponden del total de los datos.
==========
* Args:
- df: el data frame completo.
- nomColumna: El nombre de la columna sobre la que se quiere realizar la tabla.
- booleanNA: Indicador booleano que indica si se requiere que se muestren los NA's en la tabla.
* Return:
- Data Frame: entrega el data frame con los la categoría de la columna RESPUESTA modificada.
==========
Ejemplo:
>>df = CreaTablaConteoPorcentaje(df, 'RESPUESTA', True)
"""
df_resultado = df[nomColumna].value_counts(dropna=booleanNA)
df_resultado = pd.DataFrame(data=df_resultado)
df_resultado = df_resultado[nomColumna].map('{:,}'.format)
df_resultado = pd.DataFrame(data=df_resultado)
    # compute the percentages
df_resultado['porcentaje'] = df[nomColumna].value_counts(dropna=booleanNA, normalize=True).mul(100).round(2).astype(str)+'%'
return df_resultado
def CreaTablaConteoPorcentaje_sin_stringformat(df, nomColumna, booleanNA):
"""
Esta función crea la tabla con información sobre los conteos y el porcentaje al que corresponden del total de los datos.
==========
* Args:
- df: el data frame completo.
- nomColumna: El nombre de la columna sobre la que se quiere realizar la tabla.
- booleanNA: Indicador booleano que indica si se requiere que se muestren los NA's en la tabla.
* Return:
- Data Frame: entrega el data frame con los la categoría de la columna RESPUESTA modificada.
==========
Ejemplo:
>>df = CreaTablaConteoPorcentaje(df, 'RESPUESTA', True)
"""
df_resultado = df[nomColumna].value_counts(dropna=booleanNA)
df_resultado = | pd.DataFrame(data=df_resultado) | pandas.DataFrame |
from collections import OrderedDict
import numpy as np
import pandas as pd
from sklearn.ensemble import BaggingClassifier, RandomForestRegressor
from sklearn.tree import DecisionTreeClassifier
from unittest.mock import patch
from zipline.data import bundles
from tests import assert_output, project_test, generate_random_dates, assert_structure
def get_assets(ticker_count):
bundle = bundles.load('eod-quotemedia')
return bundle.asset_finder.retrieve_all(bundle.asset_finder.sids[:ticker_count])
@project_test
def test_train_valid_test_split(fn):
columns = ['test column 1', 'test column 2', 'test column 3']
dates = generate_random_dates(10)
assets = get_assets(3)
index = pd.MultiIndex.from_product([dates, assets])
values = np.arange(len(index) * len(columns)).reshape([len(columns), len(index)]).T
targets = np.arange(len(index))
fn_inputs = {
'all_x': pd.DataFrame(values, index, columns),
'all_y': pd.Series(targets, index, name='target'),
'train_size': 0.6,
'valid_size': 0.2,
'test_size': 0.2}
fn_correct_outputs = OrderedDict([
('X_train', pd.DataFrame(values[:18], index[:18], columns=columns)),
('X_valid', pd.DataFrame(values[18:24], index[18:24], columns=columns)),
('X_test', pd.DataFrame(values[24:], index[24:], columns=columns)),
('y_train', pd.Series(targets[:18], index[:18])),
('y_valid', pd.Series(targets[18:24], index[18:24])),
('y_test', pd.Series(targets[24:], index[24:]))])
assert_output(fn, fn_inputs, fn_correct_outputs, check_parameter_changes=False)
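    # Sanity check of the expected slices above: 10 dates x 3 assets = 30 rows, so the
    # 60/20/20 split falls at rows 18 and 24; with the date-major MultiIndex that means
    # the first 6 dates form the training set, the next 2 the validation set and the
    # last 2 the test set.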
@project_test
def test_non_overlapping_samples(fn):
columns = ['test column 1', 'test column 2']
dates = generate_random_dates(8)
assets = get_assets(3)
index = pd.MultiIndex.from_product([dates, assets])
values = np.arange(len(index) * len(columns)).reshape([len(columns), len(index)]).T
targets = np.arange(len(index))
fn_inputs = {
'x': pd.DataFrame(values, index, columns),
'y': | pd.Series(targets, index) | pandas.Series |
#!/usr/bin/env python3
#
# Copyright © 2016, Evolved Binary Ltd
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import argparse
from datetime import datetime
from collections import namedtuple
from json.decoder import JSONDecodeError
import pathlib
from typing import Dict, List, NewType, Sequence, Tuple
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pandas.core.frame import DataFrame
import re
from sys import maxsize
import json
# Types
Params = NewType('Params', dict[str, str])
BMParams = namedtuple("BMParams", "primary secondary")
Benchmark = NewType('Benchmark', str)
BMResult = namedtuple("BMResult", "value, score, error")
ResultSet = NewType('ResultSet', dict[Benchmark, Sequence[BMResult]])
ResultSets = NewType('ResultSets', dict[Tuple, ResultSet])
const_datetime_str = datetime.today().isoformat()
class RunnerError(Exception):
"""Base class for exceptions in this module."""
def __init__(self, message: str):
self.message = message
def error(message: str):
raise RunnerError(message)
def uncomment(line: str) -> bool:
if line.strip().startswith('#'):
return False
return True
def read_config_file(configFile: pathlib.Path):
lines = [line.strip()
for line in configFile.open().readlines() if uncomment(line)]
try:
return json.loads('\n'.join(lines))
except JSONDecodeError as e:
error(
f'JSON config file {configFile} ({configFile.absolute()}) error {str(e)}')
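# Example of the accepted config format (illustrative only; the key names shown are
# not prescribed anywhere): '#'-prefixed lines are stripped before json.loads, so the
# file may be annotated JSON such as
#
#     # benchmark parameters
#     {
#         "iterations": 5
#     }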
def optional(key: str, dict: Dict, op=None):
if key in dict:
if op:
return op(dict[key])
else:
return dict[key]
else:
return None
def required(key: str, dict: Dict):
if key in dict:
return dict[key]
else:
error(f'{key} missing from JMH config')
# read files, merge allegedly similar results
#
# return a single data frame
#
def normalize_data_frame_from_path(path: pathlib.Path):
files = []
if path.is_dir():
files = sorted(path.rglob("*.csv"))
else:
files.append(path)
normalized = None
for file in files:
try:
df = | pd.read_csv(file) | pandas.read_csv |
# coding: utf-8
import glob
import os
import pandas as pd
import numpy as np
import shutil
# BLOCK FOR JFC1 AT FENDT
rootdir = "/home/maik/b2drop/cosmicsense/inbox/fendt/timeseries/crns/JFC-1-sd"
rtdir = "/home/maik/b2drop/cosmicsense/inbox/fendt/timeseries/crns/JFC-1"
trgdir = "/media/x/cosmicsense/data/fendt/crns"
tmpfile = "tmpfile.txt"
id_sd = [5, 6, 14, 18, 19]
id_sd_other = [2, 3, 4, 21, 22, 23]
id_rt = [1, 2, 3, 4, 5, 6, 7, 8, 16, 17, 18, 19, 21, 22, 23, 24, 25]
id_all = [1, 2, 3, 4, 5, 6, 7, 8, 14, 16, 17, 18, 19, 21, 22, 23, 24, 25]
crns = {
# 2: {"pattern": ".002",
# "colnames": ["rec_id", "datetime", "press1", "temp1", "relhum1", "volt", "counts1", "nsecs1"]
# },
#
# 3: {"pattern": ".003",
# "colnames": ["rec_id", "datetime", "press1", "temp1",
# "relhum1", "volt", "counts1", "nsecs1", "counts2", "nsecs2", "T4_C"]
# },
#
# 4: {"pattern": ".004",
# "colnames": ["rec_id", "datetime", "press1", "temp1",
# "relhum1", "volt", "counts1", "nsecs1", "counts2", "nsecs2", "T4_C"]
# },
# //RecordNum,Date Time(UTC),P1_mb,T1_C,RH1,Vbat,N1Cts,N2Cts,N1ET_sec,N2ET_sec,N1T_C,N1RH,N2T_C,N2RH,MetOne092_1,P4_mb,T_CS215,RH_CS215,
5: {"pattern": ".005",
"colnames": ["rec_id", "datetime", "press4", "press1", "temp1",
"relhum1", "temp_ext", "relhum_ext", "volt", "counts1", "nsecs1", "N1T_C", "N1RH"]
},
6: {"pattern": ".006",
"colnames": ["rec_id", "datetime", "press4", "press1", "temp1",
"relhum1", "temp_ext", "relhum_ext", "volt", "counts1", "nsecs1", "N1T_C", "N1RH"]
},
14: {"pattern": ".836",
"colnames": ["rec_id", "datetime", "press4", "press1", "temp1",
"relhum1", "temp_ext", "relhum_ext", "volt", "counts1", "nsecs1", "N1T_C", "N1RH"]
},
18: {"pattern": ".018",
"colnames": ["rec_id", "datetime", "press1", "temp1", "relhum1", "volt", "counts1", "nsecs1",
"counts2", "nsecs2", "temp_ext", "relhum_ext"]
},
19: {"pattern": ".019",
"colnames": ["rec_id", "datetime", "press1", "temp1",
"relhum1", "volt", "counts1", "nsecs1", "temp_ext", "relhum_ext"]
}
}
for i, id in enumerate(id_sd):
crnsdir = os.path.join(rootdir, str(id))
print(crnsdir)
if not os.path.exists(crnsdir):
print("Path not found: %s" % crnsdir)
try:
os.remove(tmpfile)
except:
pass
for name in glob.glob(crnsdir+'/**/*'+crns[id]["pattern"], recursive=True):
print("\t", name)
fin = open(name, "r")
body = fin.read()
# replace comment character
body = body.replace("//", "#")
# replace zombie line endings
body = body.replace(",\r\n", "\r\n")
# comment out these lines
body = body.replace("CRS#1:", "#CRS#1")
body = body.replace("CRS#2:", "#CRS#2")
myfile = open(tmpfile, 'a')
myfile.write(body)
myfile.close()
df = | pd.read_csv(tmpfile, sep=",", comment="#", header=None, error_bad_lines=False, warn_bad_lines=True) | pandas.read_csv |
import strat_models
import networkx as nx
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
np.random.seed(123)
"""
Cardiovascular disease dataset
data is from https://www.kaggle.com/sulianova/cardiovascular-disease-dataset
"""
#gender: 1-women, 2-men
data = pd.read_csv("data/cardio_train.csv", sep=";")
#Basic feature engineering
data = data.drop(["id"], axis=1)
data.age = [int(years) for years in np.round(data.age/365.25)]  # convert age from days to whole years
data.gender = ["Male" if gender==2 else "Female" for gender in data.gender]
data = data[data.age != 30]
dummies_chol = pd.get_dummies(data.cholesterol, prefix="cholesterol")
dummies_gluc = pd.get_dummies(data.gluc, prefix="glucose")
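# The two get_dummies calls above are expected to add indicator columns such as
# cholesterol_1..cholesterol_3 and glucose_1..glucose_3 (assuming the Kaggle coding
# in which both variables take the levels 1, 2 and 3); they are concatenated back
# onto the frame below.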
data = | pd.concat([data, dummies_chol, dummies_gluc], axis=1) | pandas.concat |
"""Exports burst data to other data structures."""
import pandas as pd
import numpy as np
import os
import itertools
import pickle
from itertools import groupby
def df_export(bursts, offsets, from_svo=False):
"""Exports the burst data to a dataframe.
TODO: remove offsets parameter, as it is not used to generate the dataframe
(as far as I can tell).
TODO: does the 'bursts' column need to be kept for every edge entry?
"""
key_list = []
burst_list = []
offset_list = []
for k, v in bursts.items():
key_list.append(k)
burst_list.append(v)
offset_list.append(offsets[k])
if from_svo == True:
df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 12:04:39 2018
@author: saintlyvi
"""
import time
import pandas as pd
import numpy as np
from sklearn.cluster import MiniBatchKMeans, KMeans
import somoclu
from experiment.algorithms.cluster_prep import xBins, preprocessX, clusterStats, bestClusters, saveLabels, saveResults
def kmeans(X, range_n_clusters, top_lbls=10, preprocessing = None, bin_X=False, experiment_name=None):
"""
    This function applies the MiniBatchKMeans algorithm from sklearn to inputs X for each value in range_n_clusters.
    If preprocessing is not None, X is normalised with sklearn.preprocessing.normalize()
Returns cluster stats, cluster centroids and cluster labels.
"""
if experiment_name is None:
save = False
else:
if preprocessing is None:
pass
else:
experiment_name = experiment_name+'_'+ preprocessing
save = True
#apply pre-binning
if bin_X != False:
Xbin = xBins(X, bin_X)
else:
Xbin = {'all':X}
for b, ids in Xbin.items():
try:
A = X.loc[ids,:]
except:
A = ids
#apply preprocessing
A = preprocessX(A, norm=preprocessing)
centroids = pd.DataFrame()
stats = pd.DataFrame()
cluster_lbls = pd.DataFrame()
        dim = 0  # set dim to 0 to match SOM formatting
cluster_lbls_dim = {}
stats_dim = pd.DataFrame()
for n_clust in range_n_clusters:
clusterer = MiniBatchKMeans(n_clusters=n_clust, random_state=10)
#train clustering algorithm
tic = time.time()
clusterer.fit(A)
cluster_labels = clusterer.predict(A)
toc = time.time()
## Calculate scores
cluster_stats = clusterStats({}, n_clust, A, cluster_labels,
preprocessing = preprocessing, transform = None,
tic = tic, toc = toc)
cluster_centroids = clusterer.cluster_centers_
eval_results, centroid_results = saveResults(experiment_name, cluster_stats,
cluster_centroids, dim, b, save)
stats_dim = stats_dim.append(eval_results)
centroids = centroids.append(centroid_results)
cluster_lbls_dim[n_clust] = cluster_labels
#outside n_clust loop
best_clusters, best_stats = bestClusters(cluster_lbls_dim, stats_dim, top_lbls)
cluster_lbls = pd.concat([cluster_lbls, best_clusters], axis=1)
stats = pd.concat([stats, best_stats], axis=0)
stats.reset_index(drop=True, inplace=True)
if save is True:
saveLabels(cluster_lbls, stats)
return stats, centroids, cluster_lbls
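# Example usage (sketch; X is assumed to be a DataFrame with one observation per row):
#
#     stats, centroids, labels = kmeans(X, range_n_clusters=range(2, 11),
#                                       preprocessing=None, bin_X=False,
#                                       experiment_name=None)
#
# With experiment_name=None nothing is written to disk; the three frames are simply
# returned for inspection.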
def som(X, range_n_dim, top_lbls=10, preprocessing = None, bin_X=False, transform=None, experiment_name=None, **kwargs):
"""
This function applies the self organising maps algorithm from somoclu on inputs X over square maps of range_n_dim.
    If preprocessing is not None, X is normalised with sklearn.preprocessing.normalize()
If kmeans = True, the KMeans algorithm from sklearn is applied to the SOM and returns clusters
kwargs can be n_clusters = range(start, end, interval) OR list()
Returns cluster stats, cluster centroids and cluster labels.
"""
for dim in range_n_dim:
limit = int(np.sqrt(len(X)/20))
        if dim > limit:  # verify that the number of nodes is sensible for the size of the input data
return print('Input size too small for map. Largest n should be ' + str(limit))
else:
pass
if experiment_name is None:
save = False
else:
if preprocessing is None:
pass
else:
experiment_name = experiment_name+'_'+ preprocessing
save = True
#apply pre-binning
if bin_X != False:
Xbin = xBins(X, bin_X)
else:
Xbin = {'0-4000':X}
for b, ids in Xbin.items():
try:
A = X.loc[ids,:]
except:
A = ids
#apply preprocessing
A = preprocessX(A, norm=preprocessing)
centroids = pd.DataFrame()
stats = pd.DataFrame()
cluster_lbls = | pd.DataFrame() | pandas.DataFrame |
from __future__ import division
import inspect
import json
import re
from datetime import datetime
from functools import wraps
import jsonschema
from numbers import Number
import numpy as np
import pandas as pd
from dateutil.parser import parse
from scipy import stats
from six import string_types
from .base import DataSet
from .util import DocInherit, parse_result_format, \
is_valid_partition_object, is_valid_categorical_partition_object, is_valid_continuous_partition_object
class MetaPandasDataSet(DataSet):
"""
MetaPandasDataSet is a thin layer between DataSet and PandasDataSet. This two-layer inheritance is required to make @classmethod decorators work.
Practically speaking, that means that MetaPandasDataSet implements
expectation decorators, like `column_map_expectation` and `column_aggregate_expectation`,
and PandasDataset implements the expectation methods themselves.
"""
def __init__(self, *args, **kwargs):
super(MetaPandasDataSet, self).__init__(*args, **kwargs)
@classmethod
def column_map_expectation(cls, func):
"""Constructs an expectation using column-map semantics.
The MetaPandasDataSet implementation replaces the "column" parameter supplied by the user with a pandas Series
        object containing the actual column from the relevant pandas dataframe. This simplifies the logic of
        implementing expectations while preserving the standard DataSet signature and expected behavior.
See :func:`column_map_expectation <great_expectations.dataset.base.DataSet.column_map_expectation>` \
for full documentation of this function.
"""
@cls.expectation(inspect.getargspec(func)[0][1:])
@wraps(func)
def inner_wrapper(self, column, mostly=None, result_format=None, *args, **kwargs):
if result_format is None:
result_format = self.default_expectation_args["result_format"]
series = self[column]
boolean_mapped_null_values = series.isnull()
element_count = int(len(series))
nonnull_values = series[boolean_mapped_null_values==False]
nonnull_count = int((boolean_mapped_null_values==False).sum())
boolean_mapped_success_values = func(self, nonnull_values, *args, **kwargs)
success_count = boolean_mapped_success_values.sum()
unexpected_list = list(series[(boolean_mapped_success_values==False)&(boolean_mapped_null_values==False)])
unexpected_index_list = list(series[(boolean_mapped_success_values==False)&(boolean_mapped_null_values==False)].index)
success, percent_success = self._calc_map_expectation_success(success_count, nonnull_count, mostly)
return_obj = self._format_column_map_output(
result_format, success,
element_count, nonnull_count,
unexpected_list, unexpected_index_list
)
return return_obj
inner_wrapper.__name__ = func.__name__
inner_wrapper.__doc__ = func.__doc__
return inner_wrapper
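    # Illustration only (not part of the library): an expectation written against this
    # decorator just returns a boolean Series over the non-null values; null handling,
    # the `mostly` threshold and result formatting are supplied by the wrapper above.
    # A hypothetical subclass method could look like:
    #
    #     @MetaPandasDataSet.column_map_expectation
    #     def expect_column_values_to_be_positive(self, column, mostly=None,
    #                                             result_format=None, include_config=False,
    #                                             catch_exceptions=None, meta=None):
    #         return column.map(lambda x: x > 0)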
@classmethod
def column_aggregate_expectation(cls, func):
"""Constructs an expectation using column-aggregate semantics.
The MetaPandasDataSet implementation replaces the "column" parameter supplied by the user with a pandas
        Series object containing the actual column from the relevant pandas dataframe. This simplifies the logic of
        implementing expectations while preserving the standard DataSet signature and expected behavior.
See :func:`column_aggregate_expectation <great_expectations.dataset.base.DataSet.column_aggregate_expectation>` \
for full documentation of this function.
"""
@cls.expectation(inspect.getargspec(func)[0][1:])
@wraps(func)
def inner_wrapper(self, column, result_format = None, *args, **kwargs):
if result_format is None:
result_format = self.default_expectation_args["result_format"]
series = self[column]
null_indexes = series.isnull()
element_count = int(len(series))
nonnull_values = series[null_indexes == False]
nonnull_count = int((null_indexes == False).sum())
null_count = element_count - nonnull_count
evaluation_result = func(self, nonnull_values, *args, **kwargs)
if 'success' not in evaluation_result:
raise ValueError("Column aggregate expectation failed to return required information: success")
if ('result_obj' not in evaluation_result) or ('observed_value' not in evaluation_result['result_obj']):
raise ValueError("Column aggregate expectation failed to return required information: observed_value")
# Retain support for string-only output formats:
result_format = parse_result_format(result_format)
return_obj = {
'success': bool(evaluation_result['success'])
}
if result_format['result_obj_format'] == 'BOOLEAN_ONLY':
return return_obj
return_obj['result_obj'] = {
'observed_value': evaluation_result['result_obj']['observed_value'],
"element_count": element_count,
"missing_count": null_count,
"missing_percent": null_count * 1.0 / element_count if element_count > 0 else None
}
if result_format['result_obj_format'] == 'BASIC':
return return_obj
if 'details' in evaluation_result['result_obj']:
return_obj['result_obj']['details'] = evaluation_result['result_obj']['details']
if result_format['result_obj_format'] in ["SUMMARY", "COMPLETE"]:
return return_obj
raise ValueError("Unknown result_format %s." % (result_format['result_obj_format'],))
return inner_wrapper
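    # Illustration only (not part of the library): a column-aggregate expectation must
    # return a dict with at least 'success' and a 'result_obj' containing an
    # 'observed_value'; the wrapper above adds element and missing counts. A
    # hypothetical example:
    #
    #     @MetaPandasDataSet.column_aggregate_expectation
    #     def expect_column_mean_to_be_positive(self, column, result_format=None,
    #                                           include_config=False,
    #                                           catch_exceptions=None, meta=None):
    #         mean = column.mean()
    #         return {'success': mean > 0, 'result_obj': {'observed_value': mean}}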
class PandasDataSet(MetaPandasDataSet, pd.DataFrame):
"""
PandasDataset instantiates the great_expectations Expectations API as a subclass of a pandas.DataFrame.
For the full API reference, please see :func:`DataSet <great_expectations.dataset.base.DataSet>`
"""
def __init__(self, *args, **kwargs):
super(PandasDataSet, self).__init__(*args, **kwargs)
self.add_default_expectations()
def add_default_expectations(self):
"""
The default behavior for PandasDataSet is to explicitly include expectations that every column present upon initialization exists.
"""
for col in self.columns:
self.append_expectation({
"expectation_type": "expect_column_to_exist",
"kwargs": {
"column": col
}
})
### Expectation methods ###
@DocInherit
@DataSet.expectation(['column'])
def expect_column_to_exist(
self, column, column_index=None, result_format=None, include_config=False,
catch_exceptions=None, meta=None
):
if column in self:
return {
"success": (column_index is None) or (self.columns.get_loc(column) == column_index)
}
else:
return {
"success": False
}
@DocInherit
@DataSet.expectation(['min_value', 'max_value'])
def expect_table_row_count_to_be_between(self,
min_value=0,
max_value=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None
):
        # Check that min_value and max_value are numeric (non-numeric values raise a ValueError)
try:
if min_value is not None:
float(min_value).is_integer()
if max_value is not None:
float(max_value).is_integer()
except ValueError:
raise ValueError("min_value and max_value must be integers")
row_count = self.shape[0]
if min_value != None and max_value != None:
outcome = row_count >= min_value and row_count <= max_value
elif min_value == None and max_value != None:
outcome = row_count <= max_value
elif min_value != None and max_value == None:
outcome = row_count >= min_value
return {
'success': outcome,
'result_obj': {
'observed_value': row_count
}
}
@DocInherit
@DataSet.expectation(['value'])
def expect_table_row_count_to_equal(self,
value,
result_format=None, include_config=False, catch_exceptions=None, meta=None
):
try:
if value is not None:
float(value).is_integer()
except ValueError:
raise ValueError("value must be an integer")
if value is None:
raise ValueError("value must be provided")
if self.shape[0] == value:
outcome = True
else:
outcome = False
return {
'success':outcome,
'result_obj': {
'observed_value':self.shape[0]
}
}
@DocInherit
@MetaPandasDataSet.column_map_expectation
def expect_column_values_to_be_unique(self, column,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
dupes = set(column[column.duplicated()])
return column.map(lambda x: x not in dupes)
@DocInherit
@DataSet.expectation(['column', 'mostly', 'result_format'])
def expect_column_values_to_not_be_null(self, column,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
if result_format is None:
result_format = self.default_expectation_args["result_format"]
series = self[column]
boolean_mapped_null_values = series.isnull()
element_count = int(len(series))
nonnull_values = series[boolean_mapped_null_values==False]
nonnull_count = int((boolean_mapped_null_values==False).sum())
boolean_mapped_success_values = boolean_mapped_null_values==False
success_count = boolean_mapped_success_values.sum()
unexpected_list = [None for i in list(series[(boolean_mapped_success_values==False)])]
unexpected_index_list = list(series[(boolean_mapped_success_values==False)].index)
unexpected_count = len(unexpected_list)
# Pass element_count instead of nonnull_count, because that's the right denominator for this expectation
success, percent_success = self._calc_map_expectation_success(success_count, element_count, mostly)
return_obj = self._format_column_map_output(
result_format, success,
element_count, nonnull_count,
unexpected_list, unexpected_index_list
)
return return_obj
@DocInherit
@DataSet.expectation(['column', 'mostly', 'result_format'])
def expect_column_values_to_be_null(self, column,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
if result_format is None:
result_format = self.default_expectation_args["result_format"]
series = self[column]
boolean_mapped_null_values = series.isnull()
element_count = int(len(series))
nonnull_values = series[boolean_mapped_null_values==False]
nonnull_count = (boolean_mapped_null_values==False).sum()
boolean_mapped_success_values = boolean_mapped_null_values
success_count = boolean_mapped_success_values.sum()
unexpected_list = list(series[(boolean_mapped_success_values==False)])
unexpected_index_list = list(series[(boolean_mapped_success_values==False)].index)
unexpected_count = len(unexpected_list)
# Pass element_count instead of nonnull_count, because that's the right denominator for this expectation
success, percent_success = self._calc_map_expectation_success(success_count, element_count, mostly)
return_obj = self._format_column_map_output(
result_format, success,
element_count, nonnull_count,
unexpected_list, unexpected_index_list
)
return return_obj
@DocInherit
@MetaPandasDataSet.column_map_expectation
def expect_column_values_to_be_of_type(self, column, type_, target_datasource="numpy",
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
python_avro_types = {
"null":type(None),
"boolean":bool,
"int":int,
"long":int,
"float":float,
"double":float,
"bytes":bytes,
"string":str
}
numpy_avro_types = {
"null":np.nan,
"boolean":np.bool_,
"int":np.int64,
"long":np.longdouble,
"float":np.float_,
"double":np.longdouble,
"bytes":np.bytes_,
"string":np.string_
}
datasource = {"python":python_avro_types, "numpy":numpy_avro_types}
target_type = datasource[target_datasource][type_]
result = column.map(lambda x: type(x) == target_type)
return result
@DocInherit
@MetaPandasDataSet.column_map_expectation
def expect_column_values_to_be_in_type_list(self, column, type_list, target_datasource="numpy",
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
python_avro_types = {
"null":type(None),
"boolean":bool,
"int":int,
"long":int,
"float":float,
"double":float,
"bytes":bytes,
"string":str
}
numpy_avro_types = {
"null":np.nan,
"boolean":np.bool_,
"int":np.int64,
"long":np.longdouble,
"float":np.float_,
"double":np.longdouble,
"bytes":np.bytes_,
"string":np.string_
}
datasource = {"python":python_avro_types, "numpy":numpy_avro_types}
target_type_list = [datasource[target_datasource][t] for t in type_list]
result = column.map(lambda x: type(x) in target_type_list)
return result
@DocInherit
@MetaPandasDataSet.column_map_expectation
def expect_column_values_to_be_in_set(self, column, values_set,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
return column.map(lambda x: x in values_set)
@DocInherit
@MetaPandasDataSet.column_map_expectation
def expect_column_values_to_not_be_in_set(self, column, values_set,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
return column.map(lambda x: x not in values_set)
@DocInherit
@MetaPandasDataSet.column_map_expectation
def expect_column_values_to_be_between(self,
column,
min_value=None, max_value=None,
parse_strings_as_datetimes=None,
allow_cross_type_comparisons=None,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None
):
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None")
if parse_strings_as_datetimes:
if min_value:
min_value = parse(min_value)
if max_value:
max_value = parse(max_value)
temp_column = column.map(parse)
else:
temp_column = column
if min_value != None and max_value != None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value")
def is_between(val):
# TODO Might be worth explicitly defining comparisons between types (for example, between strings and ints).
# Ensure types can be compared since some types in Python 3 cannot be logically compared.
# print type(val), type(min_value), type(max_value), val, min_value, max_value
            if val is None:
return False
else:
if min_value != None and max_value != None:
if allow_cross_type_comparisons:
try:
return (min_value <= val) and (val <= max_value)
except TypeError:
return False
else:
if (isinstance(val, string_types) != isinstance(min_value, string_types)) or (isinstance(val, string_types) != isinstance(max_value, string_types)):
raise TypeError("Column values, min_value, and max_value must either be None or of the same type.")
return (min_value <= val) and (val <= max_value)
elif min_value == None and max_value != None:
if allow_cross_type_comparisons:
try:
return val <= max_value
except TypeError:
return False
else:
if isinstance(val, string_types) != isinstance(max_value, string_types):
raise TypeError("Column values, min_value, and max_value must either be None or of the same type.")
return val <= max_value
elif min_value != None and max_value == None:
if allow_cross_type_comparisons:
try:
return min_value <= val
except TypeError:
return False
else:
if isinstance(val, string_types) != isinstance(min_value, string_types):
raise TypeError("Column values, min_value, and max_value must either be None or of the same type.")
return min_value <= val
else:
return False
return temp_column.map(is_between)
@DocInherit
@MetaPandasDataSet.column_map_expectation
def expect_column_values_to_be_increasing(self, column, strictly=None, parse_strings_as_datetimes=None,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
if parse_strings_as_datetimes:
temp_column = column.map(parse)
col_diff = temp_column.diff()
#The first element is null, so it gets a bye and is always treated as True
col_diff[0] = pd.Timedelta(1)
if strictly:
return col_diff > pd.Timedelta(0)
else:
return col_diff >= pd.Timedelta(0)
else:
col_diff = column.diff()
#The first element is null, so it gets a bye and is always treated as True
col_diff[col_diff.isnull()] = 1
if strictly:
return col_diff > 0
else:
return col_diff >= 0
@DocInherit
@MetaPandasDataSet.column_map_expectation
def expect_column_values_to_be_decreasing(self, column, strictly=None, parse_strings_as_datetimes=None,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
if parse_strings_as_datetimes:
temp_column = column.map(parse)
col_diff = temp_column.diff()
#The first element is null, so it gets a bye and is always treated as True
col_diff[0] = | pd.Timedelta(-1) | pandas.Timedelta |
"""
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
from collections import defaultdict
from datetime import timedelta
from textwrap import dedent
from typing import List, Optional, Set
import warnings
import numpy as np
import pandas._libs.window as libwindow
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.core.dtypes.common import (
ensure_float64,
is_bool,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDateOffset,
ABCDatetimeIndex,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas._typing import Axis, FrameOrSeries
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.generic import _shared_docs
from pandas.core.groupby.base import GroupByMixin
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
class _Window(PandasObject, SelectionMixin):
_attributes = [
"window",
"min_periods",
"center",
"win_type",
"axis",
"on",
"closed",
] # type: List[str]
exclusions = set() # type: Set[str]
def __init__(
self,
obj,
window=None,
min_periods: Optional[int] = None,
center: Optional[bool] = False,
win_type: Optional[str] = None,
axis: Axis = 0,
on: Optional[str] = None,
closed: Optional[str] = None,
**kwargs
):
self.__dict__.update(kwargs)
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.center = center
self.win_type = win_type
self.win_freq = None
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self) -> Optional[bool]:
return None
@property
def _on(self):
return None
@property
def is_freq_type(self) -> bool:
return self.win_type == "freq"
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in [
"right",
"both",
"left",
"neither",
]:
raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'")
def _create_blocks(self):
"""
Split data into blocks & return conformed data.
"""
obj = self._selected_obj
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)
blocks = obj._to_dict_of_blocks(copy=False).values()
return blocks, obj
def _gotitem(self, key, ndim, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : str / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
"%r object has no attribute %r" % (type(self).__name__, attr)
)
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self) -> str:
return self.__class__.__name__
def __repr__(self) -> str:
"""
Provide a nice str repr of our rolling object.
"""
attrs = (
"{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None
)
return "{klass} [{attrs}]".format(
klass=self._window_type, attrs=",".join(attrs)
)
def __iter__(self):
url = "https://github.com/pandas-dev/pandas/issues/11704"
raise NotImplementedError("See issue #11704 {url}".format(url=url))
def _get_index(self) -> Optional[np.ndarray]:
"""
Return index as an ndarray.
Returns
-------
None or ndarray
"""
if self.is_freq_type:
return self._on.asi8
return None
def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
"""Convert input to numpy arrays for Cython routines"""
if values is None:
values = getattr(self._selected_obj, "values", self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError(
"ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(action=self._window_type, dtype=values.dtype)
)
else:
try:
values = ensure_float64(values)
except (ValueError, TypeError):
raise TypeError(
"cannot handle this type -> {0}" "".format(values.dtype)
)
# Always convert inf to nan
values[np.isinf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None) -> FrameOrSeries:
"""
Wrap a single result.
"""
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
from pandas import to_timedelta
result = to_timedelta(result.ravel(), unit="ns").values.reshape(
result.shape
)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries:
"""
Wrap the results.
Parameters
----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
exclude: list of columns to exclude, default to None
"""
from pandas import Series, concat
from pandas.core.index import ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
# exclude nuisance columns so that they are not reindexed
if exclude is not None and exclude:
columns = [c for c in columns if c not in exclude]
if not columns:
raise DataError("No numeric types to aggregate")
if not len(final):
return obj.astype("float64")
return concat(final, axis=1).reindex(columns=columns, copy=False)
def _center_window(self, result, window) -> np.ndarray:
"""
Center the result in the window.
"""
if self.axis > result.ndim - 1:
raise ValueError(
"Requested axis is larger then no. of argument " "dimensions"
)
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (ABCSeries, ABCDataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, func, *args, **kwargs):
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
return self.apply(func, raw=False, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs["sum"] = dedent(
"""
Calculate %(name)s sum of given DataFrame or Series.
Parameters
----------
*args, **kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed value.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
%(name)s sum.
See Also
--------
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.rolling(3).sum()
0 NaN
1 NaN
2 6.0
3 9.0
4 12.0
dtype: float64
>>> s.expanding(3).sum()
0 NaN
1 NaN
2 6.0
3 10.0
4 15.0
dtype: float64
>>> s.rolling(3, center=True).sum()
0 NaN
1 6.0
2 9.0
3 12.0
4 NaN
dtype: float64
For DataFrame, each %(name)s sum is computed column-wise.
>>> df = pd.DataFrame({"A": s, "B": s ** 2})
>>> df
A B
0 1 1
1 2 4
2 3 9
3 4 16
4 5 25
>>> df.rolling(3).sum()
A B
0 NaN NaN
1 NaN NaN
2 6.0 14.0
3 9.0 29.0
4 12.0 50.0
"""
)
_shared_docs["mean"] = dedent(
"""
Calculate the %(name)s mean of the values.
Parameters
----------
*args
Under Review.
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
The below examples will show rolling mean calculations with window sizes of
two and three, respectively.
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).mean()
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
>>> s.rolling(3).mean()
0 NaN
1 NaN
2 2.0
3 3.0
dtype: float64
"""
)
class Window(_Window):
"""
Provide rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
If its an offset then this will be the time period of each window. Each
window will be a variable sized based on the observations included in
the time-period. This is only valid for datetimelike indexes. This is
new in 0.19.0
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
`min_periods` will default to 1. Otherwise, `min_periods` will default
to the size of the window.
center : bool, default False
Set the labels at the center of the window.
win_type : str, default None
Provide a window type. If ``None``, all points are evenly weighted.
See the notes below for further information.
on : str, optional
For a DataFrame, a datetime-like column on which to calculate the rolling
window, rather than the DataFrame's index. Provided integer column is
ignored and excluded from result since an integer index is not used to
calculate the rolling window.
axis : int or str, default 0
closed : str, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
Returns
-------
a Window or Rolling sub-classed for the particular operation
See Also
--------
expanding : Provides expanding transformations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width)
* ``exponential`` (needs tau), center is set to None.
If ``win_type=None`` all points are evenly weighted. To learn more about
different window types see `scipy.signal window functions
<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 0.5
2 1.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
... index = [pd.Timestamp('20130101 09:00:00'),
... pd.Timestamp('20130101 09:00:02'),
... pd.Timestamp('20130101 09:00:03'),
... pd.Timestamp('20130101 09:00:05'),
... pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
"""
def validate(self):
super().validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window <= 0:
raise ValueError("window must be > 0 ")
import_optional_dependency(
"scipy", extra="Scipy is required to generate window weight."
)
import scipy.signal as sig
if not isinstance(self.win_type, str):
raise ValueError("Invalid win_type {0}".format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError("Invalid win_type {0}".format(self.win_type))
else:
raise ValueError("Invalid window {0}".format(window))
def _prep_window(self, **kwargs):
"""
Provide validation for our window type, return the window
we have already been validated.
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com.asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {
"kaiser": ["beta"],
"gaussian": ["std"],
"general_gaussian": ["power", "width"],
"slepian": ["width"],
"exponential": ["tau"],
}
if win_type in arg_map:
win_args = _pop_args(win_type, arg_map[win_type], kwargs)
if win_type == "exponential":
# exponential window requires the first arg (center)
# to be set to None (necessary for symmetric window)
win_args.insert(0, None)
return tuple([win_type] + win_args)
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = "%s window requires %%s" % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
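    # For reference, the window arguments popped above are passed through the
    # aggregation call, e.g. df.rolling(5, win_type='gaussian').mean(std=2) or
    # df.rolling(5, win_type='kaiser').sum(beta=14); 'boxcar' needs no extra argument.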
def _apply_window(self, mean=True, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
mean : bool, default True
If True computes weighted mean, else weighted sum
Returns
-------
y : same type as input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj = self._create_blocks()
block_list = list(blocks)
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return libwindow.roll_window(
np.concatenate((arg, additional_nans)) if center else arg,
window,
minp,
avg=mean,
)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/DataFrame",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name="window")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply_window(mean=False, **kwargs)
@Substitution(name="window")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply_window(mean=True, **kwargs)
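# Hedged usage sketch (added for illustration, not part of the original source):
# with a weighted window, extra keyword arguments such as `std` are forwarded
# to scipy.signal.get_window, e.g.
#   >>> import pandas as pd
#   >>> s = pd.Series(range(5), dtype="float64")
#   >>> s.rolling(3, win_type="gaussian").mean(std=1)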
class _GroupByMixin(GroupByMixin):
"""
Provide the groupby facilities.
"""
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop("parent", None) # noqa
groupby = kwargs.pop("groupby", None)
if groupby is None:
groupby, obj = obj, obj.obj
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
super().__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch("count")
corr = GroupByMixin._dispatch("corr", other=None, pairwise=None)
cov = GroupByMixin._dispatch("cov", other=None, pairwise=None)
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Dispatch to apply; we are stripping all of the _apply kwargs and
performing the original function call on the grouped object.
"""
def f(x, name=name, *args):
x = self._shallow_copy(x)
if isinstance(name, str):
return getattr(x, name)(*args, **kwargs)
return x.apply(name, *args, **kwargs)
return self._groupby.apply(f)
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Rolling statistical measure using supplied function.
Designed to be used with passed-in Cython array-based functions.
Parameters
----------
func : str/callable to apply
name : str, optional
name of this function
window : int/array, default to _get_window()
center : bool, default to self.center
check_minp : function, default to _use_window
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj = self._create_blocks()
block_list = list(blocks)
index_as_array = self._get_index()
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, str):
cfunc = getattr(libwindow, func, None)
if cfunc is None:
raise ValueError(
"we do not support this function "
"in libwindow.{func}".format(func=func)
)
def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = ensure_float64(arg)
return cfunc(arg, window, minp, index_as_array, closed, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(
np.concatenate((x, additional_nans)),
window,
min_periods=self.min_periods,
closed=self.closed,
)
else:
def calc(x):
return func(
x, window, min_periods=self.min_periods, closed=self.closed
)
with np.errstate(all="ignore"):
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
class _Rolling_and_Expanding(_Rolling):
_shared_docs["count"] = dedent(
r"""
The %(name)s count of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = pd.Series([2, 3, np.nan, 10])
>>> s.rolling(2).count()
0 1.0
1 2.0
2 1.0
3 1.0
dtype: float64
>>> s.rolling(3).count()
0 1.0
1 2.0
2 2.0
3 2.0
dtype: float64
>>> s.rolling(4).count()
0 1.0
1 2.0
2 2.0
3 3.0
dtype: float64
"""
)
def count(self):
blocks, obj = self._create_blocks()
# Validate the index
self._get_index()
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
results = []
for b in blocks:
result = b.notna().astype(int)
result = self._constructor(
result,
window=window,
min_periods=0,
center=self.center,
axis=self.axis,
closed=self.closed,
).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
_shared_docs["apply"] = dedent(
r"""
Apply an arbitrary function to each %(name)s window.
Parameters
----------
func : function
Must produce a single value from an ndarray input if ``raw=True``
or a single value from a Series if ``raw=False``.
raw : bool, default None
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` or ``None`` : the passed function will receive ndarray
objects instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
The `raw` parameter is required and will show a FutureWarning if
not passed. In the future `raw` will default to False.
.. versionadded:: 0.23.0
*args, **kwargs
Arguments and keyword arguments to be passed into func.
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
)
def apply(self, func, raw=None, args=(), kwargs={}):
from pandas import Series
kwargs.pop("_level", None)
window = self._get_window()
offset = _offset(window, self.center)
index_as_array = self._get_index()
# TODO: default is for backward compat
# change to False in the future
if raw is None:
warnings.warn(
"Currently, 'apply' passes the values as ndarrays to the "
"applied function. In the future, this will change to passing "
"it as Series objects. You need to specify 'raw=True' to keep "
"the current behaviour, and you can pass 'raw=False' to "
"silence this warning",
FutureWarning,
stacklevel=3,
)
raw = True
def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
if not raw:
arg = Series(arg, index=self.obj.index)
return libwindow.roll_generic(
arg,
window,
minp,
index_as_array,
closed,
offset,
func,
raw,
args,
kwargs,
)
return self._apply(f, func, args=args, kwargs=kwargs, center=False, raw=raw)
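# Hedged usage sketch (illustrative): with raw=True the applied function
# receives plain ndarrays, which is much faster for NumPy reductions, e.g.
#   >>> import numpy as np
#   >>> import pandas as pd
#   >>> pd.Series([1.0, 2.0, 3.0, 4.0]).rolling(2).apply(np.sum, raw=True)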
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply("roll_sum", "sum", **kwargs)
_shared_docs["max"] = dedent(
"""
Calculate the %(name)s maximum.
Parameters
----------
*args, **kwargs
Arguments and keyword arguments to be passed into func.
"""
)
def max(self, *args, **kwargs):
nv.validate_window_func("max", args, kwargs)
return self._apply("roll_max", "max", **kwargs)
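# Hedged usage sketch (illustrative): rolling maximum over a fixed-size window, e.g.
#   >>> import pandas as pd
#   >>> pd.Series([4, 3, 5, 2, 6]).rolling(3).max()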
_shared_docs["min"] = dedent(
"""
Calculate the %(name)s minimum.
Parameters
----------
**kwargs
For compatibility with other %(name)s methods. Has no effect on
the computed minimum.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with a Series.
DataFrame.%(name)s : Calling object with a DataFrame.
Series.min : Similar method for Series.
DataFrame.min : Similar method for DataFrame.
Examples
--------
Performing a rolling minimum with a window size of 3.
>>> s = pd.Series([4, 3, 5, 2, 6])
>>> s.rolling(3).min()
0 NaN
1 NaN
2 3.0
3 2.0
4 2.0
dtype: float64
"""
)
def min(self, *args, **kwargs):
nv.validate_window_func("min", args, kwargs)
return self._apply("roll_min", "min", **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply("roll_mean", "mean", **kwargs)
_shared_docs["median"] = dedent(
"""
Calculate the %(name)s median.
Parameters
----------
**kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed median.
Returns
-------
Series or DataFrame
Returned type is the same as the original object.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.median : Equivalent method for Series.
DataFrame.median : Equivalent method for DataFrame.
Examples
--------
Compute the rolling median of a series with a window size of 3.
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.rolling(3).median()
0 NaN
1 NaN
2 1.0
3 2.0
4 3.0
dtype: float64
"""
)
def median(self, **kwargs):
return self._apply("roll_median_c", "median", **kwargs)
_shared_docs["std"] = dedent(
"""
Calculate %(name)s standard deviation.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.std : Equivalent method for Series.
DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in Series.std is different from the default
`ddof` of 0 in numpy.std.
A minimum of one period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).std()
0 NaN
1 NaN
2 0.577350
3 1.000000
4 1.000000
5 1.154701
6 0.000000
dtype: float64
>>> s.expanding(3).std()
0 NaN
1 NaN
2 0.577350
3 0.957427
4 0.894427
5 0.836660
6 0.786796
dtype: float64
"""
)
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func("std", args, kwargs)
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(
libwindow.roll_var(arg, window, minp, index_as_array, self.closed, ddof)
)
return self._apply(
f, "std", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs["var"] = dedent(
"""
Calculate unbiased %(name)s variance.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.var : Equivalent method for Series.
DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in :meth:`Series.var` is different from the
default `ddof` of 0 in :func:`numpy.var`.
A minimum of 1 period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).var()
0 NaN
1 NaN
2 0.333333
3 1.000000
4 1.000000
5 1.333333
6 0.000000
dtype: float64
>>> s.expanding(3).var()
0 NaN
1 NaN
2 0.333333
3 0.916667
4 0.800000
5 0.700000
6 0.619048
dtype: float64
"""
)
def var(self, ddof=1, *args, **kwargs):
nv.validate_window_func("var", args, kwargs)
return self._apply(
"roll_var", "var", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs[
"skew"
] = """
Unbiased %(name)s skewness.
Parameters
----------
**kwargs
Keyword arguments to be passed into func.
"""
def skew(self, **kwargs):
return self._apply(
"roll_skew", "skew", check_minp=_require_min_periods(3), **kwargs
)
_shared_docs["kurt"] = dedent(
"""
Calculate unbiased %(name)s kurtosis.
This function uses Fisher's definition of kurtosis without bias.
Parameters
----------
**kwargs
For compatibility with other %(name)s methods. Has no effect on
the computed kurtosis.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.kurt : Equivalent method for Series.
DataFrame.kurt : Equivalent method for DataFrame.
scipy.stats.skew : Third moment of a probability density.
scipy.stats.kurtosis : Reference SciPy method.
Notes
-----
A minimum of 4 periods is required for the %(name)s calculation.
"""
)
def kurt(self, **kwargs):
return self._apply(
"roll_kurt", "kurt", check_minp=_require_min_periods(4), **kwargs
)
_shared_docs["quantile"] = dedent(
"""
Calculate the %(name)s quantile.
Parameters
----------
quantile : float
Quantile to compute. 0 <= quantile <= 1.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.23.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
**kwargs:
For compatibility with other %(name)s methods. Has no effect on
the result.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.quantile : Computes value at the given quantile over all data
in Series.
DataFrame.quantile : Computes values at the given quantile over
requested axis in DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).quantile(.4, interpolation='lower')
0 NaN
1 1.0
2 2.0
3 3.0
dtype: float64
>>> s.rolling(2).quantile(.4, interpolation='midpoint')
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
"""
)
def quantile(self, quantile, interpolation="linear", **kwargs):
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, window)
if quantile == 1.0:
return libwindow.roll_max(
arg, window, minp, index_as_array, self.closed
)
elif quantile == 0.0:
return libwindow.roll_min(
arg, window, minp, index_as_array, self.closed
)
else:
return libwindow.roll_quantile(
arg,
window,
minp,
index_as_array,
self.closed,
quantile,
interpolation,
)
return self._apply(f, "quantile", quantile=quantile, **kwargs)
_shared_docs[
"cov"
] = """
Calculate the %(name)s sample covariance.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
**kwargs
Keyword arguments to be passed into func.
"""
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
# GH 16058: offset window
if self.is_freq_type:
window = self.win_freq
else:
window = self._get_window(other)
def _get_cov(X, Y):
# GH #12373 : rolling functions error on float32 data
# to avoid potential overflow, cast the data to float64
X = X.astype("float64")
Y = Y.astype("float64")
mean = lambda x: x.rolling(
window, self.min_periods, center=self.center
).mean(**kwargs)
count = (X + Y).rolling(window=window, center=self.center).count(**kwargs)
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)
)
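# Hedged usage sketch (illustrative): rolling covariance between two Series;
# DataFrame inputs with pairwise=True return a MultiIndexed result, e.g.
#   >>> import pandas as pd
#   >>> s1, s2 = pd.Series([1.0, 2.0, 3.0, 4.0]), pd.Series([1.0, 3.0, 2.0, 5.0])
#   >>> s1.rolling(3).cov(s2)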
_shared_docs["corr"] = dedent(
"""
Calculate %(name)s correlation.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self.
pairwise : bool, default None
Calculate pairwise combinations of columns within a
DataFrame. If `other` is not specified, defaults to `True`,
otherwise defaults to `False`.
Not relevant for :class:`~pandas.Series`.
**kwargs
Unused.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the
%(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.corr : Equivalent method for Series.
DataFrame.corr : Equivalent method for DataFrame.
%(name)s.cov : Similar method to calculate covariance.
numpy.corrcoef : NumPy Pearson's correlation calculation.
Notes
-----
This function uses Pearson's definition of correlation
(https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).
When `other` is not specified, the output will be self correlation (e.g.
all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`
set to `True`.
Function will return ``NaN`` for correlations of equal valued sequences;
this is the result of a 0/0 division error.
When `pairwise` is set to `False`, only matching columns between `self` and
`other` will be used.
When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame
with the original index on the first level, and the `other` DataFrame
columns on the second level.
In the case of missing elements, only complete pairwise observations
will be used.
Examples
--------
The below example shows a rolling calculation with a window size of
four matching the equivalent function call using :meth:`numpy.corrcoef`.
>>> v1 = [3, 3, 3, 5, 8]
>>> v2 = [3, 4, 4, 4, 8]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> # numpy returns a 2X2 array, the correlation coefficient
>>> # is the number at entry [0][1]
>>> print(fmt.format(np.corrcoef(v1[:-1], v2[:-1])[0][1]))
0.333333
>>> print(fmt.format(np.corrcoef(v1[1:], v2[1:])[0][1]))
0.916949
>>> s1 = pd.Series(v1)
>>> s2 = pd.Series(v2)
>>> s1.rolling(4).corr(s2)
0 NaN
1 NaN
2 NaN
3 0.333333
4 0.916949
dtype: float64
The below example shows a similar rolling calculation on a
DataFrame using the pairwise option.
>>> matrix = np.array([[51., 35.], [49., 30.], [47., 32.],\
[46., 31.], [50., 36.]])
>>> print(np.corrcoef(matrix[:-1,0], matrix[:-1,1]).round(7))
[[1. 0.6263001]
[0.6263001 1. ]]
>>> print(np.corrcoef(matrix[1:,0], matrix[1:,1]).round(7))
[[1. 0.5553681]
[0.5553681 1. ]]
>>> df = pd.DataFrame(matrix, columns=['X','Y'])
>>> df
X Y
0 51.0 35.0
1 49.0 30.0
2 47.0 32.0
3 46.0 31.0
4 50.0 36.0
>>> df.rolling(4).corr(pairwise=True)
X Y
0 X NaN NaN
Y NaN NaN
1 X NaN NaN
Y NaN NaN
2 X NaN NaN
Y NaN NaN
3 X 1.000000 0.626300
Y 0.626300 1.000000
4 X 1.000000 0.555368
Y 0.555368 1.000000
"""
)
def corr(self, other=None, pairwise=None, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_corr(a, b):
a = a.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
b = b.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)
)
class Rolling(_Rolling_and_Expanding):
@cache_readonly
def is_datetimelike(self):
return isinstance(
self._on, (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex)
)
@cache_readonly
def _on(self):
if self.on is None:
return self.obj.index
elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns:
from pandas import Index
return Index(self.obj[self.on])
else:
raise ValueError(
"invalid on specified as {0}, "
"must be a column (if DataFrame) "
"or None".format(self.on)
)
def validate(self):
super().validate()
# we allow rolling on a datetimelike index
if (self.obj.empty or self.is_datetimelike) and isinstance(
self.window, (str, ABCDateOffset, timedelta)
):
self._validate_monotonic()
freq = self._validate_freq()
# we don't allow center
if self.center:
raise NotImplementedError(
"center is not implemented "
"for datetimelike and offset "
"based windows"
)
# this will raise ValueError on non-fixed freqs
self.win_freq = self.window
self.window = freq.nanos
self.win_type = "freq"
# min_periods must be an integer
if self.min_periods is None:
self.min_periods = 1
elif not is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
raise ValueError("window must be non-negative")
if not self.is_datetimelike and self.closed is not None:
raise ValueError(
"closed only implemented for datetimelike " "and offset based windows"
)
def _validate_monotonic(self):
"""
Validate on is_monotonic.
"""
if not self._on.is_monotonic:
formatted = self.on or "index"
raise ValueError("{0} must be " "monotonic".format(formatted))
def _validate_freq(self):
"""
Validate & return window frequency.
"""
from pandas.tseries.frequencies import to_offset
try:
return to_offset(self.window)
except (TypeError, ValueError):
raise ValueError(
"passed window {0} is not "
"compatible with a datetimelike "
"index".format(self.window)
)
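# Hedged usage sketch (illustrative): an offset string such as "2s" is only
# valid on a monotonic datetime-like index; _validate_freq converts it to an
# offset whose nanoseconds become the effective window, e.g.
#   >>> import pandas as pd
#   >>> idx = pd.date_range("2020-01-01", periods=5, freq="S")
#   >>> pd.Series(range(5), index=idx).rolling("2s").sum()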
_agg_see_also_doc = dedent(
"""
See Also
--------
Series.rolling
DataFrame.rolling
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3).sum()
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -2.655105 0.637799 -2.135068
3 -0.971785 -0.600366 -3.280224
4 -0.214334 -1.294599 -3.227500
5 1.514216 2.028250 -2.989060
6 1.074618 5.709767 -2.322600
7 2.718061 3.850718 0.256446
8 -0.289082 2.454418 1.416871
9 0.212668 0.403198 -0.093924
>>> df.rolling(3).agg({'A':'sum', 'B':'min'})
A B
0 NaN NaN
1 NaN NaN
2 -2.655105 -0.165272
3 -0.971785 -1.340923
4 -0.214334 -1.340923
5 1.514216 -1.340923
6 1.074618 0.211596
7 2.718061 -1.647453
8 -0.289082 -1.647453
9 0.212668 -1.647453
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/Dataframe",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name="rolling")
@Appender(_shared_docs["count"])
def count(self):
# different impl for freq counting
if self.is_freq_type:
return self._apply("roll_count", "count")
return super().count()
@Substitution(name="rolling")
@Appender(_shared_docs["apply"])
def apply(self, func, raw=None, args=(), kwargs={}):
return super().apply(func, raw=raw, args=args, kwargs=kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_rolling_func("sum", args, kwargs)
return super().sum(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
def max(self, *args, **kwargs):
nv.validate_rolling_func("max", args, kwargs)
return super().max(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["min"])
def min(self, *args, **kwargs):
nv.validate_rolling_func("min", args, kwargs)
return super().min(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_rolling_func("mean", args, kwargs)
return super().mean(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["median"])
def median(self, **kwargs):
return super().median(**kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["std"])
def std(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("std", args, kwargs)
return super().std(ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["var"])
def var(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("var", args, kwargs)
return super().var(ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["skew"])
def skew(self, **kwargs):
return super().skew(**kwargs)
_agg_doc = dedent(
"""
Examples
--------
The example below will show a rolling calculation with a window size of
four matching the equivalent function call using `scipy.stats`.
>>> arr = [1, 2, 3, 4, 999]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> import scipy.stats
>>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))
-1.200000
>>> print(fmt.format(scipy.stats.kurtosis(arr[1:], bias=False)))
3.999946
>>> s = pd.Series(arr)
>>> s.rolling(4).kurt()
0 NaN
1 NaN
2 NaN
3 -1.200000
4 3.999946
dtype: float64
"""
)
@Appender(_agg_doc)
@Substitution(name="rolling")
@Appender(_shared_docs["kurt"])
def kurt(self, **kwargs):
return super().kurt(**kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["quantile"])
def quantile(self, quantile, interpolation="linear", **kwargs):
return super().quantile(
quantile=quantile, interpolation=interpolation, **kwargs
)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["cov"])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["corr"])
def corr(self, other=None, pairwise=None, **kwargs):
return super().corr(other=other, pairwise=pairwise, **kwargs)
class RollingGroupby(_GroupByMixin, Rolling):
"""
Provide a rolling groupby implementation.
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return Rolling
def _gotitem(self, key, ndim, subset=None):
# we are setting the index on the actual object
# here so our index is carried through to the selected obj
# when we do the splitting for the groupby
if self.on is not None:
self._groupby.obj = self._groupby.obj.set_index(self._on)
self.on = None
return super()._gotitem(key, ndim, subset=subset)
def _validate_monotonic(self):
"""
Validate that on is monotonic;
we don't care for groupby.rolling
because we have already validated at a higher
level.
"""
pass
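# Hedged usage sketch (illustrative): a RollingGroupby is normally obtained via
# DataFrame.groupby(...).rolling(...), e.g.
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({"g": ["a", "a", "b", "b"], "v": [1.0, 2.0, 3.0, 4.0]})
#   >>> df.groupby("g")["v"].rolling(2).mean()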
class Expanding(_Rolling_and_Expanding):
"""
Provide expanding transformations.
.. versionadded:: 0.18.0
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : bool, default False
Set the labels at the center of the window.
axis : int or str, default 0
Returns
-------
a Window sub-classed for the particular operation
See Also
--------
rolling : Provides rolling window calculations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.expanding(2).sum()
B
0 NaN
1 1.0
2 3.0
3 3.0
4 7.0
"""
_attributes = ["min_periods", "center", "axis"]
def __init__(self, obj, min_periods=1, center=False, axis=0, **kwargs):
super().__init__(obj=obj, min_periods=min_periods, center=center, axis=axis)
@property
def _constructor(self):
return Expanding
def _get_window(self, other=None):
"""
Get the window length over which to perform some operation.
Parameters
----------
other : object, default None
The other object that is involved in the operation.
Such an object is involved for operations like covariance.
Returns
-------
window : int
The window length.
"""
axis = self.obj._get_axis(self.axis)
length = len(axis) + (other is not None) * len(axis)
min_periods = self.min_periods or -1
return max(length, min_periods)
_agg_see_also_doc = dedent(
"""
See Also
--------
DataFrame.expanding.aggregate
DataFrame.rolling.aggregate
DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.ewm(alpha=0.5).mean()
A B C
0 -2.385977 -0.102758 0.438822
1 -1.464856 0.569633 -0.490089
2 -0.207700 0.149687 -1.135379
3 -0.471677 -0.645305 -0.906555
4 -0.355635 -0.203033 -0.904111
5 1.076417 1.503943 -1.146293
6 -0.041654 1.925562 -0.588728
7 0.680292 0.132049 0.548693
8 0.067236 0.948257 0.163353
9 -0.286980 0.618493 -0.694496
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/Dataframe",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name="expanding")
@Appender(_shared_docs["count"])
def count(self, **kwargs):
return super().count(**kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["apply"])
def apply(self, func, raw=None, args=(), kwargs={}):
return super().apply(func, raw=raw, args=args, kwargs=kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_expanding_func("sum", args, kwargs)
return super().sum(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
def max(self, *args, **kwargs):
nv.validate_expanding_func("max", args, kwargs)
return super().max(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["min"])
def min(self, *args, **kwargs):
nv.validate_expanding_func("min", args, kwargs)
return super().min(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_expanding_func("mean", args, kwargs)
return super().mean(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["median"])
def median(self, **kwargs):
return super().median(**kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["std"])
def std(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func("std", args, kwargs)
return super().std(ddof=ddof, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["var"])
def var(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func("var", args, kwargs)
return super().var(ddof=ddof, **kwargs)
@Substitution(name="expanding")
@Appender(_doc_template)
@Appender(_shared_docs["skew"])
def skew(self, **kwargs):
return super().skew(**kwargs)
_agg_doc = dedent(
"""
Examples
--------
The example below will show an expanding calculation with a window size of
four matching the equivalent function call using `scipy.stats`.
>>> arr = [1, 2, 3, 4, 999]
>>> import scipy.stats
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))
-1.200000
>>> print(fmt.format(scipy.stats.kurtosis(arr, bias=False)))
4.999874
>>> s = pd.Series(arr)
>>> s.expanding(4).kurt()
0 NaN
1 NaN
2 NaN
3 -1.200000
4 4.999874
dtype: float64
"""
)
@Appender(_agg_doc)
@Substitution(name="expanding")
@Appender(_shared_docs["kurt"])
def kurt(self, **kwargs):
return super().kurt(**kwargs)
@ | Substitution(name="expanding") | pandas.util._decorators.Substitution |
from rvranking.logs import hplogger
from rvranking.sampling.main import prep_samples_list, get_train_test
import pandas as pd
from rvranking.globalVars import _FAKE_ELWC, _EVENT_FEATURES, _RV_FEATURES
def get_data():
sample_list_train, sample_list_test = get_train_test()
x_train, y_train, xy_train = x_y_data(sample_list_train)
x_test, y_test, xy_test = x_y_data(sample_list_test)
return x_train, y_train, xy_train, x_test, y_test, xy_test
def x_y_data(sample_list):
# x_train, y_train = toTensor(sample_list)
x, y, x_per_y = to_data_frame(sample_list)
return x, y, x_per_y
def toTensor(s_list):
"""
:param s_list: list of samples with rvs
:return: feature matrix and labels, e.g.
X = [[ 1, 2, 3],   # 2 samples, 3 features
     [11, 12, 13]]
y = [0, 1]         # classes of each sample
"""
def conc_features(obj):
feat_arr = pd.Series(dtype='int')
feat_list = obj.features()
for f in feat_list:
try:
f_arr = pd.Series(f, dtype='int')
except ValueError:
f_arr = pd.Series([f], dtype='int')
feat_arr = pd.concat([feat_arr, f_arr], ignore_index=True)
return feat_arr
labels = []
feat_matrix = []
for s in s_list:
rvli = s.rvli
s_feat_arr = conc_features(s)
for rv in rvli:
rv_feat_arr = conc_features(rv)
tot_feat_arr = pd.concat([rv_feat_arr, s_feat_arr])
labels.append(rv.relevance)
feat_matrix.append(tot_feat_arr)
return feat_matrix, labels
def to_data_frame(s_list):
"""
:param s_list: list of samples with rvs
:return: feature DataFrame, list of labels, and labels grouped per sample
note: the DataFrame should contain only plain numbers, not nested series (such as tline)
"""
def ext_features(obj):
"""
:param obj:
:return: list of features -> for dataframe
"""
feat_arr = []
feat_list = obj.features()
for f in feat_list:
try:
f_arr = list(f)
except TypeError:
f_arr = [f]
feat_arr.extend(f_arr)
return feat_arr
def get_rv_feat_cols(obj, feat_names):
feat_list = obj.features()
col_names = []
for f, n in zip(feat_list, feat_names):
try:
f_arr = pd.Series(f, dtype='int')
li = list(range(f_arr.size))
f_cols = [n + str(i) for i in li]
except ValueError:
f_cols = [n]
col_names.extend(f_cols)
return col_names
ev_features = _EVENT_FEATURES
rv_features = _RV_FEATURES
rv0 = s_list[0].rvli[0]
rv_cols = get_rv_feat_cols(rv0, rv_features)
all_feat_names = rv_cols + ev_features
labels = []
tot_feat_list = []
labels_per_s = []
for s in s_list:
rvli = s.rvli
s_feat_arr = ext_features(s)
s_labels = []
labels_per_s.append(s_labels)
for rv in rvli:
rv_feat_arr = ext_features(rv)
rv_feat_arr.extend(s_feat_arr)
tot_feat_list.append(rv_feat_arr)
labels.append(rv.relevance)
s_labels.append(rv.relevance)
feat_matrix = | pd.DataFrame(tot_feat_list, columns=all_feat_names, dtype='int') | pandas.DataFrame |
import requests
import time
import pandas
from string import Template
ENDPOINT = 'https://api.portfolio123.com'
AUTH_PATH = '/auth'
SCREEN_ROLLING_BACKTEST_PATH = '/screen/rolling-backtest'
SCREEN_BACKTEST_PATH = '/screen/backtest'
SCREEN_RUN_PATH = '/screen/run'
UNIVERSE_PATH = '/universe'
RANK_PATH = '/rank'
DATA_PATH = '/data'
RANK_RANKS_PATH = '/rank/ranks'
RANK_PERF_PATH = '/rank/performance'
DATA_UNIVERSE_PATH = '/data/universe'
STRATEGY_UNIVERSE_PATH = Template('/strategy/$id')
STOCK_FACTOR_UPLOAD_PATH = Template('/stockFactor/upload/$id')
STOCK_FACTOR_CREATE_UPDATE_PATH = '/stockFactor'
STOCK_FACTOR_DELETE_PATH = Template('/stockFactor/$id')
DATA_SERIES_UPLOAD_PATH = Template('/dataSeries/upload/$id')
DATA_SERIES_CREATE_UPDATE_PATH = '/dataSeries'
DATA_SERIES_DELETE_PATH = Template('/dataSeries/$id')
class ClientException(Exception):
def __init__(self, message, *, resp=None, exception=None):
super().__init__(message)
self._resp = resp
self._exception = exception
def get_resp(self) -> requests.Response:
return self._resp
def get_cause(self) -> Exception:
return self._exception
class Client(object):
"""
class for interfacing with P123 API
"""
def __init__(self, *, api_id, api_key):
self._endpoint = ENDPOINT
self._verify_requests = True
self._max_req_retries = 5
self._timeout = 300
self._token = None
if not isinstance(api_id, str) or not api_id:
raise ClientException('api_id needs to be a non empty str')
if not isinstance(api_key, str) or not api_key:
raise ClientException('api_key needs to be a non empty str')
self._api_id = api_id
self._api_key = api_key
self._session = requests.Session()
def set_endpoint(self, endpoint):
self._endpoint = endpoint
def enable_verify_requests(self):
self._verify_requests = True
def disable_verify_requests(self):
self._verify_requests = False
def set_max_request_retries(self, retries):
if not isinstance(retries, int) or retries < 1 or retries > 10:
raise ClientException('retries needs to be an int between 1 and 10')
self._max_req_retries = retries
def set_timeout(self, timeout):
if not isinstance(timeout, int) or timeout < 1:
raise ClientException('timeout needs to be an int greater than 0')
self._timeout = timeout
def get_token(self):
return self._token
def auth(self):
"""
Authenticates and sets the Bearer authorization header on success. This method doesn't need to be called
explicitly since all requests first check if the authorization header is set and attempt to re-authenticate
if the session expires.
:return: None; raises ClientException if authentication fails
"""
resp = req_with_retry(
self._session.post,
self._max_req_retries,
url=self._endpoint + AUTH_PATH,
auth=(self._api_id, self._api_key),
verify=self._verify_requests,
timeout=30
)
if resp.status_code == 200:
self._token = resp.text
self._session.headers.update({'Authorization': f'Bearer {resp.text}'})
else:
if resp.status_code == 406:
message = 'user account inactive'
elif resp.status_code == 402:
message = 'paying subscription required'
elif resp.status_code == 401:
message = 'invalid id/key combination or key inactive'
elif resp.status_code == 400:
message = 'invalid key'
else:
message = resp.text
if message:
message = ': ' + message
raise ClientException(f'API authentication failed{message}', resp=resp)
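# Hedged usage sketch (illustrative, assumes valid API credentials; the
# placeholder strings below must be replaced):
#   >>> client = Client(api_id="<your api id>", api_key="<your api key>")
#   >>> client.auth()              # optional: requests re-authenticate on demand
#   >>> token = client.get_token()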
def _req_with_auth_fallback(
self, *, name: str, method: str = 'POST', url: str, params=None, data=None,
headers=None, stop: bool = False):
"""
Request with authentication fallback, used by all requests (except authentication)
:param name: request action
:param method: request method
:param url: request url
:param params: request params
:param data: request data
:param headers: request headers
:param stop: flag to stop infinite authentication recursion
:return: request response object
"""
resp = None
if self._session.headers.get('Authorization') is not None:
if method == 'POST':
resp = req_with_retry(
self._session.post,
self._max_req_retries,
url=url,
json=params,
verify=self._verify_requests,
timeout=self._timeout,
data=data,
headers=headers
)
else:
req_type = self._session.delete if method == 'DELETE' else self._session.get
resp = req_with_retry(
req_type,
self._max_req_retries,
url=url,
verify=self._verify_requests,
timeout=self._timeout,
headers=headers
)
if resp is None or resp.status_code == 403:
if not stop:
self.auth()
return self._req_with_auth_fallback(
name=name, method=method, url=url, params=params, data=data, stop=True)
elif resp.status_code == 200:
return resp
else:
message = resp.text
if not message and resp.status_code == 402:
message = 'request quota exhausted'
if message:
message = ': ' + message
raise ClientException(f'API request failed{message}', resp=resp)
def screen_rolling_backtest(self, params: dict, to_pandas: bool = False):
"""
Run a screen rolling backtest.
:param params: dict with the rolling backtest settings, sent as the JSON request body
:param to_pandas: if True, convert the returned rows into a pandas DataFrame
:return: dict, or pandas.DataFrame when to_pandas is True
"""
ret = self._req_with_auth_fallback(
name='screen rolling backtest',
url=self._endpoint + SCREEN_ROLLING_BACKTEST_PATH,
params=params
).json()
if to_pandas:
rows = ret['rows']
ret['average'][0] = 'Average'
rows.append(ret['average'])
ret['upMarkets'][0] = 'Up Markets'
rows.append(ret['upMarkets'])
ret['downMarkets'][0] = 'Down Markets'
rows.append(ret['downMarkets'])
ret = | pandas.DataFrame(data=rows, columns=ret['columns']) | pandas.DataFrame |
import os
import re
import json
import abc
import warnings
from typing import MutableMapping, List, Union
from functools import reduce
from enum import Enum
import pandas as pd
import numpy as np
from scipy import sparse
import loompy as lp
from loomxpy import __DEBUG__
from loomxpy._specifications import (
ProjectionMethod,
LoomXMetadataEmbedding,
LoomXMetadataClustering,
LoomXMetadataCluster,
LoomXMetadataClusterMarkerMetric,
)
from loomxpy._s7 import S7
from loomxpy._errors import BadDTypeException
from loomxpy._hooks import WithInitHook
from loomxpy._matrix import DataMatrix
from loomxpy.utils import df_to_named_matrix, compress_encode
def custom_formatwarning(msg, *args, **kwargs):
# ignore everything except the message
return str(msg) + "\n"
warnings.formatwarning = custom_formatwarning
##########################################
# MODES #
##########################################
class ModeType(Enum):
NONE = "_"
RNA = "rna"
class Mode(S7):
def __init__(self, mode_type: ModeType, data_matrix: DataMatrix):
"""
constructor for Mode
"""
self._mode_type = mode_type
# Data Matrix
self._data_matrix = data_matrix
# Global
self._global_attrs = GlobalAttributes(mode=self)
# Features
self._feature_attrs = FeatureAttributes(mode=self)
self._fa_annotations = FeatureAnnotationAttributes(mode=self)
self._fa_metrics = FeatureMetricAttributes(mode=self)
# Observations
self._observation_attrs = ObservationAttributes(mode=self)
self._oa_annotations = ObservationAnnotationAttributes(mode=self)
self._oa_metrics = ObservationMetricAttributes(mode=self)
self._oa_embeddings = ObservationEmbeddingAttributes(mode=self)
self._oa_clusterings = ObservationClusteringAttributes(mode=self)
@property
def X(self):
return self._data_matrix
@property
def g(self):
return self._global_attrs
@property
def f(self):
return self._feature_attrs
@property
def o(self):
return self._observation_attrs
def export(
self,
filename: str,
output_format: str,
title: str = None,
genome: str = None,
compress_metadata: bool = False,
cluster_marker_metrics: List[LoomXMetadataClusterMarkerMetric] = [
{
"accessor": "avg_logFC",
"name": "Avg. logFC",
"description": f"Average log fold change from Wilcoxon test",
"threshold": 0,
"threshold_method": "lte_or_gte", # lte, lt, gte, gt, lte_or_gte, lte_and_gte
},
{
"accessor": "pval",
"name": "Adjusted P-Value",
"description": f"Adjusted P-Value from Wilcoxon test",
"threshold": 0.05,
"threshold_method": "lte", # lte, lt, gte, gt, lte_or_gte, lte_and_gte
},
],
):
"""
Export this LoomX object to a Loom file.
Parameters
----------
cluster_marker_metrics: list of dict, optional
    List of dicts (LoomXMetadataClusterMarkerMetric) describing each metric available for the cluster markers.
    Expects each metric value to be of type float.
Returns
-------
None
"""
if output_format == "scope_v1":
#
_feature_names = self._data_matrix._feature_names
# Init
_row_attrs: MutableMapping = {}
_col_attrs: MutableMapping = {}
_global_attrs: MutableMapping = {
"title": os.path.splitext(os.path.basename(filename))[0]
if title is None
else title,
"MetaData": {
"annotations": [],
"metrics": [],
"embeddings": [],
"clusterings": [],
},
"Genome": genome,
}
# Add row attributes (in Loom specifications)
for _attr_key, _attr in self._feature_attrs:
_row_attrs[_attr_key] = _attr.values
# Add columns attributes (in Loom specifications)
_default_embedding = None
_embeddings_X = pd.DataFrame(index=self._data_matrix._observation_names)
_embeddings_Y = pd.DataFrame(index=self._data_matrix._observation_names)
_clusterings = pd.DataFrame(index=self._data_matrix._observation_names)
for _attr_key, _attr in self._observation_attrs:
if _attr.attr_type.value == AttributeType.ANNOTATION.value:
# Categorical not valid, ndarray is required
_col_attrs[_attr_key] = np.asarray(_attr.values)
_global_attrs["MetaData"]["annotations"].append(
{
"name": _attr_key,
"values": list(
map(
lambda x: x.item()
if type(x).__module__ == "numpy"
else x,
sorted(
np.unique(_attr.values),
reverse=False,
),
)
),
}
)
if _attr.attr_type.value == AttributeType.METRIC.value:
_col_attrs[_attr_key] = np.asarray(_attr.values)
_global_attrs["MetaData"]["metrics"].append({"name": _attr_key})
if _attr.attr_type.value == AttributeType.EMBEDDING.value:
_attr: EmbeddingAttribute
_data = _attr.data.iloc[:, 0:2]
_data.columns = ["_X", "_Y"]
# Number of embeddings (don't count the default embedding since this will be used to determine the id of the embedding)
_num_embeddings = len(
list(
filter(
lambda x: int(x["id"]) != -1,
_global_attrs["MetaData"]["embeddings"],
)
)
)
_embedding_id = (
_attr.id
if _attr.id is not None
else (
-1
if _attr._default
else 0
if _num_embeddings == 0
else _num_embeddings + 1
)
)
_embeddings_X = pd.merge(
_embeddings_X,
_data["_X"]
.to_frame()
.rename(columns={"_X": str(_embedding_id)})
.astype("float32"),
left_index=True,
right_index=True,
)
_embeddings_Y = pd.merge(
_embeddings_Y,
_data["_Y"]
.to_frame()
.rename(columns={"_Y": str(_embedding_id)})
.astype("float32"),
left_index=True,
right_index=True,
)
_global_attrs["MetaData"]["embeddings"].append(
{
"id": str(
_embedding_id
), # TODO: type not consistent with clusterings
"name": _attr.name,
}
)
if _attr.default:
_default_embedding = _data
if _attr.attr_type.value == AttributeType.CLUSTERING.value:
_attr: ClusteringAttribute
if _attr.name is None:
raise Exception(
f"The clustering with key '{_attr.key}' does not have a name. This is required when exporting to SCope."
)
# Clustering
_col_name = (
_attr.data.columns[0]
if isinstance(_attr, pd.DataFrame)
else _attr.name
)
_num_clusterings = len(_global_attrs["MetaData"]["clusterings"])
_clustering_id = (
0 if _num_clusterings == 0 else _num_clusterings + 1
)
_clustering_data = (
_attr.data.rename(columns={_col_name: str(_clustering_id)})
if isinstance(_attr.data, pd.DataFrame) # pd.DataFrame
else _attr.data.rename(str(_clustering_id)) # pd.Series
)
_clusterings = pd.merge(
_clusterings,
_clustering_data,
left_index=True,
right_index=True,
)
_clustering_md = LoomXMetadataClustering.from_dict(
{
"id": _clustering_id,
**_attr.metadata.to_dict(),
}
).to_dict()
# Markers
# Dictionary of DataFrame (value) containing the values of the different metric (key) across features (rows) and for each cluster (columns)
_cluster_markers_dict = {}
if cluster_marker_metrics:
has_cluster_markers = [
cluster.markers is not None
for cluster in _attr._metadata.clusters
]
if not all(has_cluster_markers):
continue
# Init DataFrame mask of genes representing markers
cluster_markers = pd.DataFrame(
index=_feature_names,
columns=[str(x.id) for x in _attr._metadata.clusters],
).fillna(0, inplace=False)
# Init DataFrame containing metric valuess
_cluster_marker_metric: LoomXMetadataClusterMarkerMetric
for _cluster_marker_metric in cluster_marker_metrics:
_cluster_markers_dict[
_cluster_marker_metric["accessor"]
] = pd.DataFrame(
index=_feature_names,
columns=[str(x.id) for x in _attr._metadata.clusters],
).fillna(
0, inplace=False
)
_cluster: LoomXMetadataCluster
for _cluster in _attr._metadata.clusters:
_features_df = pd.Series(
_cluster.markers.index.values,
index=_cluster.markers.index.values,
)
# Dictionary of Series (value) containing the values of the different metric (key) for the current cluster
_cluster_marker_metric_values_dict = {}
# Dictionary of Series (value) containing a boolean mask of the features that pass the filter criteria for the different metrics (key)
_cluster_marker_metric_masks_dict = {}
_cluster_marker_metric: LoomXMetadataClusterMarkerMetric
for _cluster_marker_metric in cluster_marker_metrics:
# Check if metric exists in markers table
if (
_cluster_marker_metric["accessor"]
not in _cluster.markers.columns
):
raise Exception(
f"The cluster_marker_metrics argument was not properly defined. Missing {_cluster_marker_metric['accessor']} metric in the markers table. Available columns in markers table are f{''.join(_cluster.markers.columns)}."
)
cluster_marker_metric_values = pd.Series(
_cluster.markers[
_cluster_marker_metric["accessor"]
].values,
index=_cluster.markers.index.values,
).astype(float)
if pd.isnull(cluster_marker_metric_values).any():
raise Exception(
f"NaN detected in {_cluster_marker_metric['accessor']} metric column of the markers table"
)
if _cluster_marker_metric["threshold_method"] == "lte":
feature_mask = (
cluster_marker_metric_values
< _cluster_marker_metric["threshold"]
)
elif _cluster_marker_metric["threshold_method"] == "lt":
feature_mask = (
cluster_marker_metric_values
<= _cluster_marker_metric["threshold"]
)
elif (
_cluster_marker_metric["threshold_method"]
== "lte_or_gte"
):
feature_mask = np.logical_and(
np.logical_or(
cluster_marker_metric_values
>= _cluster_marker_metric["threshold"],
cluster_marker_metric_values
<= -_cluster_marker_metric["threshold"],
),
np.isfinite(cluster_marker_metric_values),
)
else:
raise Exception(
"The given threshold method is not implemented"
)
_cluster_marker_metric_masks_dict[
_cluster_marker_metric["accessor"]
] = feature_mask
_cluster_marker_metric_values_dict[
_cluster_marker_metric["accessor"]
] = cluster_marker_metric_values
# Create a new cluster marker mask based on all feature mask generated using each metric
cluster_marker_metrics_mask = np.logical_or.reduce(
[
v
for _, v in _cluster_marker_metric_masks_dict.items()
]
)
marker_names = _features_df[cluster_marker_metrics_mask]
# Get a marker mask along all features in the matrix
marker_genes_along_data_mask = np.in1d(
_feature_names, marker_names
)
marker_genes_along_data = cluster_markers.index[
marker_genes_along_data_mask
]
# Populate the marker mask
markers_df = pd.DataFrame(
1, index=marker_names, columns=["is_marker"]
)
cluster_markers.loc[
marker_genes_along_data_mask, str(_cluster.id)
] = markers_df["is_marker"][marker_genes_along_data]
if pd.isnull(cluster_markers[str(_cluster.id)]).any():
raise Exception(
f"NaN detected in markers DataFrame of cluster {_cluster.id}."
)
# Populate the marker metrics
for _cluster_marker_metric in _cluster_markers_dict.keys():
_metric_df = pd.DataFrame(
_cluster_marker_metric_values_dict[
_cluster_marker_metric
][cluster_marker_metrics_mask],
index=marker_names,
columns=[_cluster_marker_metric],
)
_cluster_markers_dict[_cluster_marker_metric].loc[
marker_genes_along_data_mask, str(_cluster.id)
] = _metric_df[_cluster_marker_metric][
marker_genes_along_data
]
if pd.isnull(
_cluster_markers_dict[_cluster_marker_metric][
str(_cluster.id)
]
).any():
raise Exception(
f"NaN detected in markers metric {_cluster_marker_metric['accessor']} DataFrame of cluster {_cluster.id}."
)
# Add the required global metadata for markers to be visualized in SCope
# Encapsulate with the mixin to avoid properties not required by gRPC
_clustering_md["clusterMarkerMetrics"] = [
LoomXMetadataClusterMarkerMetric.from_dict(cmm).to_dict()
for cmm in cluster_marker_metrics
]
_global_attrs["MetaData"]["clusterings"].append(_clustering_md)
# Convert all markers related data to Loom compliant format
_row_attrs[
f"ClusterMarkers_{str(_clustering_id)}"
] = df_to_named_matrix(cluster_markers)
for _cluster_marker_metric in _cluster_markers_dict.keys():
_row_attrs[
f"ClusterMarkers_{str(_clustering_id)}_{_cluster_marker_metric}"
] = df_to_named_matrix(
_cluster_markers_dict[_cluster_marker_metric].astype(float)
)
_row_attrs["Gene"] = np.asarray(_feature_names)
_col_attrs["CellID"] = np.asarray(self._data_matrix._observation_names)
# If no default embedding, use the first embedding as default
if _default_embedding is None:
_col_attrs["Embedding"] = df_to_named_matrix(
df=pd.DataFrame(
{
"_X": _embeddings_X["0"].values,
"_Y": _embeddings_Y["0"].values,
}
)
)
_embeddings_X.insert(
loc=0, column="-1", value=_embeddings_X["0"].values
)
_embeddings_Y.insert(
loc=0, column="-1", value=_embeddings_Y["0"].values
)
_md_first_embedding = list(
filter(
lambda x: x["id"] == "0",
_global_attrs["MetaData"]["embeddings"],
)
)[0]
_global_attrs["MetaData"]["embeddings"] = [
{"id": "-1", "name": _md_first_embedding["name"]}
] + list(
filter(
lambda x: x["id"] != "0",
_global_attrs["MetaData"]["embeddings"],
)
)
else:
_col_attrs["Embedding"] = df_to_named_matrix(df=_default_embedding)
_col_attrs["Embeddings_X"] = df_to_named_matrix(df=_embeddings_X)
_col_attrs["Embeddings_Y"] = df_to_named_matrix(df=_embeddings_Y)
_col_attrs["Clusterings"] = df_to_named_matrix(
df=_clusterings.astype(np.int16)
)
_global_attrs["MetaData"] = json.dumps(_global_attrs["MetaData"])
if compress_metadata:
_global_attrs["MetaData"] = compress_encode(
value=_global_attrs["MetaData"]
)
for _ga_key, _ga_value in self._global_attrs:
_global_attrs[_ga_key] = _ga_value
lp.create(
filename=filename,
layers=self._data_matrix._data_matrix.transpose(),
row_attrs=_row_attrs,
col_attrs=_col_attrs,
file_attrs=_global_attrs,
)
print("INFO: LoomX successfully exported to SCope-compatible loom file.")
else:
raise Exception(
f"Cannot export LoomX to the given output format '{output_format}'. Invalid output format"
)
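# Hedged usage sketch (illustrative): exporting a mode to a SCope-compatible
# loom file; "scope_v1" is the only output format handled above, and `mode` is
# a placeholder for an existing Mode instance.
#   >>> mode.export(filename="analysis.loom", output_format="scope_v1")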
class Modes(MutableMapping[str, Mode], metaclass=WithInitHook):
def __init__(self):
""""""
self._keys: List[str] = []
self._mode_types = [item.value for item in ModeType]
# Implemented modes (used here mainly for typing purposes)
self.rna: Mode = None
def __setattr__(self, name, value):
if not hasattr(self, "_initialized"):
if __DEBUG__:
print(f"DEBUG: constructor call: set attr with name {name}")
super().__setattr__(name, value)
else:
self.__setitem__(name=name, value=value)
def __delattr__(self, name: str):
self._keys.remove(name)
super().__delattr__(name)
def __iter__(self):
""""""
raise NotImplementedError
def __len__(self):
""""""
raise NotImplementedError
def __delitem__(self, name) -> None:
""""""
self.__delattr__(name)
def __getitem__(self, name) -> Mode:
""""""
return getattr(self, name)
def _validate_key(self, key):
_key = None
# FIXME: Fix for editable mode (get called with name=__class__)
if key.startswith("__"):
return
if key.startswith("_"):
raise Exception(
f"Cannot add Mode with key {key}. Not a valid key. Expects key not to start with an underscore ('_')."
)
if not isinstance(key, ModeType) and not str:
raise Exception("Not a valid ModeType.")
if isinstance(key, ModeType):
if key in self._mode_types:
_key = key
else:
raise Exception("Not a valid ModeType.")
if isinstance(key, str):
if key in self._mode_types:
_key = key
else:
raise Exception(
f"Not a valid ModeType: {key}. Choose one ModeType from {', '.join(self._mode_types)}"
)
return _key
@staticmethod
def _validate_value(value):
if not type(value) in [tuple, np.matrix, pd.DataFrame, sparse.csr_matrix, sparse.csc_matrix]:
raise Exception(
f"""
Got {type(value)} but expecting either:
- Tuple: (<SciPy CSR matrix>, <feature names>, <observation names>) or
- Tuple: (<NumPy 2D matrix>, <feature names>, <observation names>) or
- pandas.DataFrame
"""
)
if isinstance(value, sparse.csr_matrix):
raise Exception(
"If your data matrix is a SciPy CSR matrix, use tuple: (<SciPy CSR matrix>, <feature names>, <observation names>)."
)
if isinstance(value, sparse.csc_matrix):
raise Exception(
"If your data matrix is a SciPy CSC matrix, use tuple: (<SciPy CSC matrix>, <feature names>, <observation names>)."
)
if isinstance(value, np.matrix):
raise Exception(
"If your data matrix is a NumPy 2D matrix, use tuple: (<NumPy 2D matrix>, <feature names>, <observation names>)."
)
if isinstance(value, tuple):
if len(value) != 3:
raise Exception(
"If your data matrix is a NumPy 2D matrix or SciPy CSR matrix, use tuple: (<NumPy 2D matrix | SciPy CSR matrix>, <feature names>, <observation names>)."
)
def __setitem__(self, name, value) -> None:
""""""
if __DEBUG__:
print(f"DEBUG: instance call: set attr with name {name}")
# FIXME: Fix for editable mode (get called with name=__class__)
if name.startswith("__"):
return
print(f"INFO: Adding new {name} mode")
_key = self._validate_key(key=name)
Modes._validate_value(value=value)
_mode = None
_data_matrix = None
if isinstance(value, tuple):
_matrix, _feature_names, _observation_names = value
_data_matrix = DataMatrix(
data_matrix=_matrix,
feature_names=_feature_names,
observation_names=_observation_names,
)
if isinstance(value, pd.DataFrame):
_data_matrix = DataMatrix(
data_matrix=value.values,
feature_names=value.columns,
observation_names=value.index,
)
if _data_matrix is None:
raise Exception("Invalid type of the given data natrix.")
_mode = Mode(mode_type=ModeType(_key), data_matrix=_data_matrix)
if _key not in self._keys:
self._keys.append(_key)
super().__setattr__(_key, _mode)
def __repr__(self) -> str:
_mode_keys = f"{', '.join(self._keys)}" if len(self._keys) > 0 else "none"
return f"Modalities: {_mode_keys}"
##########################################
# ATTRIBUTES #
##########################################
class GlobalAttributes(MutableMapping[str, str], metaclass=WithInitHook):
def __init__(self, mode: Mode):
""""""
self._keys: List[str] = []
# Implemented modes (used here mainly for typing purposes)
self.rna: Mode = mode
def __setattr__(self, name, value):
if not hasattr(self, "_initialized"):
if __DEBUG__:
print(f"DEBUG: constructor call: set attr with name {name}")
super().__setattr__(name, value)
else:
self.__setitem__(name=name, value=value)
def __delattr__(self, name: str):
self._keys.remove(name)
super().__delattr__(name)
def __iter__(self):
""""""
return iter(AttributesIterator(self))
def __len__(self):
""""""
return len(self._keys)
def __delitem__(self, name: str) -> None:
""""""
self.__delattr__(name)
def __getitem__(self, name: str) -> Mode:
""""""
return getattr(self, name)
def __setitem__(self, name: str, value: str) -> None:
""""""
# FIXME: Fix for editable mode (get called with name=__class__)
if name.startswith("__"):
return
if not isinstance(name, str):
raise Exception("Not a valid key for GlobalAttribute.")
if not isinstance(value, str):
raise Exception("Not a valid value for GlobalAttribute.")
self._add_key(key=name)
super().__setattr__(name, value)
def get_attribute(self, key: str):
""""""
return super().__getattribute__(key)
def __repr__(self) -> str:
_keys = f"{', '.join(self._keys)}" if len(self._keys) > 0 else "none"
return f"Global attributes: {_keys}"
def _add_key(self, key: str):
if key not in self._keys:
self._keys.append(key)
class GlobalAttributesIterator:
"""Class to implement an iterator of Attributes """
def __init__(self, attrs: GlobalAttributes):
self._attrs = attrs
def __iter__(self):
self._n = 0
return self
def __next__(self):
if self._n < len(self._attrs._keys):
current_key = self._attrs._keys[self._n]
self._n += 1
return current_key, self._attrs.get_attribute(current_key)
else:
raise StopIteration
class Axis(Enum):
OBSERVATIONS = 0
FEATURES = 1
class AttributeType(Enum):
ANNOTATION = 0
METRIC = 1
EMBEDDING = 2
CLUSTERING = 3
class Attribute:
def __init__(
self,
key: str,
mode_type: ModeType,
attr_type: AttributeType,
axis: Axis,
data,
name: str = None,
description: str = None,
):
""""""
self._key = key
self._mode_type = mode_type
self._attr_type = attr_type
self._axis = axis
self._data = data
self._name = name
self._description = description
@property
def key(self):
return self._key
@property
def mode_type(self):
return self._mode_type
@property
def attr_type(self):
return self._attr_type
@property
def axis(self):
return self._axis
@property
def data(self):
return self._data
@property
def values(self):
if isinstance(self._data, pd.DataFrame):
_col_name = self._data.columns[0]
return self._data[_col_name].values
if isinstance(self._data, pd.Series):
return self._data
raise Exception(f"Cannot get values from Attribute with key {self._key}")
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def description(self):
return self._description
def __repr__(self):
""""""
return f"""
key: {self._key}
mode: {self._mode_type}
type: {self._attr_type}
name: {self._name}
description: {self._description}
"""
class Attributes(MutableMapping[str, Attribute], metaclass=WithInitHook):
def __init__(self, mode: Mode, axis: Axis, is_proxy: bool = False, **kwargs):
""""""
self._keys: List[str] = []
self._mode = mode
self._mode_type = mode._mode_type if mode is not None else ModeType.NONE
self._axis = axis
self._is_proxy = is_proxy
self._attr_type = kwargs["attr_type"] if "attr_type" in kwargs else False
self._is_multi = True if "is_multi" in kwargs and kwargs["is_multi"] else False
def __getattribute__(self, key):
""""""
if not super().__getattribute__("_is_proxy"):
if key in super().__getattribute__("_keys"):
return super().__getattribute__(key).data
return super().__getattribute__(key)
else:
"""
This is a proxy. Override __getattribute__ of Attributes class
"""
if key in super().__getattribute__("_keys"):
if super().__getattribute__("_axis") == Axis.FEATURES:
return super().__getattribute__("_mode")._feature_attrs[key]
elif super().__getattribute__("_axis") == Axis.OBSERVATIONS:
return super().__getattribute__("_mode")._observation_attrs[key]
else:
raise Exception("Invalid axis.")
return super().__getattribute__(key)
def __setattr__(self, name, value):
if not hasattr(self, "_initialized"):
if __DEBUG__:
print(f"DEBUG: constructor call: set attr with name {name}")
super().__setattr__(name, value)
else:
self.__setitem__(name=name, value=value)
def __delattr__(self, name):
self._keys.remove(name)
super().__delattr__(name)
def __delitem__(self, key):
""""""
self.__delattr__(key)
def __getitem__(self, key):
""""""
return super().__getattribute__(key).data
def __iter__(self):
""""""
return iter(AttributesIterator(self))
def __len__(self):
""""""
return len(self._keys)
def _add_key(self, key: str):
if key not in self._keys:
self._keys.append(key)
def _add_item(self, key: str, value: Attribute) -> None:
self._add_key(key=key)
super().__setattr__(key, value)
def _add_item_by_value(self, value: Attribute):
self._add_key(key=value.key)
super().__setattr__(value.key, value)
def get_attribute(self, key):
""""""
return super().__getattribute__(key)
@abc.abstractclassmethod
def __setitem__(self, name, value):
""""""
raise NotImplementedError
def _validate_key(self, key):
# FIXME: Fix for editable mode (get called with name=__class__)
if key.startswith("__"):
return
if key.startswith("_"):
raise Exception(
f"Cannot add attribute with key {key}. Not a valid key. Expects key not to start with an underscore ('_')."
)
if not isinstance(key, str):
raise Exception(
f"Cannot add attribute with key of type ({type(key).__name__}) to {type(self).__name__}. Not a valid key. Expects key of type str."
)
# Print a warning if the key contains disallowed characters. If any are present, the user will not be able to use dot notation; bracket access will still work.
pattern = "^[a-zA-Z0-9_]+$"
if not re.match(pattern, key):
warnings.warn(
f"The key '{key}' won't be accessible using the dot notation (containing special characters other than '_')",
)
def _validate_value(self, value):
if not isinstance(value, pd.DataFrame) and not isinstance(value, pd.Series):
raise Exception(
f"Cannot add attribute of type {type(value).__name__} to {type(self).__name__}. Expects a pandas.DataFrame or a pandas.Series."
)
if (
isinstance(value, pd.DataFrame)
and not self._is_multi
and value.shape[1] > 1
):
raise Exception(
f"Cannot add attribute of shape {value.shape[1]}. Currently, allows only {type(value).__name__} with maximally 1 feature (i.e.: column)."
)
class AttributesIterator:
"""Class to implement an iterator of Attributes """
def __init__(self, attrs: Attributes):
self._attrs = attrs
def __iter__(self):
self._n = 0
return self
def __next__(self):
if self._n < len(self._attrs._keys):
current_key = self._attrs._keys[self._n]
self._n += 1
return current_key, self._attrs.get_attribute(current_key)
else:
raise StopIteration
##########################################
# ATTRIBUTE TYPES #
##########################################
class AnnotationAttributes(Attributes):
def __init__(self, mode: Mode, axis: Axis, is_proxy: bool = False, **kwargs):
""""""
super().__init__(
mode=mode,
axis=axis,
is_proxy=is_proxy,
attr_type=AttributeType.ANNOTATION,
**kwargs,
)
def _validate_value(self, value: Union[pd.DataFrame, pd.Series], **kwargs):
if __DEBUG__:
print(f"DEBUG: _validate_value ({type(self).__name__})")
super()._validate_value(value=value)
_force_conversion_to_categorical = (
kwargs["force_conversion_to_categorical"]
if "force_conversion_to_categorical" in kwargs
else False
)
# Do some checks and processing for attribute of type ANNOTATION
if not _force_conversion_to_categorical:
if (
isinstance(value, pd.DataFrame)
and not all(value.apply(pd.api.types.is_categorical_dtype))
and not all(value.apply(pd.api.types.is_bool_dtype))
) or (
isinstance(value, pd.Series)
and not pd.api.types.is_categorical_dtype(arr_or_dtype=value)
and not | pd.api.types.is_bool_dtype(arr_or_dtype=value) | pandas.api.types.is_bool_dtype |
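A minimal sketch of the dtype rule the validation above enforces for annotation attributes: only categorical or boolean columns are accepted unless conversion is forced. This is an illustration, not part of the library; the function name and messages are assumptions.

import pandas as pd

def validate_annotation(series: pd.Series, force_conversion_to_categorical: bool = False) -> pd.Series:
    # Categorical and boolean annotations pass through unchanged.
    if pd.api.types.is_categorical_dtype(series) or pd.api.types.is_bool_dtype(series):
        return series
    # Optionally coerce other dtypes (e.g. strings) to categorical instead of raising.
    if force_conversion_to_categorical:
        return series.astype("category")
    raise TypeError(f"Annotation '{series.name}' must be categorical or boolean, got {series.dtype}.")

# Example: a string column is rejected unless conversion is requested.
labels = pd.Series(["a", "b", "a"], name="cluster")
labels_cat = validate_annotation(labels, force_conversion_to_categorical=True)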
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import time
import re
def extractTradingPartners():
directory = 'Data Sources/Trading Partners (1)/'
files = os.listdir(directory)
for i,file in enumerate(files):
# We are only interested in the csv's and not the source text nor a potential zip-folder containing raw data.
if '.csv' in file.lower():
if i == 0:
# Read the file with the latin-1 encoding instead of the default,
# because the indicator specifications contain characters the default codec cannot decode.
# As part of the read, NaN values are converted to zeros.
tradingPartners = pd.read_csv(directory+file,encoding = 'latin-1').fillna(value=0)
# Extract the rows which contain information on the top 5 trading partners.
indicatorOfInterest = [indicator for indicator in tradingPartners.Indicator.unique() if 'top 5' in indicator.lower()]
# Locating the relevant indices
indices = [i for i,indicator in enumerate(tradingPartners.Indicator) if indicator in indicatorOfInterest]
# Constructing the dataframe with the observations of interest
tradingPartners = tradingPartners.loc[indices]
tradingPartners = tradingPartners.reset_index(drop=True)
else:
# Read the file with the latin-1 encoding instead of the default,
# because the indicator specifications contain characters the default codec cannot decode.
# As part of the read, NaN values are converted to zeros.
temp = pd.read_csv(directory+file,encoding = 'latin-1').fillna(value=0)
# Locating the relevant indices
indices = [i for i,indicator in enumerate(temp.Indicator) if indicator in indicatorOfInterest]
# Constructing the dataframe with the observations of interest
temp = temp.loc[indices]
temp = temp.reset_index(drop=True)
# Concate the new dataframe with the existing
tradingPartners = | pd.concat([tradingPartners,temp],axis = 0) | pandas.concat |
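For reference, a condensed sketch of the same extract-and-append pattern used in extractTradingPartners above; the directory and the "top 5" keyword follow the snippet, while the single-pass concat is an assumption about intent rather than the original code.

import glob
import pandas as pd

frames = []
for path in glob.glob('Data Sources/Trading Partners (1)/*.csv'):
    df = pd.read_csv(path, encoding='latin-1').fillna(value=0)
    # Keep only rows whose indicator refers to the top 5 trading partners.
    mask = df['Indicator'].str.lower().str.contains('top 5', na=False)
    frames.append(df[mask])

# Concatenating once at the end avoids growing the frame inside the loop.
tradingPartners = pd.concat(frames, axis=0, ignore_index=True)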
#SPDX-License-Identifier: MIT
import datetime
import json
import logging
import os
import sys
import warnings
from multiprocessing import Process, Queue
from workers.worker_git_integration import WorkerGitInterfaceable
import numpy as np
import pandas as pd
import requests
import sqlalchemy as s
from skimage.filters import threshold_otsu
from sklearn.ensemble import IsolationForest
from augur import ROOT_AUGUR_DIRECTORY
from workers.message_insights_worker.message_novelty import novelty_analysis
from workers.message_insights_worker.message_sentiment import get_senti_score
from workers.worker_base import Worker
warnings.filterwarnings('ignore')
class MessageInsightsWorker(WorkerGitInterfaceable):
def __init__(self, config={}):
# Define the worker's type, which will be used for self identification.
worker_type = "message_insights_worker"
# Define what this worker can be given and know how to interpret
given = [['github_url']]
# The name the housekeeper/broker use to distinguish the data model this worker can fill
models = ['message_analysis']
# Define the tables needed to insert, update, or delete on
data_tables = ['message', 'repo', 'message_analysis', 'message_analysis_summary']
# For most workers you will only need the worker_history and worker_job tables
# from the operations schema, these tables are to log worker task histories
operations_tables = ['worker_history', 'worker_job']
# Run the general worker initialization
super().__init__(worker_type, config, given, models, data_tables, operations_tables)
# Do any additional configuration after the general initialization has been run
self.config.update(config)
# Define data collection info
self.tool_source = 'Message Insights Worker'
self.tool_version = '0.2.0'
self.data_source = 'Non-existent API'
self.insight_days = self.config['insight_days']
# Abs paths
self.models_dir = os.path.join(ROOT_AUGUR_DIRECTORY, "workers", "message_insights_worker", self.config['models_dir'])
self.full_train = False
# To identify which run of worker inserted the data
self.run_id = 100
def message_analysis_model(self, task, repo_id):
"""
:param task: the task generated by the housekeeper and sent to the broker which
was then sent to this worker. Takes the example dict format of:
{
'job_type': 'MAINTAIN',
'models': ['fake_data'],
'display_name': 'fake_data model for url: https://github.com/vmware/vivace',
'given': {
'git_url': 'https://github.com/vmware/vivace'
}
}
:param repo_id: the collect() method queries the repo_id given the git/github url
and passes it along to make things easier. An int such as: 27869
"""
# Any initial database instructions, like finding the last tuple inserted or generate the next ID value
self.begin_date = ''
# Check to see if repo has been analyzed previously
repo_exists_SQL = s.sql.text("""
SELECT exists (SELECT 1 FROM augur_data.message_analysis_summary WHERE repo_id = :repo_id LIMIT 1)""")
df_rep = pd.read_sql_query(repo_exists_SQL, self.db, params={'repo_id': repo_id})
self.full_train = not(df_rep['exists'].iloc[0])
self.logger.info(f'Full Train: {self.full_train}')
# Collection and insertion of data happens here
if not self.full_train:
# Fetch the timestamp of last analyzed message for the repo
past_SQL = s.sql.text("""
select message_analysis.msg_id, message.msg_timestamp
from augur_data.message_analysis
inner join augur_data.message on message.msg_id = message_analysis.msg_id
inner join augur_data.pull_request_message_ref on message.msg_id = pull_request_message_ref.msg_id
inner join augur_data.pull_requests on pull_request_message_ref.pull_request_id = pull_requests.pull_request_id
where message.repo_id = :repo_id
UNION
select message_analysis.msg_id, message.msg_timestamp
from augur_data.message_analysis
inner join augur_data.message on message.msg_id = message_analysis.msg_id
inner join augur_data.issue_message_ref on message.msg_id = issue_message_ref.msg_id
inner join augur_data.issues on issue_message_ref.issue_id = issues.issue_id
where message.repo_id = :repo_id
""")
df_past = pd.read_sql_query(past_SQL, self.db, params={'repo_id': repo_id})
df_past['msg_timestamp'] = | pd.to_datetime(df_past['msg_timestamp']) | pandas.to_datetime |
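A small illustration (outside the worker, with made-up rows) of how the timestamp column returned by the query above can be parsed and reduced to a begin date for incremental collection.

import pandas as pd

# Stand-in for the query result: one row per previously analysed message.
df_past = pd.DataFrame({
    'msg_id': [1, 2, 3],
    'msg_timestamp': ['2021-01-05 10:00:00', '2021-02-01 08:30:00', '2021-01-20 12:15:00'],
})
df_past['msg_timestamp'] = pd.to_datetime(df_past['msg_timestamp'])

# Only messages newer than the latest analysed timestamp need re-processing.
begin_date = df_past['msg_timestamp'].max()
print(f'Collecting messages created after {begin_date}')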
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: 2d matrix with shape (2, 3) to input. empty - makes sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
# collections.Sequence-like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = | DataFrame([[1, 'a'], [2, 'b']], columns=columns) | pandas.DataFrame |
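A quick, self-contained illustration of the dict-of-Series alignment behaviour exercised by test_constructor_dict above; the data values are made up.

import numpy as np
import pandas as pd

ts1 = pd.Series(np.arange(5.0), index=pd.date_range('2000-01-01', periods=5))
ts2 = ts1[1:]  # shorter series, missing the first date

frame = pd.DataFrame({'col1': ts1, 'col2': ts2})
# col2 is reindexed onto the union of the indexes, so its first value becomes NaN.
assert np.isnan(frame['col2'].iloc[0])
assert len(frame) == 5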
"""
This module contains classes for quantifying the predicted model errors (uncertainty quantification), and for preparing
provided residual (true error) and predicted model error data for plotting (e.g. residual vs. error plots), or for
recalibration of model errors using the method of Palmer et al.
ErrorUtils:
Collection of functions to conduct error analysis on certain types of models (uncertainty quantification), and prepare
residual and model error data for plotting, as well as recalibrate model errors with various methods
CorrectionFactors
Class for performing recalibration of model errors (uncertainty quantification) based on the method from the
work of Palmer et al.
"""
import os
import sys
import numpy as np
import pandas as pd
from scipy.optimize import minimize
try:
from forestci import random_forest_error
except:
print('forestci is an optional dependency. To install the latest forestci compatibility with scikit-learn>=0.24, run '
'pip install git+git://github.com/scikit-learn-contrib/forest-confidence-interval.git')
class ErrorUtils():
'''
Collection of functions to conduct error analysis on certain types of models (uncertainty quantification), and prepare
residual and model error data for plotting, as well as recalibrate model errors with various methods
Args:
None
Methods:
_collect_error_data: method to collect all residuals, model errors, and dataset standard deviation over many data splits
Args:
savepath: (str), string denoting the path to save output to
data_type: (str), string denoting the data type analyzed, e.g. train, test, leftout
Returns:
model_errors: (pd.Series), series containing the predicted model errors
residuals: (pd.Series), series containing the true model errors (residuals)
dataset_stdev: (float), standard deviation of the data set
_recalibrate_errors: method to recalibrate the model errors using negative log likelihood function from work of Palmer et al.
Args:
model_errors: (pd.Series), series containing the predicted (uncalibrated) model errors
residuals: (pd.Series), series containing the true model errors (residuals)
Returns:
model_errors: (pd.Series), series containing the predicted (calibrated) model errors
a: (float), the slope of the recalibration linear fit
b: (float), the intercept of the recalibration linear fit
_parse_error_data: method to prepare the provided residuals and model errors for plotting the binned RvE (residual vs error) plots
Args:
model_errors: (pd.Series), series containing the predicted model errors
residuals: (pd.Series), series containing the true model errors (residuals)
dataset_stdev: (float), standard deviation of the data set
number_of_bins: (int), the number of bins to digitize the data into for making the RvE (residual vs. error) plot
Returns:
bin_values: (np.array), the x-axis of the RvE plot: reduced model error values digitized into bins
rms_residual_values: (np.array), the y-axis of the RvE plot: the RMS of the residual values digitized into bins
num_values_per_bin: (np.array), the number of data samples in each bin
number_of_bins: (int), the number of bins to put the model error and residual data into.
_get_model_errors: method for generating the model error values using either the standard deviation of weak learners or jackknife-after-bootstrap method of Wager et al.
Args:
model: (mastml.models object), a MAST-ML model, e.g. SklearnModel or EnsembleModel
X: (pd.DataFrame), dataframe of the X feature matrix
X_train: (pd.DataFrame), dataframe of the X training data feature matrix
X_test: (pd.DataFrame), dataframe of the X test data feature matrix
error_method: (str), string denoting the UQ error method to use. Viable options are 'stdev_weak_learners' and 'jackknife_after_bootstrap'
remove_outlier_learners: (bool), whether specific weak learners that are found to deviate from 3 sigma of the average prediction for a given data point are removed (Default False)
Returns:
model_errors: (pd.Series), series containing the predicted model errors
num_removed_learners: (list), list of number of removed weak learners for each data point
_remove_outlier_preds: method to flag and remove outlier weak learner predictions
Args:
preds: (list), list of predicted values of a given data point from an ensemble of weak learners
Returns:
preds_cleaned: (list), ammended list of predicted values of a given data point from an ensemble of weak learners, with predictions from outlier learners removed
num_outliers: (int), the number of removed weak learners for the data point evaluated
'''
@classmethod
def _collect_error_data(cls, savepath, data_type):
if data_type not in ['train', 'test', 'leaveout']:
print('Error: data_test_type must be one of "train", "test" or "leaveout"')
exit()
dfs_error = list()
dfs_residuals = list()
dfs_ytrue = list()
residuals_files_to_parse = list()
error_files_to_parse = list()
ytrue_files_to_parse = list()
splits = list()
for folder, subfolders, files in os.walk(savepath):
if 'split' in folder:
splits.append(folder)
for path in splits:
if os.path.exists(os.path.join(path, 'model_errors_'+str(data_type)+'.xlsx')):
error_files_to_parse.append(os.path.join(path, 'model_errors_' + str(data_type) + '.xlsx'))
if os.path.exists(os.path.join(path, 'residuals_' + str(data_type) + '.xlsx')):
residuals_files_to_parse.append(os.path.join(path, 'residuals_' + str(data_type) + '.xlsx'))
if os.path.exists(os.path.join(path, 'y_train.xlsx')):
ytrue_files_to_parse.append(os.path.join(path, 'y_train.xlsx'))
for file in residuals_files_to_parse:
df = pd.read_excel(file)
dfs_residuals.append(np.array(df['residuals']))
for file in error_files_to_parse:
df = | pd.read_excel(file) | pandas.read_excel |
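A hedged sketch of what the per-split collection above works toward: stacking the residual and model-error arrays from every split into flat Series and taking the dataset standard deviation. The arrays here are invented; the real values come from the residuals_*.xlsx and model_errors_*.xlsx files.

import numpy as np
import pandas as pd

# Stand-ins for arrays read from two splits' Excel files.
dfs_residuals = [np.array([0.10, -0.20, 0.30]), np.array([0.05, -0.15])]
dfs_error = [np.array([0.12, 0.18, 0.25]), np.array([0.07, 0.20])]

residuals = pd.Series(np.concatenate(dfs_residuals), name='residuals')
model_errors = pd.Series(np.concatenate(dfs_error), name='model_errors')
dataset_stdev = residuals.std()

print(residuals.shape, model_errors.shape, round(dataset_stdev, 3))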
# -*- coding: utf-8 -*-
"""
Created on Fri May 15 01:55:22 2020
@author: balajiramesh
"""
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 00:25:12 2020
@author: balajiramesh
Raw : 16,319230 2,641562
Within study timeline: 14393806 2247749
Within study area and timeline: 7892752 1246896
After removing washout period: 7816138 1233913
After removing missing data: 7,813,866 and 1,233,600 OP and IP ED visit records
"""
import pandas as pd
import numpy as np
import geopandas
import statsmodels.api as sm
import statsmodels.formula.api as smf
from datetime import timedelta, date,datetime
from dateutil import parser
import glob
import sys
import gc
sys.path.insert(1, r'H:\Balaji\GRAScripts\dhs_scripts')
from recalculate_svi import recalculateSVI
#%%functions
def filter_mortality(df):
pat_sta=df.PAT_STATUS.copy()
pat_sta=pd.to_numeric(pat_sta,errors="coerce")
return pat_sta.isin([20,40,41,42]).astype('int') #status code for died
def get_sp_outcomes(sp,Dis_cat):
global sp_outcomes
return sp.merge(sp_outcomes.loc[:,['RECORD_ID','op',Dis_cat]],on=['RECORD_ID','op'],how='left')[Dis_cat].values
#%%read from pickle - shortcut ===================================================================================
#=================================================================================================================
INPUT_IPOP_DIR=r'H:\Balaji\DSHS ED visit data(PII)\CleanedMergedJoined'
sp=pd.read_pickle(r'Z:\Balaji\R session_home_dir (PII)\sp_pyton_EdVisit.pkl')
#read the categories file
outcome_cats=pd.read_csv('H:/Balaji/GRAScripts/dhs_scripts/categories.csv')
outcome_cats.fillna('',inplace=True)
#read op/ip outcomes df
sp_outcomes=pd.read_csv(INPUT_IPOP_DIR+'\\ip_op_outcomes.csv')
flood_join_field='PAT_ADDR_CENSUS_TRACT'
Dis_cat='Pregnancy_complic'
#%%merege Dr Samarth's dtataset
evacDf_raw=pd.read_csv('Z:/Balaji/EvacuationDataDrSamarth/overall_sim_feature_values.csv')
evacDf=evacDf_raw.rename(columns={'flooding_close_proximity_duration_hr':'floodCloseProxDur',
'tri_close_proximity_duration_hr':'triCloseProxDur', 'tri_distance_mi':'triDistMiles',
'heavy_rainfall_duration_hr':'hvyRainDur', 'rainfall_total_mm':'totRainfall'
})
#make quantile bins for each variable
# evacDfCat=evacDf.loc[:,evacDf.columns != 'FIPS'] \
# .apply(axis=0,func=lambda x: \
# pd.cut(x,np.round(np.insert(np.quantile(x,[.25,.5,.75,1]),0,-1),3),labels=np.round(np.quantile(x,[.25,.5,.75,1]),3)))
#convert everything to categorical
#evacDf=pd.concat([evacDf.loc[:,'FIPS'],evacDfCat],axis=1)
#subset df for census tracts in evac df
sp=sp.loc[sp.PAT_ADDR_CENSUS_TRACT.isin(evacDf.FIPS),:]
#merge evacDF
sp=sp.merge(evacDf,how='left',left_on='PAT_ADDR_CENSUS_TRACT',right_on='FIPS')
#subset sp_outcomes to save memory
sp_outcomes=sp_outcomes.loc[sp_outcomes.RECORD_ID.isin(sp.RECORD_ID),:]
#redifine floodcat
#%%merge flood ratio ctegories
tractsfloodr=sp.loc[~sp.duplicated(flood_join_field),[flood_join_field,'floodr']]
s=tractsfloodr.loc[tractsfloodr.floodr>0,'floodr']
flood_bins=[0,0.00000001,s.quantile(0.5),1]
sp['floodr_cat']=pd.cut(sp.floodr,bins=flood_bins,right=True,include_lowest=True,labels=['NO','FloodCat1','FloodCat2'])
#%%function for looping
exposure='evacuation_pct'
def run():
#%%filter records for specific outcome
df=sp#.sample(500000)#[sp.SVI_Cat=='SVI_filter'] #--------------Edit here for stratified model
if Dis_cat=="DEATH":df.loc[:,'Outcome']=filter_mortality(sp)
if Dis_cat=="ALL":df.loc[:,'Outcome']=1
if Dis_cat in outcome_cats.category.to_list():df.loc[:,'Outcome']=get_sp_outcomes(df, Dis_cat)
#%%for filtering flooded or non flooded alone
#df=df[df.floodr_cat=="FLood_1"].copy()
#df=df[df.SEX_CODE==FIL_COL].copy()
#df=df[df.AGE_cat==FIL_COL].copy()
#df=df[df[SVI_COL]==FIL_COL].copy()
#df=df[df.RACE==FIL_COL].copy()
#%%stratified model for each period
#df=df.loc[df.Time.isin(['control', 'flood']),]
#df.Time.cat.remove_unused_categories(inplace=True)
#%% save cross tab
#counts_outcome=pd.DataFrame(df.Outcome.value_counts())
# outcomes_recs=df.loc[(df.Outcome>0)&(~pd.isna(df.loc[:,[exposure,'Time','year','month','weekday' ,'PAT_AGE_YEARS',
# 'SEX_CODE','RACE','ETHNICITY','SVI_Cat']]).any(axis=1)),]
# counts_outcome=pd.crosstab(outcomes_recs[exposure],outcomes_recs.Time)
# counts_outcome.to_csv(Dis_cat+"_"+exposure+"_aux"+".csv")
# print(counts_outcome)
# del outcomes_recs
#%%for total ED visits using grouped / coutns
if Dis_cat=="ALL":
grouped_tracts=df.loc[:,['STMT_PERIOD_FROM','PAT_AGE_YEARS','PAT_ADDR_CENSUS_TRACT','Outcome']]
grouped_tracts=pd.concat([grouped_tracts]+[ | pd.get_dummies(df[i],prefix=i) | pandas.get_dummies |
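To illustrate the dummy-encoding step above in isolation: pd.get_dummies expands a categorical covariate into indicator columns that can then be aggregated per census tract. Column names and values here are illustrative, not taken from the restricted ED-visit data.

import pandas as pd

df = pd.DataFrame({
    'PAT_ADDR_CENSUS_TRACT': [1, 1, 2, 2, 2],
    'Time': ['control', 'flood', 'flood', 'control', 'flood'],
    'Outcome': [0, 1, 1, 0, 1],
})

# One indicator column per Time category, then counts per tract.
dummies = pd.get_dummies(df['Time'], prefix='Time')
grouped = pd.concat([df[['PAT_ADDR_CENSUS_TRACT', 'Outcome']], dummies], axis=1)
counts = grouped.groupby('PAT_ADDR_CENSUS_TRACT').sum()
print(counts)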
import pandas as pd
import numpy as np
import pickle
import lap_v2_py3 as lap_v2
reprocess_new_basis = True
#Folder with data:
source_folder = '../Source_Data/'
dest_folder = '../Processed_Data/'
if reprocess_new_basis:
#Load in the conversion table
conv = pd.read_csv(source_folder+'ann.csv',usecols=[1,2],index_col=0,squeeze=True)
#Throw out ambiguous reads
conv = conv[~conv.index.duplicated(keep=False)]
#Load in the new basis data
LU = pd.read_excel(source_folder+'InVivo.xlsx',sheet_name='LU_POS vs FG',index_col=0,usecols=[0,5,6,7]).reindex(index=conv.keys()).mean(axis=1)
FG = pd.read_excel(source_folder+'InVivo.xlsx',sheet_name='LU_POS vs FG',index_col=0,usecols=[0,2,3,4]).reindex(index=conv.keys()).mean(axis=1)
TH = pd.read_excel(source_folder+'InVivo.xlsx',sheet_name='TH_POS vs FG',index_col=0,usecols=[0,5,6,7]).reindex(index=conv.keys()).mean(axis=1)
BR = pd.read_excel(source_folder+'InVivo.xlsx',sheet_name='BR_POS vs ECT',index_col=0,usecols=[0,2,3,4]).reindex(index=conv.keys()).mean(axis=1)
ECT = pd.read_excel(source_folder+'InVivo.xlsx',sheet_name='BR_POS vs ECT',index_col=0,usecols=[0,5,6,7]).reindex(index=conv.keys()).mean(axis=1)
newdict = {'E.9 Lung Prim Nkx+':LU,'E.13 Thyroid':TH,'E.8.25 Foregut Endoderm':FG,'E.9 Forebrain':BR,'E.8.25 Ectoderm':ECT}
#Reindex using Entrez ID's, add name, and average duplicates
for name in newdict.keys():
newdict[name].index=conv
newdict[name].dropna(inplace=True)
newdict[name].name = name
temp = newdict[name].copy()
newdict[name] = newdict[name][~newdict[name].index.duplicated()]
for item in newdict[name].index:
newdict[name].loc[item] = temp.loc[item].mean()
del temp
f = open(dest_folder+'NewBasis.dat','wb')
pickle.dump(newdict,f)
f.close()
else:
f = open(dest_folder+'NewBasis.dat','rb')
newdict = pickle.load(f)
f.close()
#%% Load in the basis data
basis = pd.read_csv(source_folder+'Mouse_Basis_Data.txt',sep='\t',index_col=0,usecols=[0]+list(range(3,64)))
#####################
# Append new basis data
#thresh = 1
basis_new = basis.copy()
newdict_log = {}
#for name in newdict.keys():
# newdict_log[name] = np.log2(newdict[name]+1)
# basis_new = basis_new.join(newdict_log[name][newdict_log[name]>thresh],how='inner')
for name in newdict.keys():
basis_new = basis_new.join(newdict[name],how='inner')
basis_new.dropna(inplace=True)
####################
#Load Keri's data
keri_index = pd.read_csv(source_folder+'entrez_id.txt',index_col=None,header=None).squeeze().values
keri_label = pd.read_csv(source_folder+'keri_cell_lbl.txt',index_col=None,header=None).squeeze()
keri = | pd.read_csv(source_folder+'keri_ranknorm_data_corr.txt',index_col=None,header=None,sep='\t') | pandas.read_csv |
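The duplicate-averaging loop above (copy, drop duplicated index labels, then loop and take means) can be written more idiomatically with a groupby on the index level; a sketch with toy data, not the original code.

import pandas as pd

s = pd.Series([1.0, 3.0, 5.0], index=['g1', 'g1', 'g2'], name='E.13 Thyroid')

# Average all entries sharing the same (Entrez ID) index label.
deduplicated = s.groupby(level=0).mean()
print(deduplicated)  # g1 -> 2.0, g2 -> 5.0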
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# ================================================================================================ #
# Project : Deep Learning for Conversion Rate Prediction (CVR) #
# Version : 0.1.0 #
# File : \dataset.py #
# Language : Python 3.7.12 #
# ------------------------------------------------------------------------------------------------ #
# Author : <NAME> #
# Email : <EMAIL> #
# URL : https://github.com/john-james-ai/cvr #
# ------------------------------------------------------------------------------------------------ #
# Created : Thursday, January 13th 2022, 2:22:59 am #
# Modified : Thursday, February 10th 2022, 8:53:04 pm #
# Modifier : <NAME> (<EMAIL>) #
# ------------------------------------------------------------------------------------------------ #
# License : BSD 3-clause "New" or "Revised" License #
# Copyright: (c) 2022 Bryant St. Labs #
# ================================================================================================ #
"""Dataset Module"""
import pandas as pd
from pandas.api.types import is_numeric_dtype
from cvr.core.asset import AssetPassport, AssetRepo, Asset
from cvr.utils.printing import Printer
# ============================================================================ #
# DATASET #
# ============================================================================ #
class Dataset(Asset):
def __init__(self, passport: AssetPassport, data: pd.DataFrame) -> None:
super(Dataset, self).__init__(passport=passport)
self._df = data
# Cache for computations
# Dataset summaries and info
self._summary = None
self._info = None
# Column-wise computations
self._rank_frequency_table = {}
self._descriptive_statistics = {}
self._printer = Printer()
# ------------------------------------------------------------------------ #
# PROPERTIES #
# ------------------------------------------------------------------------ #
@property
def size(self) -> float:
return self._df.memory_usage(deep=True).sum() / (1024 * 1024)
@property
def shape(self) -> tuple:
return self._df.shape
@property
def data(self) -> pd.DataFrame:
return self._df
@property
def info(self) -> pd.DataFrame:
return self._infoize()
@property
def summary(self) -> pd.DataFrame:
return self._summarize()
# ------------------------------------------------------------------------ #
# DATA ACCESS #
# ------------------------------------------------------------------------ #
def head(self, n: int = 5) -> pd.DataFrame:
"""Prints and returns the top n rows from a dataset.
Args:
n (int): Number of observations to print/return
"""
df = self._df.head(n)
subtitle = "First {} Rows".format(str(n))
self._printer.print_title(self.passport.name, subtitle)
self._printer.print_dataframe(df)
def tail(self, n: int = 5) -> pd.DataFrame:
"""Prints and returns the last n rows from a dataset.
Args:
n (int): Number of observations to print/return
"""
df = self._df.tail(n)
subtitle = "Last {} Rows".format(str(n))
self._printer.print_title(self.passport.name, subtitle)
        self._printer.print_dataframe(df)
        return df
def sample(self, n: int = 5, as_dict: bool = True, random_state: int = None) -> pd.DataFrame:
"""Prints and returns n randomly selected rows from a dataset.
Args:
n (int): Number of randomly selected observations to print/return
as_dict (bool): Prints each sample as a dictionary
            random_state (int): Seed for the pseudo-random generator
"""
df = self._df.sample(n=n, replace=False, random_state=random_state)
subtitle = "{} Randomly Selected Samples".format(str(n))
self._printer.print_title(self.passport.name, subtitle)
if as_dict is True:
d = df.to_dict(orient="index")
for index, data in d.items():
subtitle = "Index = {}".format(index)
self._printer.print_dictionary(data, subtitle)
self._printer.print_blank_line()
else:
            self._printer.print_dataframe(df)
        return df
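    # Example usage (illustrative sketch; assumes an AssetPassport instance and a
    # pandas DataFrame are already available -- neither is constructed here):
    #   ds = Dataset(passport=passport, data=df)
    #   ds.head(10)          # prints (and returns) the first ten rows
    #   ds.sample(3)         # prints three random rows as dictionaries
    #   print(ds.summary)    # dataset-level statistics
    #   print(ds.info)       # per-column dtype / missing / unique report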
# ------------------------------------------------------------------------ #
# AGGREGATION AND SUMMARIZATION #
# ------------------------------------------------------------------------ #
def _infoize(self) -> pd.DataFrame:
"""Prepares dataset information similar to pandas info method."""
if self._info is not None:
pass
else:
df1 = self._df.dtypes.to_frame()
df2 = self._df.count(axis=0, numeric_only=False).to_frame()
df3 = self._df.isna().sum().to_frame()
df4 = df3[0] / len(self._df) * 100
df5 = self._df.nunique().to_frame()
df6 = df5[0] / len(self._df) * 100
df7 = self._df.memory_usage(deep=True).to_frame()
df8 = pd.concat([df1, df2, df3, df4, df5, df6, df7], axis=1, join="inner")
df8.columns = [
"Data Type",
"Count",
"Missing",
"% Missing",
"Unique",
"% Unique",
"Memory Usage",
]
self._info = df8
return self._info
# ------------------------------------------------------------------------ #
def _summarize(self) -> dict:
"""Renders dataset level statistics."""
if not self._summary:
d = {}
d["Rows"] = self._df.shape[0]
d["Columns"] = self._df.shape[1]
d["Cells"] = self._df.shape[0] * self._df.shape[1]
d["Size in Memory (Mb)"] = round(
self._df.memory_usage(deep=True).sum() / (1024 * 1024), 2
)
d["Non-Null Cells"] = self._df.notna().sum().sum()
d["Missing Cells"] = self._df.isna().sum().sum()
d["Sparsity"] = round(d["Missing Cells"] / d["Cells"] * 100, 2)
d["Duplicate Rows"] = self._df.duplicated(keep="first").sum()
d["Duplicate Rows %"] = round(d["Duplicate Rows"] / d["Rows"] * 100, 2)
datatypes = self._datatypes()
            d.update(datatypes)
            self._summary = d
        return self._summary
def _datatypes(self) -> dict:
"""Returns a dictionary of data type counts."""
d = self._df.dtypes.astype(str).value_counts().to_frame().to_dict()[0]
d2 = {}
for k, v in d.items():
k = k + " datatypes"
d2[k] = v
return d2
# ------------------------------------------------------------------------ #
def describe(self, column: str) -> pd.DataFrame:
"""Descriptive statistics for a column in a dataframe
Args:
column (str): Name of a column in the dataset.
"""
        if is_numeric_dtype(self._df[column]):
import pandas as pd
data_av_week = pd.read_csv("data_av_week.csv")
supermarkt_urls = pd.read_csv("supermarkt_urls.csv")
s_details = pd.read_csv("notebooksdetailed_supermarkt_python_mined.csv", header= None)
migros_details = pd.read_csv("notebooksdetailed_Migros_python_mined.csv", header= None)
coop_details = pd.read_csv("notebooksdetailed_Coop_python_mined.csv", header= None)
data_av_week = data_av_week.drop(["Unnamed: 0"], axis=1)
data_av_week = data_av_week.rename({'url':'urls'}, axis=1)
head = ["name_supermarkt", "address", "lat", "long", "key_words", "codes", "postal_code", "address2", "url2"]
s_details.columns = head
s_details = s_details.drop(columns=['address2'])
migros_details.columns = head
migros_details = migros_details.drop(columns=['address2'])
coop_details.columns = head
coop_details = coop_details.drop(columns=['address2'])
# merge the supermarkt data
supermarkt_details = pd.merge(s_details, migros_details, how="outer")
supermarkt_details = pd.merge(supermarkt_details, coop_details, how="outer")
data_week_urls = pd.merge(supermarkt_urls, data_av_week, how="outer", on="urls")
data_names_week_all = pd.merge(supermarkt_details, data_week_urls, how="outer", on="codes")
data_names_week_all.to_csv("all_data_per_week.csv", index=False)
# Per day
data_av_day = pd.read_csv("data_av_day.csv")
data_av_day = data_av_day.drop(["Unnamed: 0"], axis=1)
data_av_day = data_av_day.rename({'url':'urls'}, axis=1)
data_days_urls = pd.merge(supermarkt_urls, data_av_day, how="outer", on="urls")
data_names_days_all = pd.merge(supermarkt_details, data_days_urls, how="outer", on="codes")
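# A plausible follow-up that mirrors the weekly export above; the per-day
# filename is an assumption, not taken from the original script.
data_names_days_all.to_csv("all_data_per_day.csv", index=False)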
"""
Tests for zipline/utils/pandas_utils.py
"""
from unittest import skipIf
import pandas as pd
from zipline.testing import parameter_space, ZiplineTestCase
from zipline.testing.predicates import assert_equal
from zipline.utils.pandas_utils import (
categorical_df_concat,
nearest_unequal_elements,
new_pandas,
skip_pipeline_new_pandas,
)
class TestNearestUnequalElements(ZiplineTestCase):
@parameter_space(tz=['UTC', 'US/Eastern'], __fail_fast=True)
def test_nearest_unequal_elements(self, tz):
dts = pd.to_datetime(
['2014-01-01', '2014-01-05', '2014-01-06', '2014-01-09'],
).tz_localize(tz)
def t(s):
return None if s is None else pd.Timestamp(s, tz=tz)
for dt, before, after in (('2013-12-30', None, '2014-01-01'),
('2013-12-31', None, '2014-01-01'),
('2014-01-01', None, '2014-01-05'),
('2014-01-02', '2014-01-01', '2014-01-05'),
('2014-01-03', '2014-01-01', '2014-01-05'),
('2014-01-04', '2014-01-01', '2014-01-05'),
('2014-01-05', '2014-01-01', '2014-01-06'),
('2014-01-06', '2014-01-05', '2014-01-09'),
('2014-01-07', '2014-01-06', '2014-01-09'),
('2014-01-08', '2014-01-06', '2014-01-09'),
('2014-01-09', '2014-01-06', None),
('2014-01-10', '2014-01-09', None),
('2014-01-11', '2014-01-09', None)):
computed = nearest_unequal_elements(dts, t(dt))
expected = (t(before), t(after))
self.assertEqual(computed, expected)
@parameter_space(tz=['UTC', 'US/Eastern'], __fail_fast=True)
def test_nearest_unequal_elements_short_dts(self, tz):
# Length 1.
dts = pd.to_datetime(['2014-01-01']).tz_localize(tz)
def t(s):
return None if s is None else pd.Timestamp(s, tz=tz)
for dt, before, after in (('2013-12-31', None, '2014-01-01'),
('2014-01-01', None, None),
('2014-01-02', '2014-01-01', None)):
computed = nearest_unequal_elements(dts, t(dt))
expected = (t(before), t(after))
self.assertEqual(computed, expected)
# Length 0
dts = pd.to_datetime([]).tz_localize(tz)
for dt, before, after in (('2013-12-31', None, None),
('2014-01-01', None, None),
('2014-01-02', None, None)):
computed = nearest_unequal_elements(dts, t(dt))
expected = (t(before), t(after))
self.assertEqual(computed, expected)
def test_nearest_unequal_bad_input(self):
with self.assertRaises(ValueError) as e:
nearest_unequal_elements(
pd.to_datetime(['2014', '2014']),
pd.Timestamp('2014'),
)
self.assertEqual(str(e.exception), 'dts must be unique')
with self.assertRaises(ValueError) as e:
nearest_unequal_elements(
pd.to_datetime(['2014', '2013']),
pd.Timestamp('2014'),
)
self.assertEqual(
str(e.exception),
'dts must be sorted in increasing order',
)
class TestCatDFConcat(ZiplineTestCase):
@skipIf(new_pandas, skip_pipeline_new_pandas)
def test_categorical_df_concat(self):
inp = [
pd.DataFrame(
{
'A': pd.Series(['a', 'b', 'c'], dtype='category'),
'B': pd.Series([100, 102, 103], dtype='int64'),
'C': pd.Series(['x', 'x', 'x'], dtype='category'),
}
),
pd.DataFrame(
{
'A': pd.Series(['c', 'b', 'd'], dtype='category'),
'B': pd.Series([103, 102, 104], dtype='int64'),
'C': pd.Series(['y', 'y', 'y'], dtype='category'),
}
),
pd.DataFrame(
{
'A': pd.Series(['a', 'b', 'd'], dtype='category'),
'B': pd.Series([101, 102, 104], dtype='int64'),
'C': pd.Series(['z', 'z', 'z'], dtype='category'),
}
),
]
result = categorical_df_concat(inp)
expected = pd.DataFrame(
{
'A': pd.Series(
['a', 'b', 'c', 'c', 'b', 'd', 'a', 'b', 'd'],
dtype='category'
),
'B': pd.Series(
[100, 102, 103, 103, 102, 104, 101, 102, 104],
dtype='int64'
),
'C': pd.Series(
['x', 'x', 'x', 'y', 'y', 'y', 'z', 'z', 'z'],
dtype='category'
),
},
)
expected.index = pd.Int64Index([0, 1, 2, 0, 1, 2, 0, 1, 2])
assert_equal(expected, result)
assert_equal(
expected['A'].cat.categories,
result['A'].cat.categories
)
assert_equal(
expected['C'].cat.categories,
result['C'].cat.categories
)
def test_categorical_df_concat_value_error(self):
mismatched_dtypes = [
pd.DataFrame(
{
'A': pd.Series(['a', 'b', 'c'], dtype='category'),
'B': pd.Series([100, 102, 103], dtype='int64'),
}
),
pd.DataFrame(
{
'A': pd.Series(['c', 'b', 'd'], dtype='category'),
'B': pd.Series([103, 102, 104], dtype='float64'),
}
),
]
mismatched_column_names = [
pd.DataFrame(
{
'A': pd.Series(['a', 'b', 'c'], dtype='category'),
'B': pd.Series([100, 102, 103], dtype='int64'),
}
),
pd.DataFrame(
{
                    'A': pd.Series(['c', 'b', 'd'], dtype='category'),
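                    # The closing of this fixture and the assertions below are a sketch
                    # modeled on the upstream zipline test; the 'X' column name (which
                    # forces the name mismatch) and the exact values are assumptions.
                    'X': pd.Series([103, 102, 104], dtype='int64'),
                }
            ),
        ]
        with self.assertRaises(ValueError):
            categorical_df_concat(mismatched_dtypes)
        with self.assertRaises(ValueError):
            categorical_df_concat(mismatched_column_names)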
import pandas as pd
import numpy as np
import matplotlib as plt
pd.set_option('display.max_columns', None)
df=pd.read_csv('train_HK6lq50.csv')
def train_data_preprocess(df,train,test):
df['trainee_engagement_rating'].fillna(value=1.0,inplace=True)
df['isage_null']=0
    df.loc[df.age.isnull(), 'isage_null'] = 1
df['age'].fillna(value=0,inplace=True)
#new cols actual_programs_enrolled and total_test_taken
total=train.append(test)
unique_trainee=pd.DataFrame(total.trainee_id.value_counts())
unique_trainee['trainee_id']=unique_trainee.index
value=[]
for i in unique_trainee.trainee_id:
value.append(len(total[total.trainee_id==i].program_id.unique()))
unique_trainee['actual_programs_enrolled']=value
dic1=dict(zip(unique_trainee['trainee_id'],unique_trainee['actual_programs_enrolled']))
df['actual_programs_enrolled']=df['trainee_id'].map(dic1).astype(int)
value=[]
for i in unique_trainee.trainee_id:
value.append(len(total[total.trainee_id==i].test_id.unique()))
unique_trainee['total_test_taken']=value
dic2=dict(zip(unique_trainee['trainee_id'],unique_trainee['total_test_taken']))
df['total_test_taken']=df['trainee_id'].map(dic2).astype(int)
#new col total_trainee_in_each_test
unique_test=pd.DataFrame(total.test_id.value_counts())
unique_test['test_id']=unique_test.index
value=[]
for i in unique_test.test_id:
value.append(len(total[total.test_id==i].trainee_id.unique()))
unique_test['total_trainee_in_each_test']=value
dic3=dict(zip(unique_test['test_id'],unique_test['total_trainee_in_each_test']))
df['total_trainee_in_each_test']=df['test_id'].map(dic3).astype(int)
#LABEL ENCODING
test_type=sorted(df['test_type'].unique())
test_type_mapping=dict(zip(test_type,range(1,len(test_type)+1)))
df['test_type_val']=df['test_type'].map(test_type_mapping).astype(int)
df.drop('test_type',axis=1,inplace=True)
program_type=sorted(df['program_type'].unique())
program_type_mapping=dict(zip(program_type,range(1,len(program_type)+1)))
df['program_type_val']=df['program_type'].map(program_type_mapping).astype(int)
df.drop('program_type',axis=1,inplace=True)
program_id=sorted(df['program_id'].unique())
program_id_mapping=dict(zip(program_id,range(1,len(program_id)+1)))
df['program_id_val']=df['program_id'].map(program_id_mapping).astype(int)
#df.drop('program_id',axis=1,inplace=True)
difficulty_level=['easy','intermediate','hard','vary hard']
difficulty_level_mapping=dict(zip(difficulty_level,range(1,len(difficulty_level)+1)))
df['difficulty_level_val']=df['difficulty_level'].map(difficulty_level_mapping).astype(int)
df.drop('difficulty_level',axis=1,inplace=True)
education=['No Qualification','High School Diploma','Matriculation','Bachelors','Masters']
educationmapping=dict(zip(education,range(1,len(education)+1)))
df['education_val']=df['education'].map(educationmapping).astype(int)
df.drop('education',axis=1,inplace=True)
is_handicapped=sorted(df['is_handicapped'].unique())
is_handicappedmapping=dict(zip(is_handicapped,range(1,len(is_handicapped)+1)))
df['is_handicapped_val']=df['is_handicapped'].map(is_handicappedmapping).astype(int)
df.drop('is_handicapped',axis=1,inplace=True)
#creating new program_id group based on is_pass percentage
df['new_program_id_group']=pd.DataFrame(df['program_id'])
df.loc[(df.new_program_id_group=='X_1')|(df.new_program_id_group=='X_3'),'new_program_id_group']=1
df.loc[(df.new_program_id_group=='Y_1')|(df.new_program_id_group=='Y_2')|(df.new_program_id_group=='Y_3')|(df.new_program_id_group=='Y_4')|(df.new_program_id_group=='X_2'),'new_program_id_group']=2
df.loc[(df.new_program_id_group=='Z_1')|(df.new_program_id_group=='Z_2')|(df.new_program_id_group=='Z_3')|(df.new_program_id_group=='T_2')|(df.new_program_id_group=='T_3')|(df.new_program_id_group=='T_4'),'new_program_id_group']=3
df.loc[(df.new_program_id_group=='U_1'),'new_program_id_group']=4
df.loc[(df.new_program_id_group=='V_1')|(df.new_program_id_group=='U_2'),'new_program_id_group']=5
df.loc[(df.new_program_id_group=='V_3')|(df.new_program_id_group=='S_2')|(df.new_program_id_group=='V_4')|(df.new_program_id_group=='V_2'),'new_program_id_group']=6
df.loc[(df.new_program_id_group=='T_1')|(df.new_program_id_group=='S_1'),'new_program_id_group']=7
df.drop('program_id',axis=1,inplace=True)
#creating col test_id and rating category together
train=pd.read_csv('train_HK6lq50.csv')
test=pd.read_csv('test_2nAIblo.csv')
total=train.append(test)
count=0
total['test_id_and_rating']=0
for a in total.trainee_engagement_rating.unique():
for b in total.test_id.unique():
count+=1
total.loc[(total.trainee_engagement_rating==a)&(total.test_id==b),'test_id_and_rating']=count
dic=dict(zip(total['id'],total['test_id_and_rating']))
df['test_id_and_rating']=df['id'].map(dic)
count=0
total['test_id_and_education']=0
for a in total.education.unique():
for b in total.test_id.unique():
count+=1
total.loc[(total.education==a)&(total.test_id==b),'test_id_and_education']=count
dic=dict(zip(total['id'],total['test_id_and_education']))
df['test_id_and_education']=df['id'].map(dic)
count=0
total['program_type_and_rating']=0
for a in total.trainee_engagement_rating.unique():
for b in total.program_type.unique():
count+=1
total.loc[(total.trainee_engagement_rating==a)&(total.program_type==b),'program_type_and_rating']=count
dic=dict(zip(total['id'],total['program_type_and_rating']))
df['program_type_and_rating']=df['id'].map(dic)
#grouping of test_id_and_rating
c=pd.crosstab(df.test_id_and_rating,df.is_pass)
c_pct=c.div(c.sum(1).astype(float),axis=0)
c_pct.columns = ['fail', 'pass']
c_pct['id_group']=pd.DataFrame(c_pct['pass'])
c_pct.loc[(c_pct.id_group>=.20)&(c_pct.id_group<.30),'id_group']=1
c_pct.loc[(c_pct.id_group>=.30)&(c_pct.id_group<.40),'id_group']=2
c_pct.loc[(c_pct.id_group>=.40)&(c_pct.id_group<.50),'id_group']=3
c_pct.loc[(c_pct.id_group>=.50)&(c_pct.id_group<.60),'id_group']=4
c_pct.loc[(c_pct.id_group>=.60)&(c_pct.id_group<.70),'id_group']=5
c_pct.loc[(c_pct.id_group>=.70)&(c_pct.id_group<.80),'id_group']=6
c_pct.loc[(c_pct.id_group>=.80)&(c_pct.id_group<.90),'id_group']=7
c_pct.loc[(c_pct.id_group>=.90)&(c_pct.id_group<1),'id_group']=8
c_pct.id_group=c_pct.id_group.astype(int)
c_pct.drop(['fail','pass'],axis=1,inplace=True)
dic=c_pct.to_dict()
dic4=dic['id_group']
df['test_id_and_rating_group']=df['test_id_and_rating'].map(dic4).astype(int)
#grouping of program_type_and_rating
c=pd.crosstab(df.program_type_and_rating,df.is_pass)
c_pct=c.div(c.sum(1).astype(float),axis=0)
c_pct.columns = ['fail', 'pass']
c_pct['id_group']=pd.DataFrame(c_pct['pass'])
c_pct.loc[(c_pct.id_group>=.20)&(c_pct.id_group<.30),'id_group']=1
c_pct.loc[(c_pct.id_group>=.30)&(c_pct.id_group<.40),'id_group']=2
c_pct.loc[(c_pct.id_group>=.40)&(c_pct.id_group<.50),'id_group']=3
c_pct.loc[(c_pct.id_group>=.50)&(c_pct.id_group<.60),'id_group']=4
c_pct.loc[(c_pct.id_group>=.60)&(c_pct.id_group<.70),'id_group']=5
c_pct.loc[(c_pct.id_group>=.70)&(c_pct.id_group<.80),'id_group']=6
c_pct.loc[(c_pct.id_group>=.80)&(c_pct.id_group<.90),'id_group']=7
c_pct.loc[(c_pct.id_group>=.90)&(c_pct.id_group<1),'id_group']=8
c_pct.id_group=c_pct.id_group.astype(int)
c_pct.drop(['fail','pass'],axis=1,inplace=True)
dic=c_pct.to_dict()
dic41=dic['id_group']
df['program_type_and_rating_group']=df['program_type_and_rating'].map(dic41).astype(int)
#col avg_rating by test_id
total=train.append(test)
c=pd.crosstab(total.test_id,total.trainee_engagement_rating) #use this for final submission
c['avg_rating']=(c[1.0]+2*c[2.0]+3*c[3.0]+4*c[4.0]+5*c[5.0])/(c[1.0]+c[2.0]+c[3.0]+c[4.0]+c[5.0])
c['test_id']=c.index
dic5=dict(zip(c['test_id'],c['avg_rating']))
df['avg_rating']=df['test_id'].map(dic5)
#rating_diff(count(1.0+2.0)-count(4.0+5.0))
#c=pd.crosstab(total.test_id,total.trainee_engagement_rating) #use this for final submission
c=pd.crosstab(df.test_id,df.trainee_engagement_rating)
c['rating_diff_test_id']=c[1.0]+c[2.0]-c[4.0]-c[5.0]+c[3.0]
c['test_id']=c.index
dic6=dict(zip(c['test_id'],c['rating_diff_test_id']))
df['rating_diff_test_id']=df['test_id'].map(dic6)
#col avg_rating by trainee_id
#c=pd.crosstab(total.test_id,total.trainee_engagement_rating) #use this for final submission
c=pd.crosstab(df.trainee_id,df.trainee_engagement_rating)
c['avg_rating_trainee_id']=(c[1.0]+2*c[2.0]+3*c[3.0]+4*c[4.0]+5*c[5.0])/(c[1.0]+c[2.0]+c[3.0]+c[4.0]+c[5.0])
c['trainee_id']=c.index
dic7=dict(zip(c['trainee_id'],c['avg_rating_trainee_id']))
df['avg_rating_trainee_id']=df['trainee_id'].map(dic7)
#is_pass_diff wrt trainee_engagement_rating
    c=pd.crosstab(df.trainee_engagement_rating,df.is_pass)
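    # Sketch of the remaining step, following the same map-building pattern used
    # above for rating_diff_test_id; the column name 'is_pass_diff' and the exact
    # difference formula are assumptions rather than the original code.
    c['is_pass_diff']=c[1]-c[0]
    c['trainee_engagement_rating']=c.index
    dic8=dict(zip(c['trainee_engagement_rating'],c['is_pass_diff']))
    df['is_pass_diff']=df['trainee_engagement_rating'].map(dic8)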
"""
TODO Pendletoon, doc this whole module
"""
import logging
import pandas as pd
import capture.devconfig as config
from utils.data_handling import update_sheet_column
from utils import globals
from utils.globals import lab_safeget
modlog = logging.getLogger('capture.prepare.interface')
def _get_reagent_header_cells(column: str):
"""Get all cells in the rows that start each reagent for a given colum
:param column: (str) in {A, B, ..., Z, AA, AB, ...}
"""
startrow = lab_safeget(config.lab_vars, globals.get_lab(), 'reagent_interface_amount_startrow')
reagent_interface_step = int(lab_safeget(config.lab_vars, globals.get_lab(), 'maxreagentchemicals')) + 1
num_reagents = lab_safeget(config.lab_vars, globals.get_lab(), 'max_reagents')
stoprow = startrow + reagent_interface_step * num_reagents
result = [column + str(i) for i in range(startrow, stoprow, reagent_interface_step)]
return result
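# Worked example (values assumed, since the real numbers come from devconfig):
# with reagent_interface_amount_startrow=15, maxreagentchemicals=5 and
# max_reagents=8, _get_reagent_header_cells('B') returns
# ['B15', 'B21', 'B27', 'B33', 'B39', 'B45', 'B51', 'B57'].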
def get_reagent_target_volumes(erdf, deadvolume):
"""Target volumes for reagent preparation as dictionary"""
reagent_target_volumes = {}
for reagent in erdf.columns:
reagent_volume = erdf[reagent].sum() + deadvolume
reagentname = reagent.split(' ')[0]
reagent_target_volumes[reagentname] = reagent_volume
return reagent_target_volumes
def build_nominals_df(rdict,
chemicalnamedf,
target_final_volume,
liquidlist,
maxreagentchemicals,
chemdf):
    ''' calculate the mass of each chemical and return a dataframe
TODO: write out nominal molarity to google sheets, see issue#52
:param chemdf: Chemical data frame from google drive.
:returns: a dataframe sized for export to version 2.x interface
'''
nominalsdf = pd.DataFrame()
itemcount = 1
chemicalnamedf.sort_index(inplace=True)
for index, row in chemicalnamedf.iterrows():
reagentname = row['reagentnames']
chemabbr = row['chemabbr']
if row['chemabbr'] == 'Final Volume = ':
formulavollist = []
formulavol = 'null'
itemcount = 1
finalvolindex = index
pass
else:
# stock solutions should be summed for final total volume
if chemabbr in liquidlist or chemabbr == 'FAH': # todo dejank
formulavol = (target_final_volume[reagentname]/1000).round(2)
formulavollist.append(formulavol)
nominalsdf.loc[index, "nominal_amount"] = formulavol
nominalsdf.loc[index, "Unit"] = 'milliliter'
itemcount+=1
elif chemabbr == 'null':
nominalsdf.loc[index, "nominal_amount"] = 'null'
nominalsdf.loc[index, "Unit"] = 'null'
nominalsdf.loc[index, "actualsnull"] = 'null'
itemcount+=1
pass
else:
#calculate reagent amounts from formula
reagentnum = str(reagentname.split('t')[1])
nominalamount = (target_final_volume[reagentname]/1000/1000 * \
rdict[reagentnum].concs['conc_item%s' %(itemcount)] * \
float(chemdf.loc["%s" %chemabbr, "Molecular Weight (g/mol)"])
).round(2)
nominalsdf.loc[index, "nominal_amount"] = nominalamount
nominalsdf.loc[index, "Unit"] = 'gram'
itemcount+=1
if itemcount == (maxreagentchemicals+1):
if len(formulavollist) > 0:
nominalsdf.loc[finalvolindex, "nominal_amount"] = sum(formulavollist)
nominalsdf.loc[finalvolindex, "Unit"] = 'milliliter'
else:
nominalsdf.loc[finalvolindex, "nominal_amount"] = formulavol
nominalsdf.loc[finalvolindex, "Unit"] = 'null'
nominalsdf.loc[finalvolindex, "actualsnull"] = 'null'
modlog.info((reagentname, "formula calculation complete"))
nominalsdf.sort_index(inplace=True)
return nominalsdf
def build_nominals_v1(rdict,
chemicalnamedf,
target_final_volume_dict,
liquidlist,
maxreagentchemicals,
chemdf):
    ''' calculate the mass of each chemical and return a dataframe
Uses model 1 of the density calculation to get a better approximation
for the contribution of solids to the final volume
TODO: write out nominal molarity to google sheets, see issue#52
TODO: ensure column integrity of read in chemical dataframe
:param chemdf: Chemical data frame from google drive.
:returns: a dataframe sized for export to version 2.x interface
'''
nominalsdf = pd.DataFrame()
itemcount = 1
chemicalnamedf.sort_index(inplace=True)
reagentname = []
for index, row in chemicalnamedf.iterrows():
reagent_name_updater = row['reagentnames']
if reagentname != reagent_name_updater:
reagentname = row['reagentnames']
reagentnum = str(reagentname.split('t')[1])
total_remaining_volume = target_final_volume_dict[reagentname] / 1000 / 1000
target_final_volume = target_final_volume_dict[reagentname] / 1000 / 1000
chemabbr = row['chemabbr']
# First iteration should always lead with this string (formatting)
if row['chemabbr'] == 'Final Volume = ':
formulavollist = []
formulavol = 'null'
itemcount = 1
finalvolindex = index
else:
# stock solutions should be summed for final total volume
# Returns nulls to the dataframe where no chemicals / information is expected
if chemabbr == 'null':
nominalsdf.loc[index, "nominal_amount"] = 'null'
nominalsdf.loc[index, "Unit"] = 'null'
nominalsdf.loc[index, "actualsnull"] = 'null'
itemcount+=1
pass
else:
# If the chemical being considered is the final the remaining volume is assigned
if rdict[reagentnum].chemicals[-1] == chemabbr:
nominalsdf.loc[index, "nominal_amount"] = (total_remaining_volume * 1000).round(2)
nominalsdf.loc[index, "Unit"] = 'milliliter'
itemcount+=1
elif chemabbr in liquidlist or chemabbr == 'FAH': # todo dejank
myvariable = rdict[reagentnum].concs['conc_item%s' %(itemcount)]
needed_mol = target_final_volume * rdict[reagentnum].concs['conc_item%s' %(itemcount)]
chemical_volume = needed_mol * float(chemdf.loc["%s" %chemabbr, "Molecular Weight (g/mol)"])\
/ float(chemdf.loc["%s" %chemabbr, "Density (g/mL)"])
total_remaining_volume = total_remaining_volume - chemical_volume / 1000
nominalsdf.loc[index, "nominal_amount"] = chemical_volume.round(2)
nominalsdf.loc[index, "Unit"] = 'milliliter'
itemcount+=1
else:
myvariable = rdict[reagentnum].concs['conc_item%s' %(itemcount)]
needed_mol = target_final_volume * (rdict[reagentnum].concs['conc_item%s' %(itemcount)])
chemical_mass_g = needed_mol * float(chemdf.loc["%s" %chemabbr, "Molecular Weight (g/mol)"])
chemical_volume = needed_mol * float(chemdf.loc["%s" %chemabbr, "Molecular Weight (g/mol)"])\
/ float(chemdf.loc["%s" %chemabbr, "Density (g/mL)"])
total_remaining_volume = total_remaining_volume - chemical_volume / 1000
nominalsdf.loc[index, "nominal_amount"] = chemical_mass_g.round(2)
nominalsdf.loc[index, "Unit"] = 'gram'
itemcount+=1
if itemcount == (maxreagentchemicals+1):
if total_remaining_volume == target_final_volume:
nominalsdf.loc[finalvolindex, "nominal_amount"] = 'null'
nominalsdf.loc[finalvolindex, "Unit"] = 'null'
nominalsdf.loc[finalvolindex, "actualsnull"] = 'null'
else:
nominalsdf.loc[finalvolindex, "nominal_amount"] = (target_final_volume * 1000).round(2)
nominalsdf.loc[finalvolindex, "Unit"] = 'milliliter'
modlog.info((reagentname, "formula calculation complete"))
nominalsdf.sort_index(inplace=True)
return nominalsdf
def build_chemical_names_df(rdict, maxreagentchemicals):
"""generates a dataframe of chemical names for reagent interface
:param chemdf: Chemical data frame from google drive.
    :returns: a dataframe sized for export to version 3.0 interface
"""
chemicalnamelist = []
reagentnamelist = []
holdreagentnum = 1
for reagentnum in sorted(rdict.keys()):
#ensure any reagents not used have placeholders
while int(reagentnum) > holdreagentnum:
chemicalnamelist.append('Final Volume = ')
chemicalnamelist.extend(['null'] * maxreagentchemicals)
maxinterfaceslots = maxreagentchemicals + 1
reagentnamelist.extend(['Reagent%s' %holdreagentnum] * maxinterfaceslots)
holdreagentnum += 1
else:
count = 0
holdreagentnum = int(reagentnum)+1
chemicalnamelist.append('Final Volume = ')
reagentnamelist.append('Reagent%s' %reagentnum)
for chemical in rdict[reagentnum].chemicals:
chemicalnamelist.append(chemical)
reagentnamelist.append('Reagent%s' %reagentnum)
count += 1
while count < maxreagentchemicals:
chemicalnamelist.append('null')
reagentnamelist.append('Reagent%s' %reagentnum)
count += 1
chemicalnamedf = pd.DataFrame(chemicalnamelist, columns=['chemabbr'])
reagentnamedf = pd.DataFrame(reagentnamelist, columns=['reagentnames'])
chemicalnamedf = pd.concat([chemicalnamedf, reagentnamedf], axis=1)
return chemicalnamedf
def build_reagent_spec_df(rxndict, vardict, erdf, rdict, chemdf):
"""Build the dataframe for the bottom portion of the reagent preparation_interface
:param rxndict:
:param vardict:
:param erdf:
:param rdict:
:param chemdf:
:return:
"""
modlog.info('Starting reagent interface upload')
chemical_names_df = build_chemical_names_df(rdict, lab_safeget(config.lab_vars, globals.get_lab(), 'maxreagentchemicals'))
reagent_target_volumes = get_reagent_target_volumes(erdf, rxndict['reagent_dead_volume'] * 1000)
nominals_df = build_nominals_v1(rdict, chemical_names_df, reagent_target_volumes,
vardict['solventlist'], lab_safeget(config.lab_vars, globals.get_lab(), 'maxreagentchemicals'), chemdf)
    reagent_spec_df = pd.concat([chemical_names_df, nominals_df], axis=1)
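    # Assumed final statement (a sketch, not necessarily the original code): hand the
    # combined frame back to the caller, as the docstring's :return: tag suggests.
    return reagent_spec_df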
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# QTPyLib: Quantitative Trading Python Library
# https://github.com/ranaroussi/qtpylib
#
# Copyright 2016-2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import atexit
import json
import logging
import os
import pickle
import sys
import tempfile
import time
import glob
import subprocess
from datetime import datetime
from abc import ABCMeta
import zmq
import pandas as pd
from dateutil.parser import parse as parse_date
import pymysql
from pymysql.constants.CLIENT import MULTI_STATEMENTS
from numpy import (
isnan as np_isnan,
nan as np_nan,
int64 as np_int64
)
from ezibpy import (
ezIBpy, dataTypes as ibDataTypes
)
from qtpylib import (
tools, asynctools, path, futures, __version__
)
# =============================================
# check min, python version
if sys.version_info < (3, 4):
raise SystemError("QTPyLib requires Python version >= 3.4")
# =============================================
# Configure logging
tools.createLogger(__name__, logging.INFO)
# Disable ezIBpy logging (Blotter handles errors itself)
logging.getLogger('ezibpy').setLevel(logging.CRITICAL)
# =============================================
# set up threading pool
__threads__ = tools.read_single_argv("--threads")
__threads__ = __threads__ if tools.is_number(__threads__) else None
asynctools.multitasking.createPool(__name__, __threads__)
# =============================================
cash_ticks = {}
class Blotter():
"""Broker class initilizer
:Optional:
name : string
name of the blotter (used by other modules)
symbols : str
IB contracts CSV database (default: ./symbols.csv)
ibport : int
TWS/GW Port to use (default: 4001)
ibclient : int
TWS/GW Client ID (default: 999)
ibserver : str
IB TWS/GW Server hostname (default: localhost)
zmqport : str
ZeroMQ Port to use (default: 12345)
zmqtopic : str
ZeroMQ string to use (default: _qtpylib_BLOTTERNAME_)
orderbook : str
Get Order Book (Market Depth) data (default: False)
dbhost : str
MySQL server hostname (default: localhost)
dbport : str
MySQL server port (default: 3306)
dbname : str
MySQL server database (default: qpy)
dbuser : str
MySQL server username (default: root)
dbpass : str
MySQL server password (default: none)
dbskip : str
Skip MySQL logging (default: False)
"""
__metaclass__ = ABCMeta
def __init__(self, name=None, symbols="symbols.csv",
ibport=4001, ibclient=999, ibserver="localhost",
dbhost="localhost", dbport="3306", dbname="qtpy",
dbuser="root", dbpass="", dbskip=False, orderbook=False,
zmqport="12345", zmqtopic=None, **kwargs):
# whats my name?
self.name = str(self.__class__).split('.')[-1].split("'")[0].lower()
if name is not None:
self.name = name
# initilize class logger
self.log_blotter = logging.getLogger(__name__)
# do not act on first tick (timezone is incorrect)
self.first_tick = True
self._bars = pd.DataFrame(
columns=['open', 'high', 'low', 'close', 'volume'])
self._bars.index.names = ['datetime']
self._bars.index = pd.to_datetime(self._bars.index, utc=True)
# self._bars.index = self._bars.index.tz_convert(settings['timezone'])
self._bars = {"~": self._bars}
self._raw_bars = pd.DataFrame(columns=['last', 'volume'])
self._raw_bars.index.names = ['datetime']
        self._raw_bars.index = pd.to_datetime(self._raw_bars.index, utc=True)
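        # Assumed continuation, by analogy with self._bars a few lines above:
        # keep raw ticks in a per-symbol dict with "~" as the default key.
        self._raw_bars = {"~": self._raw_bars}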
#!/usr/bin/env python3
"""
Class to organize and extract data from a .vmrk file.
Created 8/20/2020 by <NAME>.
Last updated 5/20/2021 by <NAME>.
"""
from pathlib import Path
import pandas
import re
from dataclasses import dataclass
from os import PathLike
from functools import cached_property
from typing import List
@dataclass
class Vmrk():
"""
Class to organize and extract data from a .vmrk file.
Parameters
----------
input_path : str or Path
Path to a .vmrk file.
Attributes
----------
path : Path
Path to a .vmrk file.
dataframe : DataFrame
DataFrame containing the values of the body of the .vmrk file.
"""
path: PathLike
ONSET_CODE = "S 2"
FMRI_CODE = "R128"
def __post_init__(self):
self.path = Path(self.path).absolute()
@cached_property
def dataframe(self) -> pandas.DataFrame:
"""
Reads the .vmrk file and returns it as a fresh, clean DataFrame.
Columns are numbered in order. Rows are also numbered in order.
"""
# Format our data into a clean list of lists.
line_list = [line.replace("Mk", "") for line in self.body_string.splitlines()]
split_line_list = [re.split(pattern="[,=]", string=line) for line in line_list]
# From the list of lists birth a glorious DataFrame with column names extracted from the header.
        dataframe = pandas.DataFrame(split_line_list, columns=self.column_names)
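        # Assumed final statement of this cached_property (a sketch): hand back
        # the parsed frame so callers can index into the marker rows.
        return dataframe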
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
import re
import os
import pymorphy2
import pandas as pd
import numpy as np
from sklearn.metrics import roc_auc_score
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from gensim import models
from datetime import datetime as dt
def get_similarity_KE(lemmas1, lemmas2):
big = lemmas2
small = lemmas1
if len(lemmas1) > len(lemmas2):
big = lemmas1
small = lemmas2
inters = [i for i in small if i in big] # TODO: or conversely
# no intersection
if len(inters) == 0:
return 0
ratio = (len(inters)/len(lemmas1) + len(inters)/len(lemmas2)) / 2.0
return ratio
def text_similarity(text1, text2, stemmer, stop_words, model):
text1 = str(text1)
text2 = str(text2)
text1 = text1.replace('\n', '')
text2 = text2.replace('\n', '')
lemmas1 = []
lemmas2 = []
digits1 = [] # for simbols with digits
digits2 = [] # for simbols with digits
# english1 = [] # for brands or english words
# english2 = [] # for brands or english words
tokenizer = RegexpTokenizer(r'\w+')
tk1 = tokenizer.tokenize(text1)
tk2 = tokenizer.tokenize(text2)
for word in tk1:
normal = stemmer.parse(word)[0].normal_form
# normal = re.search('[а-я]+', normal)
if not word.isalpha():
digits1.append(word)
continue
# if re.match("^[A-Za-z_-]*$", word):
# english1.append(word)
# continue
if word not in stop_words:
lemmas1.append(normal)
for word in tk2:
normal = stemmer.parse(word)[0].normal_form
if not word.isalpha():
digits2.append(word)
continue
# if re.match("^[A-Za-z_-]*$", word):
# english1.append(word)
# continue
if word not in stop_words:
lemmas2.append(normal)
try:
score = model.n_similarity(lemmas1, lemmas2)
except KeyError as e:
# print('KEY ERROR', e)
score = get_similarity_KE(lemmas1, lemmas2)
# dscore = get_similarity_KE(digits1, digits2)
# total_score = (score+dscore)/2.0
return float(score)
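# Example call (illustrative sketch; the embedding file name is an assumption --
# any gensim-compatible Russian word2vec model would do):
#   stemmer = pymorphy2.MorphAnalyzer()
#   stop_words = set(stopwords.words('russian'))
#   model = models.KeyedVectors.load_word2vec_format('ru_vectors.bin', binary=True)
#   score = text_similarity(title_a, title_b, stemmer, stop_words, model)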
def split_train():
oidx = 1
for i in range(1, 5):
fname = 'data/texts/Ptext_' + str(i) + '.csv'
df = pd.read_csv(fname, compression='gzip')
splitrow = int(df.shape[0]/2)
print(df.shape[0])
set1 = df[:splitrow]
set1.to_csv('data/texts/splits/train_'+str(oidx)+'.csv', compression='gzip')
oidx += 1
print(set1.shape[0])
set2 = df[splitrow:]
set2.to_csv('data/texts/splits/train_' + str(oidx) + '.csv', compression='gzip')
oidx += 1
print(set2.shape[0])
def split_test():
oidx = 1
df = pd.read_csv('data/texts/FPairs_text_test.csv', compression='gzip')
splitrow = int(df.shape[0]/4)
print(df.shape[0])
set1 = df[:splitrow]
set1.to_csv('data/texts/splits/test_' + str(oidx) + '.csv', compression='gzip')
oidx += 1
print(set1.shape[0])
set2 = df[splitrow:splitrow*2]
set2.to_csv('data/texts/splits/test_' + str(oidx) + '.csv', compression='gzip')
oidx += 1
print(set2.shape[0])
set3 = df[splitrow*2:splitrow*3]
set3.to_csv('data/texts/splits/test_' + str(oidx) + '.csv', compression='gzip')
oidx += 1
print(set3.shape[0])
set4 = df[splitrow*3:]
set4.to_csv('data/texts/splits/test_' + str(oidx) + '.csv', compression='gzip')
oidx += 1
print(set4.shape[0])
#
#
#
if __name__ == '__main__':
set1 = pd.read_csv('data/texts/splits/OUTtrain_1.csv', compression='gzip')
    set2 = pd.read_csv('data/texts/splits/OUTtrain_2.csv', compression='gzip')
import sys, os, socket
os.environ["CUDA_VISIBLE_DEVICES"]="0"
hostname = socket.gethostname()
if hostname=='tianx-pc':
homeDir = '/analyse/cdhome/'
projDir = '/analyse/Project0257/'
elif hostname[0:7]=='deepnet':
homeDir = '/home/chrisd/'
projDir = '/analyse/Project0257/'
import keras
keras.backend.clear_session()
import tensorflow as tf
from keras.models import Model
from keras import backend as K
import numpy as np
import pandas as pd
from PIL import Image
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD, Adam
from keras.callbacks import ModelCheckpoint, TensorBoard, ProgbarLogger
from keras.layers import Dense, Input, AveragePooling2D, Flatten, BatchNormalization
from keras.models import Model
sys.path.append(os.path.abspath(homeDir+'dlfaceScripts/'))
sys.path.append(os.path.abspath(homeDir+'dlfaceScripts/SchynsLabDNN/faceNets/'))
from vae_models import ResNet10Encoder, ResNet10Decoder, Darknet19Encoder, Darknet19Decoder, classifierOnVAE
from vae import AutoEncoder
classes = [2004]
classLabels = list(map(lambda x: "{:04d}".format(x),range(classes[0]))) # <NAME> is an angel and a genius
classLabels = dict(zip(classLabels,list(range(classes[0]))))
# define training and validation data files
dataDir = projDir+'christoph_face_render_withAUs_20190730/'
main0_data_txt = dataDir+'images_firstGen_ctrlSim_k1_355ModelsEquivalents/path/linksToImages.txt'
main1_data_txt = dataDir+'images_firstGen_ctrlSim_k1/path/linksToImages.txt'
colleagues0_data_txt = dataDir+'colleagueFaces355Models/meta/links_to_images.txt'
colleagues1_data_txt = dataDir+'colleagueFacesSimStructk1/meta/links_to_images.txt'
# read in 2k IDs from 0th and 1st generation
main0_df = pd.read_csv(main0_data_txt, delim_whitespace = True, header=None)
main1_df = pd.read_csv(main1_data_txt, delim_whitespace = True, header=None)
main_df = pd.concat([main0_df, main1_df])
main_df.reset_index(drop=True, inplace=True)
main_df.columns = ['filename', 'yID', 'yVector', 'yGender', 'yEthn', 'yAge', 'yEmo', 'yAnglex', 'yAngley', 'yAnglelx', 'yAnglely']
main_df = main_df[['filename','yID']]
main_df['yID'] = main_df['yID'].astype(str).str.zfill(4)
# sample 80% of 2k faces for training
train_df = main_df.sample(frac=0.8,random_state=1)
# create val&test frame \ training images
valtest_df = main_df.drop(train_df.index)
valtest_df.reset_index(drop=True, inplace=True)
# select half of the val&test data to be val data
val_df = valtest_df.sample(frac=0.5,random_state=1)
# drop validation data from val&test to obtain test data
test_df = valtest_df.drop(val_df.index)
# read in colleague faces of 0th and 1st generation
colleague0_df = pd.read_csv(colleagues0_data_txt, delim_whitespace = True, header=None)
colleague1_df = pd.read_csv(colleagues1_data_txt, delim_whitespace = True, header=None)
colleague_df = pd.concat([colleague0_df, colleague1_df])
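# Plausible continuation mirroring the handling of main_df above (an assumption,
# not original code): reset the index after concatenating the two generations.
colleague_df.reset_index(drop=True, inplace=True)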
import pandas as pd
from functools import reduce
from aggregations import Aggragator, Measure, MeasureF, MeasureF1, MeasureTime
from file_helper import write_file
import sys
import numpy as np
from logger import Logger
import scipy.stats as stats
from collections import namedtuple
TableData = namedtuple('TableData',
'cols alignment header_name groups header body formatted_rank pvalue groups_range grouped_midrule header_midrule')
log = Logger(name='latex')
eol = '\n'
line_end = '\\\\'
row_end = line_end
sep = ' & '
pm = '\\,$\\pm$\\,'
def identify(x):
print(x)
return x
def mappings(value):
if isinstance(value, str):
return value \
.replace('f_2np2', '2n^2') \
.replace('f_2n', '2n') \
.replace('f_n3', 'n^3') \
.replace('f_2pn', '2^n') \
.replace('tp', 'TP') \
.replace('fp', 'FP') \
.replace('tn', 'TN') \
.replace('fn', 'FN') \
.replace('accuracy', 'acc') \
.replace('precision', 'p') \
.replace('recall', 'r')
else:
return value
def header_mappings(value):
if isinstance(value, str):
return value.replace('standardized', 'Standaryzacja') \
.replace('margin', 'Margines') \
.replace('constraints_generator', 'Ograniczenia') \
.replace('sigma', '\sigma') \
.replace('clustering', 'k_{min}') \
.replace('total_experiments', 'Total')
else:
return value
def math(value):
return '${{{}}}$'.format(value)
def boldmath(value):
return '$\\bm{{{}}}$'.format(value)
def convert_attribute_value(value):
return math(mappings(value))
def bold(text):
return '\\textbf{{{input}}}'.format(input=text)
def bm(text):
if '$' in text:
return '$\\bm{{{}}}$'.format(text)
else:
return '\\textbf{{{input}}}'.format(input=text)
class Formatter:
@staticmethod
def format_color(x: float, reverse = False) -> str:
if reverse:
x = 1 - x
x = np.round(x, decimals=5)
return '{green!70!lime!%d!red!70!yellow!80!white}' % int(100 * x)
@staticmethod
def format_error(x: float) -> str:
# x = min(x, 1.0) if x is float else 0.0
# x = max(x, np.nan)
return '\\begin{tikzpicture}[y=0.75em,baseline=1pt]\\draw[very thick] (0,0) -- (0,%f);\\end{tikzpicture}' % x
@staticmethod
def format_cell(norm_rank: float, value: float, error: float, reverse_colors: bool = False) -> str:
error = error / value if value > 0 else 0
error = min(error, 1)
return '\cellcolor%s %0.2f %s ' % \
(Formatter.format_color(norm_rank, reverse_colors), value, Formatter.format_error(error))
@staticmethod
def format_header(attribute_values, name):
attribute_values = ["(1,1)", "(1,\infty)", "(2,\infty)"] * 2 if attribute_values == [0, 1, 2] * 2 else attribute_values
attributes = list(map(lambda x: convert_attribute_value(x), attribute_values))
header = math(name) + sep + reduce(lambda x, y: x + sep + y, attributes)
return header
@staticmethod
def first_row_formatter(value):
return Formatter.format_model_name(value)
@staticmethod
def format_rank(series: pd.Series):
return math(series.name) + sep + reduce(lambda x, y: str(x) + sep + str(y),
series.apply(lambda x: "%0.3f" % x if np.isfinite(x) else "---"))
@staticmethod
def format_model_name(name):
if isinstance(name, tuple):
components = list(name)
elif isinstance(name, list):
components = name
else:
components = name.split('_')
formats = ['%s', '^{%s}', '_{%s}']
items = [format % component for format, component in zip(formats, components)]
return '$%s$' % reduce(lambda x, y: x + y, items).title()
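    # e.g. Formatter.format_model_name('cma_2_nl') -> '$Cma^{2}_{Nl}$' (illustrative
    # input); the components fill the base / superscript / subscript slots in order.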
class DataTable:
decimals = 2
def __init__(self, data: pd.DataFrame, top_header_name: str, attribute: str, attribute_values: list):
self.cols = len(attribute_values)
self.attribute = attribute
self.attribute_values = attribute_values
self.top_header_name = top_header_name
self.data = data
self.title: str = ''
def create_row_section(self, df: pd.DataFrame):
keys = [('row', key) for key in self.attribute_values]
df[keys] = df.apply(self.format_series_element, axis=1)
def format_series_element(self, series: pd.Series):
unstucked = series.unstack().apply(
lambda row: Formatter.format_cell(row['rank_norm'], row['f_mean'], row['sem_norm']))
return unstucked
def concat_row(self, x: pd.Series):
component = '\\midrule\n' if x.name.split('_')[2] == '2' else ''
return component + Formatter.format_model_name(x.name) + sep + reduce(lambda xi, yi: xi + sep + yi,
x) + row_end + eol
def rank(self, data: pd.DataFrame):
result = '\\midrule\n' + math("Rank") + sep + reduce(lambda x, y: str(x) + sep + str(y),
map(lambda x: "%0.2f" % x,
data['rank'].mean())) + row_end + eol
return result
def build(self) -> str:
data = self.data.round(decimals=self.decimals)
self.create_row_section(data)
table_data = data['row'].apply(self.concat_row, axis=1)
elements = list(table_data)
elements.append(self.rank(data))
header = Formatter.format_header(self.attribute_values, self.title)
reduced: str = reduce(lambda x, y: x + y, elements)
return '\\toprule\n ' \
'\multicolumn{{{count}}}{{{alignment}}}{{{name}}} \\\\ \n' \
'\\midrule\n' \
'{header} \\\\ \n' \
'{body}' \
'\\bottomrule\n'.format(count=self.cols + 1, alignment='c', name=self.top_header_name, header=header,
body=reduced)
class DataPivotTable(DataTable):
def __init__(self, data: pd.DataFrame, top_header_name: str, attribute: str, attribute_values: list,
pivot: bool = True, header_formatter=Formatter.format_header,
row_formatter=Formatter.first_row_formatter, reverse_colors=False):
data.fillna(value=0, inplace=True)
super().__init__(data=data, top_header_name=top_header_name, attribute=attribute,
attribute_values=attribute_values)
self.pivot = pivot
self.formatters = {'header': header_formatter,
'first_row': row_formatter}
self.measures = data.index.levels[0]
self.total_cols = len(self.measures) * self.cols + 1
self.reverse_colors = reverse_colors
def format_series(self, series):
col_formatter = lambda s, attribute: Formatter.format_cell(s[('rank_norm', attribute)],
s[('f_mean', attribute)],
s[('sem_norm', attribute)],
self.reverse_colors)
formatted_cols = [col_formatter(series, attribute) for attribute in self.attribute_values]
        return pd.Series(data=formatted_cols, name=series.name)
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
assert isinstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
assert len(result) == 3
tm.assert_frame_equal(pd.concat(result), expected)
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skipfooter)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skipfooter=1)
pytest.raises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_blank_df(self):
# GH 14545
data = """a,b
"""
df = self.read_csv(StringIO(data), header=[0])
expected = DataFrame(columns=['a', 'b'])
tm.assert_frame_equal(df, expected)
round_trip = self.read_csv(StringIO(
expected.to_csv(index=False)), header=[0])
tm.assert_frame_equal(round_trip, expected)
data_multiline = """a,b
c,d
"""
df2 = self.read_csv(StringIO(data_multiline), header=[0, 1])
cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
expected2 = DataFrame(columns=cols)
tm.assert_frame_equal(df2, expected2)
round_trip = self.read_csv(StringIO(
expected2.to_csv(index=False)), header=[0, 1])
tm.assert_frame_equal(round_trip, expected2)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
assert df.index.name is None
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
        df = self.read_csv(StringIO(text))
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calendar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tseries import offsets
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz
from pandas.errors import OutOfBoundsDatetime
from pandas.compat import long, PY3
from pandas.compat.numpy import np_datetime64_compat
from pandas import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# that we are int/long like
assert isinstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert getattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert getattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.get_locales() is None else [None] + tm.get_locales())
def test_names(self, data, time_locale):
# GH 17354
# Test .weekday_name, .day_name(), .month_name
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
assert data.weekday_name == 'Monday'
if time_locale is None:
expected_day = 'Monday'
expected_month = 'August'
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calendar.day_name[0].capitalize()
expected_month = calendar.month_name[8].capitalize()
assert data.day_name(time_locale) == expected_day
assert data.month_name(time_locale) == expected_month
# Test NaT
nan_ts = Timestamp(NaT)
assert np.isnan(nan_ts.day_name(time_locale))
assert np.isnan(nan_ts.month_name(time_locale))
@pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo'])
def test_is_leap_year(self, tz):
# GH 13727
dt = Timestamp('2000-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
assert isinstance(dt.is_leap_year, bool)
dt = Timestamp('1999-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
dt = Timestamp('2004-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
dt = Timestamp('2100-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013, 12, 31)
result = Timestamp(d).week
expected = 1 # ISO standard
assert result == expected
d = datetime(2008, 12, 28)
result = Timestamp(d).week
expected = 52 # ISO standard
assert result == expected
d = datetime(2009, 12, 31)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 1)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 3)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
result = np.array([Timestamp(datetime(*args)).week
for args in [(2000, 1, 1), (2000, 1, 2), (
2005, 1, 1), (2005, 1, 2)]])
assert (result == [52, 52, 53, 53]).all()
class TestTimestampConstructors(object):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime(2014, 7, 1, 9, 0, 0, 8),
base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'),
base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date,
tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
def test_constructor_with_stringoffset(self):
# GH 7833
base_str = '2014-07-01 11:00:00+02:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_expected),
('2014-07-01 12:00:00+02:00',
base_expected + 3600 * 1000000000),
('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),
('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, expected in tests:
for result in [Timestamp(date_str)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
# This should be 2013-11-01 05:00 in UTC
# converted to Chicago tz
result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2013-11-01 05:00 in UTC
# converted to Tokyo tz (+09:00)
result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
assert repr(result) == expected
assert result == eval(repr(result))
# GH11708
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Katmandu
result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Kolkata
result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
assert repr(result) == expected
assert result == eval(repr(result))
def test_constructor_invalid(self):
with tm.assert_raises_regex(TypeError, 'Cannot convert input'):
Timestamp(slice(2))
with tm.assert_raises_regex(ValueError, 'Cannot convert Period'):
Timestamp(Period('1000-01-01'))
def test_constructor_invalid_tz(self):
# GH#17690
with tm.assert_raises_regex(TypeError, 'must be a datetime.tzinfo'):
Timestamp('2017-10-22', tzinfo='US/Eastern')
with tm.assert_raises_regex(ValueError, 'at most one of'):
Timestamp('2017-10-22', tzinfo=utc, tz='UTC')
with tm.assert_raises_regex(ValueError, "Invalid frequency:"):
# GH#5168
# case where user tries to pass tz as an arg, not kwarg, gets
# interpreted as a `freq`
Timestamp('2012-01-01', 'US/Pacific')
def test_constructor_tz_or_tzinfo(self):
# GH#17943, GH#17690, GH#5168
stamps = [Timestamp(year=2017, month=10, day=22, tz='UTC'),
Timestamp(year=2017, month=10, day=22, tzinfo=utc),
Timestamp(year=2017, month=10, day=22, tz=utc),
Timestamp(datetime(2017, 10, 22), tzinfo=utc),
Timestamp(datetime(2017, 10, 22), tz='UTC'),
Timestamp(datetime(2017, 10, 22), tz=utc)]
assert all(ts == stamps[0] for ts in stamps)
def test_constructor_positional(self):
# see gh-10758
with pytest.raises(TypeError):
Timestamp(2000, 1)
with pytest.raises(ValueError):
Timestamp(2000, 0, 1)
with pytest.raises(ValueError):
Timestamp(2000, 13, 1)
with pytest.raises(ValueError):
Timestamp(2000, 1, 0)
with pytest.raises(ValueError):
Timestamp(2000, 1, 32)
# see gh-11630
assert (repr(Timestamp(2015, 11, 12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_keyword(self):
# GH 10758
with pytest.raises(TypeError):
Timestamp(year=2000, month=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=0, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=13, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=0)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=32)
assert (repr(Timestamp(year=2015, month=11, day=12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(year=2015, month=11, day=12, hour=1, minute=2,
second=3, microsecond=999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_fromordinal(self):
base = datetime(2000, 1, 1)
ts = Timestamp.fromordinal(base.toordinal(), freq='D')
assert base == ts
assert ts.freq == 'D'
assert base.toordinal() == ts.toordinal()
ts = Timestamp.fromordinal(base.toordinal(), tz='US/Eastern')
assert Timestamp('2000-01-01', tz='US/Eastern') == ts
assert base.toordinal() == ts.toordinal()
# GH#3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
assert ts.to_pydatetime() == dt
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(), tz='US/Eastern')
assert ts.to_pydatetime() == dt_tz
@pytest.mark.parametrize('result', [
Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, minute=4, second=5,
microsecond=6, nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, minute=4, second=5,
microsecond=6, nanosecond=1, tz='UTC'),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, None),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC)])
def test_constructor_nanosecond(self, result):
# GH 18898
expected = Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), tz=result.tz)
expected = expected + Timedelta(nanoseconds=1)
assert result == expected
@pytest.mark.parametrize('arg', ['year', 'month', 'day', 'hour', 'minute',
'second', 'microsecond', 'nanosecond'])
def test_invalid_date_kwarg_with_string_input(self, arg):
kwarg = {arg: 1}
with pytest.raises(ValueError):
Timestamp('2010-10-10 12:59:59.999999999', **kwarg)
def test_out_of_bounds_value(self):
one_us = np.timedelta64(1).astype('timedelta64[us]')
# By definition we can't go out of bounds in [ns], so we
# convert the datetime64s to [us] so we can go out of bounds
min_ts_us = np.datetime64(Timestamp.min).astype('M8[us]')
max_ts_us = np.datetime64(Timestamp.max).astype('M8[us]')
# No error for the min/max datetimes
Timestamp(min_ts_us)
Timestamp(max_ts_us)
# One us less than the minimum is an error
with pytest.raises(ValueError):
Timestamp(min_ts_us - one_us)
# One us more than the maximum is an error
with pytest.raises(ValueError):
Timestamp(max_ts_us + one_us)
def test_out_of_bounds_string(self):
with pytest.raises(ValueError):
Timestamp('1676-01-01')
with pytest.raises(ValueError):
Timestamp('2263-01-01')
def test_barely_out_of_bounds(self):
# GH#19529
# GH#19382 close enough to bounds that dropping nanos would result
# in an in-bounds datetime
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2262-04-11 23:47:16.854775808')
def test_bounds_with_different_units(self):
out_of_bounds_dates = ('1677-09-21', '2262-04-12')
time_units = ('D', 'h', 'm', 's', 'ms', 'us')
for date_string in out_of_bounds_dates:
for unit in time_units:
                dt64 = np.datetime64(date_string, unit)
with pytest.raises(ValueError):
Timestamp(dt64)
in_bounds_dates = ('1677-09-23', '2262-04-11')
for date_string in in_bounds_dates:
for unit in time_units:
                dt64 = np.datetime64(date_string, unit)
Timestamp(dt64)
def test_min_valid(self):
# Ensure that Timestamp.min is a valid Timestamp
Timestamp(Timestamp.min)
def test_max_valid(self):
# Ensure that Timestamp.max is a valid Timestamp
Timestamp(Timestamp.max)
def test_now(self):
# GH#9000
ts_from_string = Timestamp('now')
ts_from_method = Timestamp.now()
ts_datetime = datetime.now()
ts_from_string_tz = Timestamp('now', tz='US/Eastern')
ts_from_method_tz = Timestamp.now(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# small)
delta = Timedelta(seconds=1)
assert abs(ts_from_method - ts_from_string) < delta
assert abs(ts_datetime - ts_from_method) < delta
assert abs(ts_from_method_tz - ts_from_string_tz) < delta
assert (abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
def test_today(self):
ts_from_string = Timestamp('today')
ts_from_method = Timestamp.today()
ts_datetime = datetime.today()
ts_from_string_tz = Timestamp('today', tz='US/Eastern')
ts_from_method_tz = Timestamp.today(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# small)
delta = Timedelta(seconds=1)
assert abs(ts_from_method - ts_from_string) < delta
assert abs(ts_datetime - ts_from_method) < delta
assert abs(ts_from_method_tz - ts_from_string_tz) < delta
assert (abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
class TestTimestamp(object):
def test_tz(self):
tstr = '2014-02-01 09:00'
ts = Timestamp(tstr)
local = ts.tz_localize('Asia/Tokyo')
assert local.hour == 9
assert local == Timestamp(tstr, tz='Asia/Tokyo')
conv = local.tz_convert('US/Eastern')
assert conv == Timestamp('2014-01-31 19:00', tz='US/Eastern')
assert conv.hour == 19
# preserves nanosecond
ts = Timestamp(tstr) + offsets.Nano(5)
local = ts.tz_localize('Asia/Tokyo')
assert local.hour == 9
assert local.nanosecond == 5
conv = local.tz_convert('US/Eastern')
assert conv.nanosecond == 5
assert conv.hour == 19
def test_utc_z_designator(self):
assert get_timezone(Timestamp('2014-11-02 01:00Z').tzinfo) == 'UTC'
def test_asm8(self):
np.random.seed(7960929)
ns = [Timestamp.min.value, Timestamp.max.value, 1000]
for n in ns:
assert (Timestamp(n).asm8.view('i8') ==
np.datetime64(n, 'ns').view('i8') == n)
assert (Timestamp('nat').asm8.view('i8') ==
np.datetime64('nat', 'ns').view('i8'))
def test_class_ops_pytz(self):
def compare(x, y):
assert (int(Timestamp(x).value / 1e9) ==
int(Timestamp(y).value / 1e9))
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now('UTC'), datetime.now(timezone('UTC')))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calendar.timegm(datetime.now().utctimetuple())
compare(Timestamp.utcfromtimestamp(current_time),
datetime.utcfromtimestamp(current_time))
compare(Timestamp.fromtimestamp(current_time),
datetime.fromtimestamp(current_time))
date_component = datetime.utcnow()
time_component = (date_component + timedelta(minutes=10)).time()
compare(Timestamp.combine(date_component, time_component),
datetime.combine(date_component, time_component))
def test_class_ops_dateutil(self):
def compare(x, y):
assert (int(np.round(Timestamp(x).value / 1e9)) ==
int(np.round(Timestamp(y).value / 1e9)))
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now('UTC'), datetime.now(tzutc()))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calendar.timegm(datetime.now().utctimetuple())
        compare(Timestamp.utcfromtimestamp(current_time),
                datetime.utcfromtimestamp(current_time))
# coding: utf-8
"""Main estimation code.
"""
import re
import numpy as np
import pandas as pd
from scipy.stats.mstats import gmean
from statsmodels.base.model import GenericLikelihoodModel
from numba import jit
_norm_pdf_C = np.sqrt(2 * np.pi)
@jit(nopython=True)
def _norm_pdf(x):
return np.exp(-x ** 2 / 2) / _norm_pdf_C
@jit(nopython=True)
def _kde_local(loc, data, bw, lmbda):
"""Return the locally smoothed kernel density estimate at *loc*
based on *data* with locally smoothed bandwidth *bw x lmbda*,
where *lmbda* is either a scalar or a vector of the same length
as *data*.
"""
l_s_bw = bw * lmbda
d = (loc - data).T / l_s_bw
s = (_norm_pdf(d) / l_s_bw).T
kde = 0.0
for r in range(s.shape[0]):
kde += s[r].prod()
return kde
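# Illustrative note (not part of the original source): for a single 2-d index point,
# _kde_local evaluates a product Gaussian kernel with one bandwidth per observation, e.g.
#
#   loc  = np.array([0.0, 0.0])
#   data = np.array([[0.1, -0.2], [1.5, 0.7]])   # two observations of the 2-d index
#   bw, lmbda = 0.5, np.ones(2)
#   _kde_local(loc, data, bw, lmbda)
#   # = sum_i prod_k phi((loc_k - data[i, k]) / h_i) / h_i,  with h_i = bw * lmbda[i]
#
# Dividing the result by the number of observations (as done in _kde_local_array_core)
# turns it into the usual locally smoothed kernel density estimate.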
@jit(nopython=True)
def _kde_local_array_core(index_std, locs_std, leave_one_out_locs, other_locs, nobs, h, lmbda):
# Loop over leave-one-out variables and others.
loo_shape = (index_std.shape[0] - 1, index_std.shape[1])
loo_index = np.empty(loo_shape, dtype=np.double)
loo_lmbda = np.empty(loo_shape[0], dtype=np.double)
out = np.empty(len(locs_std), dtype=np.double) * np.nan
i = 0
for j in leave_one_out_locs:
k_loo = 0
for k in range(index_std.shape[0]):
if not k == i:
loo_index[k_loo, 0] = index_std[k, 0]
loo_index[k_loo, 1] = index_std[k, 1]
loo_lmbda[k_loo] = lmbda[k]
k_loo += 1
out[j] = _kde_local(locs_std[j], loo_index, h, loo_lmbda) / (nobs - 1)
i += 1
for j in other_locs:
out[j] = _kde_local(locs_std[j], index_std, h, lmbda) / nobs
return out
def _kde_local_array(locs, index, leave_one_out_locs, other_locs, nobs, h, lmbda):
"""Return locally smoothed density of *index* evaluated
at each element of *locs*.
Further parameters:
* *h* - the baseline bandwidth
* *lmbda* - the local smoothing parameter adjusting the bandwidth
In KV (2009), this corresponds to the :math:`f^\hat_s, s \in \{0, 1\}`
in D1 (but for all observations instead of one ω).
"""
# Standardise data and locs s.t. the product kernel can be used easily.
Sigma = np.cov(index.T)
if len(Sigma.shape) == 0:
Sigma_inv = Sigma ** -1
sqrt_det = np.sqrt(Sigma_inv)
chol_Sigma_inv = sqrt_det
elif len(Sigma.shape) == 2:
Sigma_inv = np.linalg.inv(Sigma)
sqrt_det = np.sqrt(np.linalg.det(Sigma_inv))
chol_Sigma_inv = np.linalg.cholesky(Sigma_inv)
index_std = index.dot(chol_Sigma_inv)
locs_std = locs.dot(chol_Sigma_inv)
return sqrt_det * _kde_local_array_core(
index_std,
locs_std,
leave_one_out_locs,
other_locs,
nobs,
h,
lmbda
)
class KleinVellaDoubleIndex(GenericLikelihoodModel):
def __init__(self, data, y_name, index_names, index_colnames):
"""Set up the data and basic model. Arguments:
* *data*: A pandas dataframe with all dependent and explanatory
variables
* *y_name*: The name of the dependent variable (string)
* *index_names*: A 2-element list/tuple with the names of the indices.
E.g.: ['Structural Equation', 'Control Function']
* *index_colnames*: A 2-element list of iterables with the names of
the independent variables (strings). E.g.:
[
['age', 'female', 'income'],
['wealth', 'female', 'income']
]
Both should contain a dedicated continuous
variable as the first element (responsibility of the user).
        *y_name* and the elements of *index_colnames[k]* must be present in the
        columns of *data*.
"""
cols = data.columns
assert y_name in cols
self.y_name = y_name
assert len(index_names) == 2
assert len(index_colnames) == 2
self.index_names = tuple(index_names)
self.index_colnames = []
self.index_colnames_all = []
        self.index_ncoeffs = np.zeros(2, dtype=int)
for i in range(2):
for i_n in index_colnames[i]:
assert i_n in cols, "'{}' not in data columns!".format(i_n)
self.index_colnames.append(tuple(index_colnames[i]))
self.index_ncoeffs[i] = len(self.index_colnames[i]) - 1
for v0 in self.index_colnames[0]:
if v0 not in self.index_colnames[1]:
self.index_colnames_all.append(v0)
for v1 in self.index_colnames[1]:
self.index_colnames_all.append(v1)
self.coeffs = [None, None]
# Retain only data without missings in all relevant variables
self._data = data.dropna(subset=[y_name] + self.index_colnames_all)
self._nobs = len(self._data)
self._data = self._data.set_index(np.arange(self._nobs))
# Trimming is done ex post, so we can set the data here already.
super(KleinVellaDoubleIndex, self).__init__(
endog=self._data[self.y_name],
exog=self._data[self.index_colnames_all]
)
self.endog = self._data[self.y_name]
self.exog = self._data[self.index_colnames_all]
# Consistency check - binary dependent variable?
assert set(self._data[self.y_name].unique()) == {0, 1}, (
"\n\nY is not a binary variable: {}\n\n".format(set(self._data[self.y_name].unique()))
)
def coeffs_from_vec(self, coeffs_vec):
"""Set the attribute *coeffs* based on *coeffs_vec*."""
coeffs = [self.coeffs[0].copy(), self.coeffs[1].copy()]
coeffs[0].iloc[1:] = coeffs_vec[:self.index_ncoeffs[0]].copy()
coeffs[1].iloc[1:] = coeffs_vec[self.index_ncoeffs[0]:].copy()
return coeffs
def _coeff_series_to_vec(self, coeffs):
        vec = np.zeros(self.index_ncoeffs.sum(), dtype=float)
vec[:self.index_ncoeffs[0]] = coeffs[0].iloc[1:].values.copy()
vec[self.index_ncoeffs[0]:] = coeffs[1].iloc[1:].values.copy()
return vec
def get_index(self, coeffs):
"""Return the based on a 2-element list of *coeffs* and the data in *self.exog*.
"""
return pd.DataFrame(
data=[
self.exog[coeffs[0].index].dot(coeffs[0]),
self.exog[coeffs[1].index].dot(coeffs[1])
],
index=[0, 1]
).T
def τ(self, z, a):
"""Return smooth trimming weights, formula in D2 of KV (2009)."""
return 1 / (1 + np.exp(z * self._nobs ** a))
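        # Sanity check (illustrative): τ(0, a) = 1 / (1 + exp(0)) = 0.5 for any a, and the
        # weight moves smoothly towards 0 for large positive z and towards 1 for large
        # negative z, which is what makes the trimming in D2 "smooth".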
def _λ(self, f):
"""Return the estimated local smoothing parameter, formula in D3 of KV (2009)."""
γ = f / gmean(f)
d = self.τ(z=1 / np.log(self._nobs) - γ, a=0.01)
return (d * γ + (1 - d) / np.log(self._nobs)) ** (-1 / 2)
def λ_multi_stage(self, index, n_stages, h1=None, h2=None):
"""Return the vector of estimated local smoothing parameters in D3/D4 of KV (2009)
for each element of *index*.
The parameter *n_stages ∊ {1, 2, 3}* controls the number of stages:
* 1 just returns a vector of ones
* 2 returns a vector of parameters from a single smoothing step
* 3 returns a vector of parameters from two smoothing steps
"""
if len(index.shape) == 1:
index = index.reshape((len(index), 1))
n = len(index)
all_obs = np.arange(n)
no_obs = np.array([], dtype=np.int64)
λ1 = np.ones(n, dtype=np.double)
if n_stages == 1:
return λ1
elif n_stages in {2, 3}:
assert h1 is not None
λ2 = self._λ(_kde_local_array(index, index, all_obs, no_obs, self._nobs, h1, λ1))
if n_stages == 2:
return λ2
else:
assert h2 is not None, "3-stage smoothing currently not implemented."
return self._λ(_kde_local_array(index, index, all_obs, no_obs, self._nobs, h2, λ2))
else:
raise ValueError(n_stages)
def _xtrim(self, lower, upper):
"""Return trimming indicator series, where trimming is based on
the covariates directly (and the quantiles to be trimmed at, i.e.
*lower* and *upper*).
"""
trm = pd.Series(data=True, index=self._data.index)
for c in self.index_colnames_all:
l_limit = np.percentile(self._data[c], 100 * lower)
u_limit = np.percentile(self._data[c], 100 * upper)
trm &= self._data[c].apply(lambda x: True if l_limit <= x <= u_limit else False)
return trm
def f_s_pilot(self, s, index):
"""Return a pilot density estimate (potentially locally smoothed)
conditional on the outcome of the dependent variable, as defined
in D1-D4 of KV (2009).
In theory (see the paper), the local smoothing step is not needed.
In practice, it is used in the code by the authors.
"""
assert s in {0, 1}
index_s = index[self.endog == s].values
leave_one_out_locs = index[self.endog == s].index.values
other_locs = index[self.endog == 1 - s].index.values
λ = self.λ_multi_stage(index_s, n_stages=self._n_smoothing_stages_pilot, h1=self._h_pilot)
return _kde_local_array(
index.values,
index_s,
leave_one_out_locs,
other_locs,
self._nobs,
self._h_pilot,
λ
)
def semiparametric_probability_function_pilot(self, index):
f0 = self.f_s_pilot(0, index)
f1 = self.f_s_pilot(1, index)
return f1 / (f1 + f0)
def _bin_loglikeobs(self, P):
Y = self.endog
return Y * np.log(P) + (1 - Y) * np.log(1 - P)
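        # This is the Bernoulli log-likelihood per observation: an observation with Y = 1
        # contributes log(P), one with Y = 0 contributes log(1 - P). For example, P = 0.8
        # gives log(0.8) ≈ -0.223 when Y = 1 and log(0.2) ≈ -1.609 when Y = 0.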
def _loglikeobs_pilot(self, coeffs_vec):
"""Return the pilot estimator of the log likelihood function, i.e. the Q
in D6 of KV (2009).
"""
self.coeffs = self.coeffs_from_vec(coeffs_vec)
index = self.get_index(self.coeffs)
P = self.semiparametric_probability_function_pilot(index)
return self._xtrim_series * self._bin_loglikeobs(P)
def fit_pilot(
self,
coeffs_start=[None, None],
trim_lower=0.01,
trim_upper=0.99,
n_smoothing_stages_pilot=1,
maxiter=500
):
"""Fit the initial model, where trimming is based on the covariates
directly (as opposed to the index).
Arguments: *coeffs_start* a 2-element list of start values for the
coefficient vectors of both indices. The order must be the same as
the order of *self.index_colnames* and the initial element of each start
        vector must be +1 or -1 (the scale normalisation). If the start values are set to *None*, a vector
of ones will be used.
"""
for i in range(2):
if coeffs_start[i] is None:
coeffs_start[i] = pd.Series(data=1.0, index=self.index_colnames[i])
else:
assert tuple(coeffs_start[i].index) == self.index_colnames[i]
assert coeffs_start[i].iloc[0] in [-1.0, 1.0]
self.coeffs[i] = coeffs_start[i].copy()
vec_coeffs_start = self._coeff_series_to_vec(coeffs_start)
self._xtrim_series = self._xtrim(lower=trim_lower, upper=trim_upper)
self._h_pilot = self._nobs ** - (1 / 11)
self._n_smoothing_stages_pilot = n_smoothing_stages_pilot
self.loglikeobs = self._loglikeobs_pilot
print("Starting pilot fit.")
self.results_pilot = self.fit(
start_params=vec_coeffs_start,
method='bfgs',
maxiter=maxiter,
full_output=1,
disp=1,
callback=None,
retall=1,
tol=0.001
)
self.coeffs = self.coeffs_from_vec(self.results_pilot.params)
self._coeffs_pilot_vec = self.results_pilot.params.copy()
self.coeffs_pilot = [self.coeffs[0].copy(), self.coeffs[1].copy()]
self.index_pilot = self.get_index(self.coeffs_pilot)
def _itrim(self, coeffs, lower, upper):
"""Return trimmming vector based on product of trimming vectors
for individual indices.
"""
index = self.get_index(coeffs)
        trm = pd.Series(data=1, index=self._data.index, dtype=np.double)
from _thread import start_new_thread
from hamcrest import assert_that, equal_to, is_in
from hamcrest.core.core.is_ import is_
from pandas.core.frame import DataFrame
from pytest import fail
from tanuki.data_store.column import Column
class TestColumn:
def test_type_casting(self) -> None:
data = [1, 2, 3]
column = Column("test", data)
print(column)
assert_that(column.tolist(), equal_to([1, 2, 3]))
column = Column[int]("test", [1, 2, 3])
assert_that(column.tolist(), equal_to([1, 2, 3]))
column = Column[int]("test", [1.23, 2.23, 3.23])
assert_that(column.tolist(), equal_to([1, 2, 3]))
column = Column[float]("test", [1.23, 2.23, 3.23])
assert_that(column.tolist(), equal_to([1.23, 2.23, 3.23]))
column = Column[str]("test", [1, 2, 3])
assert_that(column.tolist(), equal_to(["1", "2", "3"]))
column = Column[bool]("test", [0, 1, 2])
assert_that(column.tolist(), equal_to([False, True, True]))
column: Column[bool] = Column("test", [0, 1, 2])
assert_that(column.tolist(), equal_to([0, 1, 2]))
try:
Column[float]("test", ["a", "b", "c"])
fail("Expected cast exception")
except Exception as e:
assert_that("Failed to cast 'String' to 'Float64'", is_in(str(e)))
def test_multi_threaded(self) -> None:
data = [1, 2, 3]
thread_running = True
def assign_types():
while thread_running:
Column[bool]("test", data)
        start_new_thread(assign_types, ())
for _ in range(1000):
column = Column("test", data)
assert_that(column.tolist(), equal_to(data))
thread_running = False
def test_first(self) -> None:
column = Column[int]("test", [1, 2, 3])
assert_that(column.first().tolist(), equal_to([1]))
def test_equals(self) -> None:
column1 = Column[int]("test", [1, 2, 3])
column2 = Column[int]("test", [4, 2, 5])
assert_that(column1.equals(column1), equal_to(True))
assert_that(column1.equals(column2), equal_to(False))
assert_that(column1.equals(1), equal_to(False))
def test_eq(self) -> None:
column1 = Column[int]("test", [1, 2, 3])
column2 = Column[int]("test", [4, 2, 5])
assert_that(
DataFrame({"test": [True, True, True]}).equals(column1 == column1), is_(True)
)
assert_that(
DataFrame({"test": [False, True, False]}).equals(column1 == column2), is_(True)
)
        assert_that(
            # scalar comparison (assumed continuation of the truncated test)
            DataFrame({"test": [False, True, False]}).equals(column1 == 2), is_(True)
        )
import pandas as pd
import numpy as np
import requests
from bs4 import BeautifulSoup
import re
import ast
import os
import sys
from urllib.request import urlopen
from datetime import datetime, timedelta, date
from traceback import format_exc
import json
import math
import urllib.error
from urllib.parse import quote
import time
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException,StaleElementReferenceException
import platform
js = """
<script language="javascript" type="text/javascript">
<!--
function MM_reloadPage(init) { //reloads the window if Nav4 resized
if (init==true) with (navigator) {if ((appName=="Netscape")&&(parseInt(appVersion)==4)) {
document.MM_pgW=innerWidth; document.MM_pgH=innerHeight; onresize=MM_reloadPage; }}
else if (innerWidth!=document.MM_pgW || innerHeight!=document.MM_pgH) location.reload();
}
MM_reloadPage(true);
//-->
</script>
<link href="/wbi.css" rel="stylesheet" type="text/css"/>
"""
caption = """
<caption="특별조사기일 style="display:inline !important; visibility:visible !important; width:1px; height:1px; font-size:0px; overflow:hidden; line-height:0; " 공고"="" 관계인집회기일="" 및="" 제2,3회="">
</caption="특별조사기일><table border="0" cellpadding="0" cellspacing="0" height="100%" width="100%">
"""
str1 = """<td height="33" style="padding-left:20px"><img alt="로고" src="/img/hpgonggo/logo_scourt.gif"/></td>"""
str2 = """<td height="27"><img alt="종료" border="0" onclick="window.close();" src="/img/hpgonggo/btn_close.gif" style="cursor:hand"/><img alt="공백" height="10" src="/img/hpgonggo/blank.gif" width="10"/></td>"""
class RescueCrawler:
def __init__(self, term=1):
self.start_date = datetime.today() - timedelta(1)
self.start_date = self.start_date.strftime("%Y.%m.%d")
term = -1 * term
self.end_date = date.today() + timedelta(weeks=term)
self.end_date = self.end_date.strftime("%Y.%m.%d")
self.path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
self.rescue_url = 'http://safind.scourt.go.kr/sf/hpbigonggo/whp_gonggo.jsp?org_bub_nm=&theme=#'
self.naver_news = 'https://search.naver.com/search.naver?sm=top_hty&fbm=1&ie=utf8&query='
self.naver_news_content = 'https://search.naver.com/search.naver?&where=news&query={}&start=1&sort=sim&field=0&pd=6'
self.options = webdriver.ChromeOptions()
self.options.add_argument('headless')
self.options.add_argument('window-size=1920x1080')
self.options.add_argument("disable-gpu")
self.options.add_argument("user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36")
        if not os.path.exists(self.path + '/task_module/backup/'):
            print('creating backup directory')
            os.mkdir(self.path + '/task_module/backup')
        if not os.path.exists(self.path + '/task_module/backup/rescue/'):
            os.mkdir(self.path + '/task_module/backup/rescue')
print("collect rescue case {} weeks ago".format(term), self.path)
    def get_content(self, driver, area, start_date, end_date):
final_result = pd.DataFrame()
#for area in area_list :
print(area)
driver.get(self.rescue_url)
driver.implicitly_wait(5)
select = Select(driver.find_element_by_xpath("//select[@id='sel_map']"))
select.select_by_visible_text('법인회생')
driver.implicitly_wait(3)
select = Select(driver.find_element_by_xpath("//select[@id='area']"))
select.select_by_visible_text(area)
driver.implicitly_wait(3)
driver.find_element_by_xpath('//*[@id="contants"]/div[2]/div[18]/a').click()
driver.implicitly_wait(5)
temp = self.get_info(driver,area,start_date,end_date)
print(len(temp))
final_result = final_result.append(temp, ignore_index=True)
return final_result
    def get_info(self, driver, area, start_date, end_date):
area = area
last_date = start_date
info = []
i,j = 0,0
while last_date > end_date:
i = i+1
driver.implicitly_wait(3)
try:
driver.find_element_by_xpath('/html/body/div/div[4]/a['+str(i)+']').click()
j = j+1
if j == 11 :
i,j = 2,1
except NoSuchElementException:
last_date = end_date
else:
driver.implicitly_wait(3)
                html = driver.page_source  # grab the full page source
                soup = BeautifulSoup(html, 'html.parser')  # parse it with BeautifulSoup
contents = soup.select('body > div > table > tbody > tr ')
k = 1
for content in contents:
date = content.find_all("td")[3].text
if date > start_date:
k = k+1
else:
case_num = content.find_all("td")[0].text
court = content.find_all("td")[1].text
company = content.find_all("td")[2].text
subject = content.find_all("td")[4].text
subject = re.sub('[\n\t]', '', subject).strip()
driver.find_element_by_xpath('/html/body/div/table/tbody/tr['+str(k)+']/td[6]/a').click()
driver.switch_to_window(driver.window_handles[1])
time.sleep(1)
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
sub_info = soup.select('font > p')
if len(sub_info) == 2 :
address = sub_info[0].text
# ceo = sub_info[1].text
elif len(sub_info) == 1:
address = sub_info[0].text
# ceo = 'none'
else :
address = 'none'
# ceo = 'none'
if(date < end_date):
last_date = date
break
else :
info.append({'area':area,'case_num' : case_num,'court' : court,'company' :company,\
'date':date ,'subject' :subject,'sub_info':sub_info,'html':soup, 'address':address})
driver.switch_to_window(driver.window_handles[0])
k = k+1
        dataframe = pd.DataFrame(info)
        return dataframe
# 1. Pose the question
# What kind of people were more likely to survive the sinking of the Titanic?
# 2. Understand the data
# 2.1 Acquire the data
# https://www.kaggle.com/c/titanic
# 2.2 Import the data
# Ignore warning messages
import warnings
warnings.filterwarnings('ignore')
# Import data-processing packages
import numpy as np
import pandas as pd
# Import the data
# Training dataset
train = pd.read_csv("./train.csv")
# Test dataset
test = pd.read_csv("./test.csv")
# Show all columns
pd.set_option('display.max_columns', None)
from copy import deepcopy
from typing import List
from typing import Tuple
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from etna.datasets import generate_ar_df
from etna.datasets.tsdataset import TSDataset
from etna.transforms import AddConstTransform
from etna.transforms import FilterFeaturesTransform
from etna.transforms import LagTransform
from etna.transforms import MaxAbsScalerTransform
from etna.transforms import OneHotEncoderTransform
from etna.transforms import SegmentEncoderTransform
from etna.transforms import TimeSeriesImputerTransform
@pytest.fixture()
def tsdf_with_exog(random_seed) -> TSDataset:
df_1 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-02-01", "2021-07-01", freq="1d")})
df_2 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-02-01", "2021-07-01", freq="1d")})
df_1["segment"] = "Moscow"
df_1["target"] = [x ** 2 + np.random.uniform(-2, 2) for x in list(range(len(df_1)))]
df_2["segment"] = "Omsk"
df_2["target"] = [x ** 0.5 + np.random.uniform(-2, 2) for x in list(range(len(df_2)))]
classic_df = pd.concat([df_1, df_2], ignore_index=True)
df = TSDataset.to_dataset(classic_df)
classic_df_exog = generate_ar_df(start_time="2021-01-01", periods=600, n_segments=2)
classic_df_exog.rename(columns={"target": "exog"}, inplace=True)
df_exog = TSDataset.to_dataset(classic_df_exog)
ts = TSDataset(df=df, df_exog=df_exog, freq="1D")
return ts
@pytest.fixture()
def df_and_regressors() -> Tuple[pd.DataFrame, pd.DataFrame, List[str]]:
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df_1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df_2 = pd.DataFrame({"timestamp": timestamp[5:], "target": 12, "segment": "2"})
df = pd.concat([df_1, df_2], ignore_index=True)
df = TSDataset.to_dataset(df)
timestamp = pd.date_range("2020-12-01", "2021-02-11")
df_1 = pd.DataFrame({"timestamp": timestamp, "regressor_1": 1, "regressor_2": 2, "segment": "1"})
df_2 = pd.DataFrame({"timestamp": timestamp[5:], "regressor_1": 3, "regressor_2": 4, "segment": "2"})
df_exog = pd.concat([df_1, df_2], ignore_index=True)
df_exog = TSDataset.to_dataset(df_exog)
return df, df_exog, ["regressor_1", "regressor_2"]
@pytest.fixture()
def df_and_regressors_flat() -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Return flat versions of df and df_exog."""
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df_1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df_2 = pd.DataFrame({"timestamp": timestamp[5:], "target": 12, "segment": "2"})
df = pd.concat([df_1, df_2], ignore_index=True)
timestamp = pd.date_range("2020-12-01", "2021-02-11")
df_1 = pd.DataFrame(
{"timestamp": timestamp, "regressor_1": 1, "regressor_2": "3", "regressor_3": 5, "segment": "1"}
)
df_2 = pd.DataFrame(
{"timestamp": timestamp[5:], "regressor_1": 2, "regressor_2": "4", "regressor_3": 6, "segment": "2"}
)
df_exog = pd.concat([df_1, df_2], ignore_index=True)
df_exog["regressor_2"] = df_exog["regressor_2"].astype("category")
df_exog["regressor_3"] = df_exog["regressor_3"].astype("category")
return df, df_exog
@pytest.fixture
def ts_with_categoricals():
timestamp = pd.date_range("2021-01-01", "2021-01-05")
df_1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df_2 = pd.DataFrame({"timestamp": timestamp, "target": 12, "segment": "2"})
df = pd.concat([df_1, df_2], ignore_index=True)
df = TSDataset.to_dataset(df)
timestamp = pd.date_range("2021-01-01", "2021-01-06")
categorical_values = ["1", "2", "1", "2", "1", "2"]
df_1 = pd.DataFrame(
{"timestamp": timestamp, "regressor": categorical_values, "not_regressor": categorical_values, "segment": "1"}
)
df_2 = pd.DataFrame(
{"timestamp": timestamp, "regressor": categorical_values, "not_regressor": categorical_values, "segment": "2"}
)
df_exog = pd.concat([df_1, df_2], ignore_index=True)
df_exog = TSDataset.to_dataset(df_exog)
ts = TSDataset(df=df, freq="D", df_exog=df_exog, known_future=["regressor"])
return ts
@pytest.fixture()
def ts_future(example_reg_tsds):
future = example_reg_tsds.make_future(10)
return future
@pytest.fixture
def df_segments_int():
"""DataFrame with integer segments."""
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 3, "segment": 1})
df2 = pd.DataFrame({"timestamp": timestamp, "target": 4, "segment": 2})
df = pd.concat([df1, df2], ignore_index=True)
return df
def test_check_endings_error():
"""Check that _check_endings method raises exception if some segments end with nan."""
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp[:-5], "target": 12, "segment": "2"})
df = pd.concat([df1, df2], ignore_index=True)
df = TSDataset.to_dataset(df)
ts = TSDataset(df=df, freq="D")
with pytest.raises(ValueError):
ts._check_endings()
def test_check_endings_pass():
"""Check that _check_endings method passes if there is no nans at the end of all segments."""
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp, "target": 12, "segment": "2"})
df = pd.concat([df1, df2], ignore_index=True)
df = TSDataset.to_dataset(df)
ts = TSDataset(df=df, freq="D")
ts._check_endings()
def test_check_known_future_wrong_literal():
"""Check that _check_known_future raises exception if wrong literal is given."""
with pytest.raises(ValueError, match="The only possible literal is 'all'"):
_ = TSDataset._check_known_future("wrong-literal", None)
def test_check_known_future_error_no_df_exog():
"""Check that _check_known_future raises exception if there are no df_exog, but known_future isn't empty."""
with pytest.raises(ValueError, match="Some features in known_future are not present in df_exog"):
_ = TSDataset._check_known_future(["regressor_1"], None)
def test_check_known_future_error_not_matching(df_and_regressors):
"""Check that _check_known_future raises exception if df_exog doesn't contain some features in known_future."""
_, df_exog, known_future = df_and_regressors
known_future.append("regressor_new")
with pytest.raises(ValueError, match="Some features in known_future are not present in df_exog"):
_ = TSDataset._check_known_future(known_future, df_exog)
def test_check_known_future_pass_all_empty():
"""Check that _check_known_future passes if known_future and df_exog are empty."""
regressors = TSDataset._check_known_future([], None)
assert len(regressors) == 0
@pytest.mark.parametrize(
"known_future, expected_columns",
[
([], []),
(["regressor_1"], ["regressor_1"]),
(["regressor_1", "regressor_2"], ["regressor_1", "regressor_2"]),
(["regressor_1", "regressor_1"], ["regressor_1"]),
("all", ["regressor_1", "regressor_2"]),
],
)
def test_check_known_future_pass_non_empty(df_and_regressors, known_future, expected_columns):
_, df_exog, _ = df_and_regressors
"""Check that _check_known_future passes if df_exog is not empty."""
regressors = TSDataset._check_known_future(known_future, df_exog)
assert regressors == expected_columns
def test_categorical_after_call_to_pandas():
classic_df = generate_ar_df(periods=30, start_time="2021-06-01", n_segments=2)
classic_df["categorical_column"] = [0] * 30 + [1] * 30
classic_df["categorical_column"] = classic_df["categorical_column"].astype("category")
df = TSDataset.to_dataset(classic_df[["timestamp", "segment", "target"]])
exog = TSDataset.to_dataset(classic_df[["timestamp", "segment", "categorical_column"]])
ts = TSDataset(df, "D", exog)
flatten_df = ts.to_pandas(flatten=True)
assert flatten_df["categorical_column"].dtype == "category"
@pytest.mark.parametrize(
"borders, true_borders",
(
(
("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01"),
("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01"),
),
(
("2021-02-03", "2021-06-20", "2021-06-22", "2021-07-01"),
("2021-02-03", "2021-06-20", "2021-06-22", "2021-07-01"),
),
(
("2021-02-01", "2021-06-20", "2021-06-21", "2021-06-28"),
("2021-02-01", "2021-06-20", "2021-06-21", "2021-06-28"),
),
(
("2021-02-01", "2021-06-20", "2021-06-23", "2021-07-01"),
("2021-02-01", "2021-06-20", "2021-06-23", "2021-07-01"),
),
((None, "2021-06-20", "2021-06-23", "2021-06-28"), ("2021-02-01", "2021-06-20", "2021-06-23", "2021-06-28")),
(("2021-02-03", "2021-06-20", "2021-06-23", None), ("2021-02-03", "2021-06-20", "2021-06-23", "2021-07-01")),
((None, "2021-06-20", "2021-06-23", None), ("2021-02-01", "2021-06-20", "2021-06-23", "2021-07-01")),
((None, "2021-06-20", None, None), ("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01")),
((None, None, "2021-06-21", None), ("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01")),
),
)
def test_train_test_split(borders, true_borders, tsdf_with_exog):
train_start, train_end, test_start, test_end = borders
train_start_true, train_end_true, test_start_true, test_end_true = true_borders
train, test = tsdf_with_exog.train_test_split(
train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end
)
assert isinstance(train, TSDataset)
assert isinstance(test, TSDataset)
assert (train.df == tsdf_with_exog.df[train_start_true:train_end_true]).all().all()
assert (train.df_exog == tsdf_with_exog.df_exog).all().all()
assert (test.df == tsdf_with_exog.df[test_start_true:test_end_true]).all().all()
assert (test.df_exog == tsdf_with_exog.df_exog).all().all()
@pytest.mark.parametrize(
"test_size, true_borders",
(
(11, ("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01")),
(9, ("2021-02-01", "2021-06-22", "2021-06-23", "2021-07-01")),
(1, ("2021-02-01", "2021-06-30", "2021-07-01", "2021-07-01")),
),
)
def test_train_test_split_with_test_size(test_size, true_borders, tsdf_with_exog):
train_start_true, train_end_true, test_start_true, test_end_true = true_borders
train, test = tsdf_with_exog.train_test_split(test_size=test_size)
assert isinstance(train, TSDataset)
assert isinstance(test, TSDataset)
assert (train.df == tsdf_with_exog.df[train_start_true:train_end_true]).all().all()
assert (train.df_exog == tsdf_with_exog.df_exog).all().all()
assert (test.df == tsdf_with_exog.df[test_start_true:test_end_true]).all().all()
assert (test.df_exog == tsdf_with_exog.df_exog).all().all()
@pytest.mark.parametrize(
"test_size, borders, true_borders",
(
(
10,
("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01"),
("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01"),
),
(
15,
("2021-02-03", "2021-06-20", "2021-06-22", "2021-07-01"),
("2021-02-03", "2021-06-20", "2021-06-22", "2021-07-01"),
),
(11, ("2021-02-02", None, None, "2021-06-28"), ("2021-02-02", "2021-06-17", "2021-06-18", "2021-06-28")),
(
4,
("2021-02-03", "2021-06-20", None, "2021-07-01"),
("2021-02-03", "2021-06-20", "2021-06-28", "2021-07-01"),
),
(
4,
("2021-02-03", "2021-06-20", None, None),
("2021-02-03", "2021-06-20", "2021-06-21", "2021-06-24"),
),
),
)
def test_train_test_split_both(test_size, borders, true_borders, tsdf_with_exog):
train_start, train_end, test_start, test_end = borders
train_start_true, train_end_true, test_start_true, test_end_true = true_borders
train, test = tsdf_with_exog.train_test_split(
train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end, test_size=test_size
)
assert isinstance(train, TSDataset)
assert isinstance(test, TSDataset)
assert (train.df == tsdf_with_exog.df[train_start_true:train_end_true]).all().all()
assert (train.df_exog == tsdf_with_exog.df_exog).all().all()
assert (test.df == tsdf_with_exog.df[test_start_true:test_end_true]).all().all()
assert (test.df_exog == tsdf_with_exog.df_exog).all().all()
@pytest.mark.parametrize(
"borders, match",
(
(("2021-01-01", "2021-06-20", "2021-06-21", "2021-07-01"), "Min timestamp in df is"),
(("2021-02-01", "2021-06-20", "2021-06-21", "2021-08-01"), "Max timestamp in df is"),
),
)
def test_train_test_split_warning(borders, match, tsdf_with_exog):
train_start, train_end, test_start, test_end = borders
with pytest.warns(UserWarning, match=match):
tsdf_with_exog.train_test_split(
train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end
)
@pytest.mark.parametrize(
"test_size, borders, match",
(
(
10,
("2021-02-01", None, "2021-06-21", "2021-07-01"),
"test_size, test_start and test_end cannot be applied at the same time. test_size will be ignored",
),
),
)
def test_train_test_split_warning2(test_size, borders, match, tsdf_with_exog):
train_start, train_end, test_start, test_end = borders
with pytest.warns(UserWarning, match=match):
tsdf_with_exog.train_test_split(
train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end, test_size=test_size
)
@pytest.mark.parametrize(
"test_size, borders, match",
(
(
None,
("2021-02-03", None, None, "2021-07-01"),
"At least one of train_end, test_start or test_size should be defined",
),
(
17,
("2021-02-01", "2021-06-20", None, "2021-07-01"),
"The beginning of the test goes before the end of the train",
),
(
17,
("2021-02-01", "2021-06-20", "2021-06-26", None),
"test_size is 17, but only 6 available with your test_start",
),
),
)
def test_train_test_split_failed(test_size, borders, match, tsdf_with_exog):
train_start, train_end, test_start, test_end = borders
with pytest.raises(ValueError, match=match):
tsdf_with_exog.train_test_split(
train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end, test_size=test_size
)
def test_train_test_split_pass_regressors_to_output(df_and_regressors):
df, df_exog, known_future = df_and_regressors
ts = TSDataset(df=df, df_exog=df_exog, freq="D", known_future=known_future)
train, test = ts.train_test_split(test_size=5)
assert train.regressors == ts.regressors
assert test.regressors == ts.regressors
def test_dataset_datetime_conversion():
classic_df = generate_ar_df(periods=30, start_time="2021-06-01", n_segments=2)
classic_df["timestamp"] = classic_df["timestamp"].astype(str)
df = TSDataset.to_dataset(classic_df[["timestamp", "segment", "target"]])
# todo: deal with pandas datetime format
assert df.index.dtype == "datetime64[ns]"
def test_dataset_datetime_conversion_during_init():
classic_df = generate_ar_df(periods=30, start_time="2021-06-01", n_segments=2)
classic_df["categorical_column"] = [0] * 30 + [1] * 30
classic_df["categorical_column"] = classic_df["categorical_column"].astype("category")
df = TSDataset.to_dataset(classic_df[["timestamp", "segment", "target"]])
exog = TSDataset.to_dataset(classic_df[["timestamp", "segment", "categorical_column"]])
df.index = df.index.astype(str)
exog.index = df.index.astype(str)
ts = TSDataset(df, "D", exog)
assert ts.df.index.dtype == "datetime64[ns]"
def test_to_dataset_segment_conversion(df_segments_int):
"""Test that `TSDataset.to_dataset` makes casting of segment to string."""
df = TSDataset.to_dataset(df_segments_int)
assert np.all(df.columns.get_level_values("segment") == ["1", "2"])
def test_dataset_segment_conversion_during_init(df_segments_int):
"""Test that `TSDataset.__init__` makes casting of segment to string."""
df = TSDataset.to_dataset(df_segments_int)
# make conversion back to integers
columns_frame = df.columns.to_frame()
columns_frame["segment"] = columns_frame["segment"].astype(int)
df.columns = pd.MultiIndex.from_frame(columns_frame)
ts = TSDataset(df=df, freq="D")
assert np.all(ts.columns.get_level_values("segment") == ["1", "2"])
@pytest.mark.xfail
def test_make_future_raise_error_on_diff_endings(ts_diff_endings):
with pytest.raises(ValueError, match="All segments should end at the same timestamp"):
ts_diff_endings.make_future(10)
def test_make_future_with_imputer(ts_diff_endings, ts_future):
imputer = TimeSeriesImputerTransform(in_column="target")
ts_diff_endings.fit_transform([imputer])
future = ts_diff_endings.make_future(10)
assert_frame_equal(future.df, ts_future.df)
def test_make_future():
timestamp = pd.date_range("2020-01-01", periods=100, freq="D")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 1, "segment": "segment_1"})
df2 = pd.DataFrame({"timestamp": timestamp, "target": 2, "segment": "segment_2"})
df = pd.concat([df1, df2], ignore_index=False)
ts = TSDataset(TSDataset.to_dataset(df), freq="D")
ts_future = ts.make_future(10)
assert np.all(ts_future.index == pd.date_range(ts.index.max() + pd.Timedelta("1D"), periods=10, freq="D"))
assert set(ts_future.columns.get_level_values("feature")) == {"target"}
def test_make_future_small_horizon():
timestamp = np.arange(np.datetime64("2021-01-01"), np.datetime64("2021-02-01"))
target1 = [np.sin(i) for i in range(len(timestamp))]
target2 = [np.cos(i) for i in range(len(timestamp))]
df1 = pd.DataFrame({"timestamp": timestamp, "target": target1, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp, "target": target2, "segment": "2"})
df = pd.concat([df1, df2], ignore_index=True)
df = TSDataset.to_dataset(df)
ts = TSDataset(df, freq="D")
train = TSDataset(ts[: ts.index[10], :, :], freq="D")
with pytest.warns(UserWarning, match="TSDataset freq can't be inferred"):
assert len(train.make_future(1).df) == 1
def test_make_future_with_exog():
timestamp = pd.date_range("2020-01-01", periods=100, freq="D")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 1, "segment": "segment_1"})
df2 = pd.DataFrame({"timestamp": timestamp, "target": 2, "segment": "segment_2"})
df = pd.concat([df1, df2], ignore_index=False)
exog = df.copy()
exog.columns = ["timestamp", "exog", "segment"]
ts = TSDataset(df=TSDataset.to_dataset(df), df_exog=TSDataset.to_dataset(exog), freq="D")
ts_future = ts.make_future(10)
assert np.all(ts_future.index == pd.date_range(ts.index.max() + pd.Timedelta("1D"), periods=10, freq="D"))
assert set(ts_future.columns.get_level_values("feature")) == {"target", "exog"}
def test_make_future_with_regressors(df_and_regressors):
df, df_exog, known_future = df_and_regressors
ts = TSDataset(df=df, df_exog=df_exog, freq="D", known_future=known_future)
ts_future = ts.make_future(10)
assert np.all(ts_future.index == pd.date_range(ts.index.max() + pd.Timedelta("1D"), periods=10, freq="D"))
assert set(ts_future.columns.get_level_values("feature")) == {"target", "regressor_1", "regressor_2"}
def test_make_future_inherits_regressors(df_and_regressors):
df, df_exog, known_future = df_and_regressors
ts = TSDataset(df=df, df_exog=df_exog, freq="D", known_future=known_future)
ts_future = ts.make_future(10)
assert ts_future.regressors == ts.regressors
def test_make_future_warn_not_enough_regressors(df_and_regressors):
"""Check that warning is thrown if regressors don't have enough values for the future."""
df, df_exog, known_future = df_and_regressors
ts = TSDataset(df=df, df_exog=df_exog, freq="D", known_future=known_future)
with pytest.warns(UserWarning, match="Some regressors don't have enough values"):
ts.make_future(ts.df_exog.shape[0] + 100)
@pytest.mark.parametrize("exog_starts_later,exog_ends_earlier", ((True, False), (False, True), (True, True)))
def test_check_regressors_error(exog_starts_later: bool, exog_ends_earlier: bool):
"""Check that error is raised if regressors don't have enough values for the train data."""
start_time_main = "2021-01-01"
end_time_main = "2021-02-01"
start_time_regressors = "2021-01-10" if exog_starts_later else start_time_main
end_time_regressors = "2021-01-20" if exog_ends_earlier else end_time_main
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp[5:], "target": 12, "segment": "2"})
df = pd.concat([df1, df2], ignore_index=True)
df = TSDataset.to_dataset(df)
timestamp = pd.date_range(start_time_regressors, end_time_regressors)
df1 = pd.DataFrame({"timestamp": timestamp, "regressor_aaa": 1, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp[5:], "regressor_aaa": 2, "segment": "2"})
df_regressors = pd.concat([df1, df2], ignore_index=True)
df_regressors = TSDataset.to_dataset(df_regressors)
with pytest.raises(ValueError):
TSDataset._check_regressors(df=df, df_regressors=df_regressors)
def test_check_regressors_pass(df_and_regressors):
"""Check that regressors check on creation passes with correct regressors."""
df, df_exog, _ = df_and_regressors
_ = TSDataset._check_regressors(df=df, df_regressors=df_exog)
def test_check_regressors_pass_empty(df_and_regressors):
"""Check that regressors check on creation passes with no regressors."""
df, _, _ = df_and_regressors
_ = TSDataset._check_regressors(df=df, df_regressors=pd.DataFrame())
def test_getitem_only_date(tsdf_with_exog):
df_date_only = tsdf_with_exog["2021-02-01"]
assert df_date_only.name == pd.Timestamp("2021-02-01")
pd.testing.assert_series_equal(tsdf_with_exog.df.loc["2021-02-01"], df_date_only)
def test_getitem_slice_date(tsdf_with_exog):
df_slice = tsdf_with_exog["2021-02-01":"2021-02-03"]
expected_index = pd.DatetimeIndex(pd.date_range("2021-02-01", "2021-02-03"), name="timestamp")
| pd.testing.assert_index_equal(df_slice.index, expected_index) | pandas.testing.assert_index_equal |
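# The tests above all rely on the wide layout that `TSDataset.to_dataset` builds from a
# long dataframe: rows indexed by timestamp and MultiIndex columns (segment, feature),
# which is why the assertions index into columns.get_level_values("segment") and
# columns.get_level_values("feature"). A rough sketch of that conversion with plain
# pandas (an illustration only, not the etna implementation):
#
#     long_df = pd.DataFrame({
#         "timestamp": list(pd.date_range("2020-01-01", periods=3, freq="D")) * 2,
#         "segment": ["segment_1"] * 3 + ["segment_2"] * 3,
#         "target": [1, 2, 3, 10, 20, 30],
#     })
#     wide = long_df.pivot(index="timestamp", columns="segment", values="target")
#     wide.columns = pd.MultiIndex.from_product([wide.columns, ["target"]],
#                                               names=["segment", "feature"])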
# -*- coding: utf-8 -*-
"""
author: zengbin93
email: <EMAIL>
create_dt: 2021/10/24 16:12
describe: Tushare data cache. Data is cached to pickle files as a temporary cache: fetched once and reused many times, with no incremental updates.
"""
import os.path
import shutil
import pandas as pd
from .ts import *
from ..utils import io
class TsDataCache:
"""Tushare 数据缓存"""
def __init__(self, data_path, sdt, edt, verbose=False):
"""
:param data_path: directory where cached data is stored
:param sdt: cache start date
:param edt: cache end date
:param verbose: whether to print verbose progress information
"""
self.date_fmt = "%Y%m%d"
self.verbose = verbose
self.sdt = pd.to_datetime(sdt).strftime(self.date_fmt)
self.edt = pd.to_datetime(edt).strftime(self.date_fmt)
self.data_path = data_path
self.prefix = "TS_CACHE"
self.name = f"{self.prefix}_{self.sdt}_{self.edt}"
self.cache_path = os.path.join(self.data_path, self.name)
os.makedirs(self.cache_path, exist_ok=True)
self.pro = pro
self.__prepare_api_path()
self.freq_map = {
"1min": Freq.F1,
"5min": Freq.F5,
"15min": Freq.F15,
"30min": Freq.F30,
"60min": Freq.F60,
"D": Freq.D,
"W": Freq.W,
"M": Freq.M,
}
def __prepare_api_path(self):
"""给每个tushare数据接口创建一个缓存路径"""
cache_path = self.cache_path
self.api_names = [
'ths_daily', 'ths_index', 'ths_member', 'pro_bar',
'hk_hold', 'cctv_news', 'daily_basic', 'index_weight',
'adj_factor', 'pro_bar_minutes'
]
self.api_path_map = {k: os.path.join(cache_path, k) for k in self.api_names}
for k, path in self.api_path_map.items():
os.makedirs(path, exist_ok=True)
def clear(self):
"""清空缓存"""
for path in os.listdir(self.data_path):
if path.startswith(self.prefix):
path = os.path.join(self.data_path, path)
shutil.rmtree(path)
if self.verbose:
print(f"clear: remove {path}")
if os.path.exists(path):
print(f"Tushare 数据缓存清理失败,请手动删除缓存文件夹:{self.cache_path}")
# ------------------------------------ Tushare native APIs ----------------------------------------------
def ths_daily(self, ts_code, start_date, end_date, raw_bar=True):
"""获取同花顺概念板块的日线行情"""
cache_path = self.api_path_map['ths_daily']
file_cache = os.path.join(cache_path, f"ths_daily_{ts_code}.pkl")
if os.path.exists(file_cache):
kline = io.read_pkl(file_cache)
if self.verbose:
print(f"ths_daily: read cache {file_cache}")
else:
kline = pro.ths_daily(ts_code=ts_code, start_date=self.sdt, end_date=self.edt,
fields='ts_code,trade_date,open,close,high,low,vol')
kline = kline.sort_values('trade_date', ignore_index=True)
for bar_number in (1, 2, 3, 5, 10, 20):
# look ahead: forward return over the next bar_number bars, in basis points
n_col_name = 'n' + str(bar_number) + 'b'
kline[n_col_name] = (kline['close'].shift(-bar_number) / kline['close'] - 1) * 10000
kline[n_col_name] = kline[n_col_name].round(4)
# look back: return over the previous bar_number bars, in basis points
b_col_name = 'b' + str(bar_number) + 'b'
kline[b_col_name] = (kline['close'] / kline['close'].shift(bar_number) - 1) * 10000
kline[b_col_name] = kline[b_col_name].round(4)
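# A worked example of the two columns built above (hypothetical numbers): with closes
# [10.0, 10.5, 11.0] and bar_number = 1, the forward-looking column n1b for the first
# bar is (10.5 / 10.0 - 1) * 10000 = 500.0, i.e. +500 basis points over the next bar,
# while the backward-looking column b1b for the last bar is
# (11.0 / 10.5 - 1) * 10000, about 476.2 basis points over the previous bar.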
io.save_pkl(kline, file_cache)
kline['trade_date'] = pd.to_datetime(kline['trade_date'], format=self.date_fmt)
start_date = pd.to_datetime(start_date)
end_date = pd.to_datetime(end_date)
bars = kline[(kline['trade_date'] >= start_date) & (kline['trade_date'] <= end_date)]
bars.reset_index(drop=True, inplace=True)
if raw_bar:
bars = format_kline(bars, freq=Freq.D)
return bars
def ths_index(self, exchange="A", type_="N"):
"""获取同花顺概念
https://tushare.pro/document/2?doc_id=259
"""
cache_path = self.api_path_map['ths_index']
file_cache = os.path.join(cache_path, f"ths_index_{exchange}_{type_}.pkl")
if os.path.exists(file_cache):
df = io.read_pkl(file_cache)
if self.verbose:
print(f"ths_index: read cache {file_cache}")
else:
df = pro.ths_index(exchange=exchange, type=type_)
io.save_pkl(df, file_cache)
return df
def ths_member(self, ts_code):
"""获取同花顺概念成分股
https://tushare.pro/document/2?doc_id=261
:param ts_code:
:return:
"""
cache_path = self.api_path_map['ths_member']
file_cache = os.path.join(cache_path, f"ths_member_{ts_code}.pkl")
if os.path.exists(file_cache):
df = io.read_pkl(file_cache)
else:
df = pro.ths_member(ts_code=ts_code,
fields="ts_code,code,name,weight,in_date,out_date,is_new")
io.save_pkl(df, file_cache)
return df
def pro_bar(self, ts_code, start_date, end_date, freq='D', asset="E", raw_bar=True):
"""获取日线以上数据
https://tushare.pro/document/2?doc_id=109
:param ts_code:
:param start_date:
:param end_date:
:param freq:
:param asset: asset class: E stock, I SSE/SZSE index, C crypto, FT futures, FD fund, O option, CB convertible bond (v1.2.39); default E
:param raw_bar:
:return:
"""
cache_path = self.api_path_map['pro_bar']
file_cache = os.path.join(cache_path, f"pro_bar_{ts_code}_{asset}_{freq}.pkl")
if os.path.exists(file_cache):
kline = io.read_pkl(file_cache)
if self.verbose:
print(f"pro_bar: read cache {file_cache}")
else:
start_date_ = (pd.to_datetime(self.sdt) - timedelta(days=1000)).strftime('%Y%m%d')
kline = ts.pro_bar(ts_code=ts_code, asset=asset, adj='qfq', freq=freq,
start_date=start_date_, end_date=self.edt)
kline = kline.sort_values('trade_date', ignore_index=True)
kline['trade_date'] = pd.to_datetime(kline['trade_date'], format=self.date_fmt)
for bar_number in (1, 2, 3, 5, 10, 20):
# look ahead: forward return over the next bar_number bars, in basis points
n_col_name = 'n' + str(bar_number) + 'b'
kline[n_col_name] = (kline['close'].shift(-bar_number) / kline['close'] - 1) * 10000
kline[n_col_name] = kline[n_col_name].round(4)
# look back: return over the previous bar_number bars, in basis points
b_col_name = 'b' + str(bar_number) + 'b'
kline[b_col_name] = (kline['close'] / kline['close'].shift(bar_number) - 1) * 10000
kline[b_col_name] = kline[b_col_name].round(4)
io.save_pkl(kline, file_cache)
start_date = pd.to_datetime(start_date)
end_date = pd.to_datetime(end_date)
bars = kline[(kline['trade_date'] >= start_date) & (kline['trade_date'] <= end_date)]
bars.reset_index(drop=True, inplace=True)
if raw_bar:
bars = format_kline(bars, freq=self.freq_map[freq])
return bars
def pro_bar_minutes(self, ts_code, sdt, edt, freq='60min', asset="E", adj=None, raw_bar=True):
"""获取分钟线
https://tushare.pro/document/2?doc_id=109
:param ts_code: 标的代码
:param sdt: 开始时间,精确到分钟
:param edt: 结束时间,精确到分钟
:param freq: 分钟周期,可选值 1min, 5min, 15min, 30min, 60min
:param asset: 资产类别:E股票 I沪深指数 C数字货币 FT期货 FD基金 O期权 CB可转债(v1.2.39),默认E
:param adj: 复权类型,None不复权,qfq:前复权,hfq:后复权
:param raw_bar: 是否返回 RawBar 对象列表
:return:
"""
cache_path = self.api_path_map['pro_bar_minutes']
file_cache = os.path.join(cache_path, f"pro_bar_minutes_{ts_code}_{asset}_{freq}_{adj}.pkl")
if os.path.exists(file_cache):
kline = io.read_pkl(file_cache)
if self.verbose:
print(f"pro_bar_minutes: read cache {file_cache}")
else:
klines = []
end_dt = pd.to_datetime(self.edt)
dt1 = pd.to_datetime(self.sdt)
delta = timedelta(days=20*int(freq.replace("min", "")))
dt2 = dt1 + delta
while dt1 < end_dt:
df = ts.pro_bar(ts_code=ts_code, asset=asset, freq=freq,
start_date=dt1.strftime(dt_fmt), end_date=dt2.strftime(dt_fmt))
klines.append(df)
dt1 = dt2
dt2 = dt1 + delta
if self.verbose:
print(f"pro_bar_minutes: {ts_code} - {asset} - {freq} - {dt1} - {dt2}")
df_klines = pd.concat(klines, ignore_index=True)
kline = df_klines.drop_duplicates('trade_time')\
.sort_values('trade_time', ascending=True, ignore_index=True)
kline['trade_time'] = pd.to_datetime(kline['trade_time'], format=dt_fmt)
# drop the 09:30 bar
kline['keep'] = kline['trade_time'].apply(lambda x: 0 if x.hour == 9 and x.minute == 30 else 1)
kline = kline[kline['keep'] == 1]
# drop bars with zero volume
kline = kline[kline['vol'] > 0]
kline = kline.reset_index(drop=True)
kline.drop(['keep'], axis=1, inplace=True)
# price adjustment is applied to stocks only; see https://tushare.pro/document/2?doc_id=146
if asset == 'E' and adj and adj == 'qfq':
# forward-adjusted (qfq) price = close of the day * adj_factor of the day / latest adj_factor
factor = self.adj_factor(ts_code)
factor = factor.sort_values('trade_date', ignore_index=True)
latest_factor = factor.iloc[-1]['adj_factor']
kline['trade_date'] = kline.trade_time.apply(lambda x: x.strftime(date_fmt))
adj_map = {row['trade_date']: row['adj_factor'] for _, row in factor.iterrows()}
for col in ['open', 'close', 'high', 'low']:
kline[col] = kline.apply(lambda x: x[col] * adj_map[x['trade_date']] / latest_factor, axis=1)
if asset == 'E' and adj and adj == 'hfq':
# backward-adjusted (hfq) price = close of the day * adj_factor of the day
factor = self.adj_factor(ts_code)
factor = factor.sort_values('trade_date', ignore_index=True)
kline['trade_date'] = kline.trade_time.apply(lambda x: x.strftime(date_fmt))
adj_map = {row['trade_date']: row['adj_factor'] for _, row in factor.iterrows()}
for col in ['open', 'close', 'high', 'low']:
kline[col] = kline.apply(lambda x: x[col] * adj_map[x['trade_date']], axis=1)
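# A worked example of the two adjustments (hypothetical numbers): with a raw close of
# 10.0, an adj_factor of 2.0 on that day and a latest adj_factor of 4.0, the
# forward-adjusted (qfq) close is 10.0 * 2.0 / 4.0 = 5.0, while the backward-adjusted
# (hfq) close is 10.0 * 2.0 = 20.0; the two branches above differ only in whether the
# per-day factor is rescaled by the latest factor.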
for bar_number in (1, 2, 3, 5, 10, 20):
# look ahead: forward return over the next bar_number bars, in basis points
n_col_name = 'n' + str(bar_number) + 'b'
kline[n_col_name] = (kline['close'].shift(-bar_number) / kline['close'] - 1) * 10000
kline[n_col_name] = kline[n_col_name].round(4)
# look back: return over the previous bar_number bars, in basis points
b_col_name = 'b' + str(bar_number) + 'b'
kline[b_col_name] = (kline['close'] / kline['close'].shift(bar_number) - 1) * 10000
kline[b_col_name] = kline[b_col_name].round(4)
io.save_pkl(kline, file_cache)
sdt = pd.to_datetime(sdt)
edt = pd.to_datetime(edt)
bars = kline[(kline['trade_time'] >= sdt) & (kline['trade_time'] <= edt)]
bars.reset_index(drop=True, inplace=True)
if raw_bar:
bars = format_kline(bars, freq=self.freq_map[freq])
return bars
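# A minimal usage sketch for this cache (assuming a valid Tushare token is already
# configured by the `.ts` module; the path, ticker and dates below are made up):
#
#     dc = TsDataCache(data_path="./ts_cache", sdt="20200101", edt="20211231")
#     bars = dc.pro_bar_minutes("000001.SZ", sdt="20210101", edt="20210201",
#                               freq="60min", asset="E", adj="qfq", raw_bar=True)
#
# The first call hits the Tushare API and pickles the result; later calls with the
# same arguments are served from the pickle files under `cache_path`.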
def stock_basic(self):
"""
https://tushare.pro/document/2?doc_id=25
:return:
"""
file_cache = os.path.join(self.cache_path, f"stock_basic.pkl")
if os.path.exists(file_cache):
df = io.read_pkl(file_cache)
else:
df = pro.stock_basic(exchange='', list_status='L',
fields='ts_code,symbol,name,area,industry,list_date')
io.save_pkl(df, file_cache)
return df
def trade_cal(self):
"""https://tushare.pro/document/2?doc_id=26"""
file_cache = os.path.join(self.cache_path, f"trade_cal.pkl")
if os.path.exists(file_cache):
df = io.read_pkl(file_cache)
else:
df = pro.trade_cal(exchange='', start_date='19900101', end_date="20300101")
io.save_pkl(df, file_cache)
return df
def hk_hold(self, trade_date='20190625'):
"""沪深港股通持股明细
https://tushare.pro/document/2?doc_id=188
"""
cache_path = self.api_path_map['hk_hold']
trade_date = pd.to_datetime(trade_date).strftime("%Y%m%d")
file_cache = os.path.join(cache_path, f"hk_hold_{trade_date}.pkl")
if os.path.exists(file_cache):
df = io.read_pkl(file_cache)
else:
df = pro.hk_hold(trade_date=trade_date)
io.save_pkl(df, file_cache)
return df
def cctv_news(self, date='20190625'):
"""新闻联播
https://tushare.pro/document/2?doc_id=154
"""
cache_path = self.api_path_map['cctv_news']
date = pd.to_datetime(date).strftime("%Y%m%d")
file_cache = os.path.join(cache_path, f"cctv_news_{date}.pkl")
if os.path.exists(file_cache):
df = io.read_pkl(file_cache)
else:
df = pro.cctv_news(date=date)
io.save_pkl(df, file_cache)
return df
def daily_basic(self, ts_code: str, start_date: str, end_date: str):
"""每日指标
https://tushare.pro/document/2?doc_id=32
"""
cache_path = self.api_path_map['daily_basic']
file_cache = os.path.join(cache_path, f"daily_basic_{ts_code}.pkl")
if os.path.exists(file_cache):
df = io.read_pkl(file_cache)
else:
start_date_ = (pd.to_datetime(self.sdt) - timedelta(days=1000)).strftime('%Y%m%d')
df = pro.daily_basic(ts_code=ts_code, start_date=start_date_, end_date="20230101")
df['trade_date'] = pd.to_datetime(df['trade_date'])
io.save_pkl(df, file_cache)
df = df[(df.trade_date >= pd.to_datetime(start_date)) & (df.trade_date <= | pd.to_datetime(end_date) | pandas.to_datetime |
import unittest
import pandas as pd
from pandas.core.indexes.range import RangeIndex
from pandas.testing import assert_frame_equal
import itertools
from datamatch.indices import MultiIndex, NoopIndex, ColumnsIndex
class BaseIndexTestCase(unittest.TestCase):
def assert_pairs_equal(self, pair_a, pair_b):
df1, df2 = pair_a
df3, df4 = pair_b
assert_frame_equal(df1, df3)
assert_frame_equal(df2, df4)
def assert_pairs_list_equal(self, list_a, list_b):
self.assertEqual(len(list_a), len(list_b))
for pair_a, pair_b in itertools.zip_longest(list_a, list_b):
self.assert_pairs_equal(pair_a, pair_b)
class TestNoopIndex(BaseIndexTestCase):
def test_index(self):
df = pd.DataFrame([[1, 2], [3, 4]])
idx = NoopIndex()
keys = idx.keys(df)
self.assertEqual(keys, set([0]))
assert_frame_equal(idx.bucket(df, 0), df)
class TestColumnsIndex(BaseIndexTestCase):
def test_index(self):
cols = ["c", "d"]
df = pd.DataFrame(
[[1, 2], [2, 4], [3, 4]], index=["x", "y", "z"], columns=cols)
idx = ColumnsIndex(["c"])
keys = idx.keys(df)
self.assertEqual(keys, set([(1,), (2,), (3,)]))
assert_frame_equal(
idx.bucket(df, (1,)),
pd.DataFrame([[1, 2]], index=["x"], columns=cols)
)
assert_frame_equal(
idx.bucket(df, (2,)),
pd.DataFrame([[2, 4]], index=["y"], columns=cols)
)
assert_frame_equal(
idx.bucket(df, (3,)),
pd.DataFrame([[3, 4]], index=["z"], columns=cols)
)
def test_multi_columns(self):
cols = ["c", "d"]
df = pd.DataFrame(
[[1, 2], [2, 4], [3, 4]], index=["z", "x", "c"], columns=cols)
idx = ColumnsIndex(["c", "d"])
keys = idx.keys(df)
self.assertEqual(keys, set([(1, 2), (2, 4), (3, 4)]))
assert_frame_equal(
idx.bucket(df, (1, 2)),
pd.DataFrame([[1, 2]], index=["z"], columns=cols)
)
assert_frame_equal(
idx.bucket(df, (2, 4)),
pd.DataFrame([[2, 4]], index=["x"], columns=cols)
)
assert_frame_equal(
idx.bucket(df, (3, 4)),
pd.DataFrame([[3, 4]], index=["c"], columns=cols)
)
def test_ignore_key_error(self):
df = pd.DataFrame(
[[1, 2], [3, 4]], columns=['a', 'b']
)
self.assertRaises(KeyError, lambda: ColumnsIndex('c').keys(df))
self.assertEqual(ColumnsIndex(
'c', ignore_key_error=True).keys(df), set())
def test_index_elements(self):
cols = ['col1', 'col2']
df = pd.DataFrame(
[
[['a', 'b'], 'q'],
[['c'], 'w'],
[['b'], 'e'],
],
index=RangeIndex(start=0, stop=3),
columns=cols
)
idx = ColumnsIndex('col1', index_elements=True)
keys = idx.keys(df)
self.assertEqual(keys, set([('a',), ('b',), ('c',)]))
assert_frame_equal(
idx.bucket(df, ('a',)),
pd.DataFrame([
[['a', 'b'], 'q']
], index=[0], columns=cols)
)
assert_frame_equal(
idx.bucket(df, ('b',)),
pd.DataFrame([
[['a', 'b'], 'q'],
[['b'], 'e'],
], index=[0, 2], columns=cols)
)
def test_index_elements_multi_columns(self):
cols = ['col1', 'col2', 'col3']
df = pd.DataFrame(
[
[['a', 'b'], 'q', [1]],
[['c'], 'w', [2, 3]],
[['b'], 'e', [1]],
],
index=RangeIndex(start=0, stop=3),
columns=cols
)
idx = ColumnsIndex(['col1', 'col3'], index_elements=True)
keys = idx.keys(df)
self.assertEqual(keys, set([
('c', 2), ('a', 1), ('b', 1), ('c', 3)
]))
assert_frame_equal(
idx.bucket(df, ('a', 1)),
pd.DataFrame([
[['a', 'b'], 'q', [1]],
], index=[0], columns=cols)
)
assert_frame_equal(
idx.bucket(df, ('b', 1)),
pd.DataFrame([
[['a', 'b'], 'q', [1]],
[['b'], 'e', [1]],
], index=[0, 2], columns=cols)
)
class MultiIndexTestCase(BaseIndexTestCase):
def test_index(self):
cols = ["c", "d"]
df = pd.DataFrame(
[[1, 2], [2, 4], [3, 4]], index=["x", "y", "z"], columns=cols
)
idx = MultiIndex([
ColumnsIndex('c'),
ColumnsIndex('d')
])
keys = idx.keys(df)
self.assertEqual(keys, set([(1,), (2,), (3,), (4,)]))
assert_frame_equal(
idx.bucket(df, (1,)),
pd.DataFrame([[1, 2]], index=["x"], columns=cols)
)
assert_frame_equal(
idx.bucket(df, (2,)),
pd.DataFrame([[1, 2], [2, 4]], index=["x", "y"], columns=cols)
)
assert_frame_equal(
idx.bucket(df, (3,)),
pd.DataFrame([[3, 4]], index=["z"], columns=cols)
)
assert_frame_equal(
idx.bucket(df, (4,)),
pd.DataFrame([[2, 4], [3, 4]], index=["y", "z"], columns=cols)
)
idx = MultiIndex([
ColumnsIndex('c'),
ColumnsIndex('d')
], combine_keys=True)
keys = idx.keys(df)
self.assertEqual(keys, set([
((3,), (4,)),
((2,), (4,)),
((1,), (2,)),
]))
assert_frame_equal(
idx.bucket(df, ((1,), (2,))),
| pd.DataFrame([[1, 2]], index=["x"], columns=cols) | pandas.DataFrame |
####
#### Feb 22, 2022
####
"""
After creating the first 250 eval/train sets,
there are inconsistencies between the NASA/Landsat
labels and the Forecast/Sentinel labels from the experts.
Here we address those inconsistencies.
"""
import csv
import numpy as np
import pandas as pd
import datetime
from datetime import date
import time
import scipy
import scipy.signal
import os, os.path
from patsy import cr
# from pprint import pprint
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sb
from pandas.plotting import register_matplotlib_converters
from matplotlib.dates import ConciseDateFormatter
import matplotlib.dates as mdates
from datetime import datetime
| register_matplotlib_converters() | pandas.plotting.register_matplotlib_converters |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/5/10 17:00
Desc: Stock data - overview - market overview
Stock data - overview - trading summary
http://www.szse.cn/market/overview/index.html
http://www.sse.com.cn/market/stockdata/statistic/
"""
import warnings
from io import BytesIO
import pandas as pd
import requests
from bs4 import BeautifulSoup
def stock_szse_summary(date: str = "20200619") -> pd.DataFrame:
"""
Shenzhen Stock Exchange - overview - statistics by security type
http://www.szse.cn/market/overview/index.html
:param date: most recent completed trading day
:type date: str
:return: statistics by security type
:rtype: pandas.DataFrame
"""
url = "http://www.szse.cn/api/report/ShowReport"
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "1803_sczm",
"TABKEY": "tab1",
"txtQueryDate": "-".join([date[:4], date[4:6], date[6:]]),
"random": "0.39339437497296137",
}
r = requests.get(url, params=params)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
temp_df = pd.read_excel(BytesIO(r.content), engine="openpyxl")
temp_df["证券类别"] = temp_df["证券类别"].str.strip()
temp_df.iloc[:, 2:] = temp_df.iloc[:, 2:].applymap(lambda x: x.replace(",", ""))
temp_df.columns = ["证券类别", "数量", "成交金额", "总市值", "流通市值"]
temp_df["数量"] = pd.to_numeric(temp_df["数量"])
temp_df["成交金额"] = pd.to_numeric(temp_df["成交金额"])
temp_df["总市值"] = pd.to_numeric(temp_df["总市值"], errors="coerce")
temp_df["流通市值"] = pd.to_numeric(temp_df["流通市值"], errors="coerce")
return temp_df
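# A minimal usage sketch (the date below is made up and must be a completed trading day):
#
#     summary_df = stock_szse_summary(date="20200619")
#     print(summary_df.head())
#
# The returned frame has the five columns renamed above, with the numeric columns
# already coerced via pd.to_numeric.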
def stock_szse_area_summary(date: str = "202203") -> pd.DataFrame:
"""
Shenzhen Stock Exchange - overview - trading ranking by region
http://www.szse.cn/market/overview/index.html
:param date: most recent completed trading month, e.g. 202203
:type date: str
:return: trading ranking by region
:rtype: pandas.DataFrame
"""
url = "http://www.szse.cn/api/report/ShowReport"
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "1803_sczm",
"TABKEY": "tab2",
"DATETIME": "-".join([date[:4], date[4:6]]),
"random": "0.39339437497296137",
}
r = requests.get(url, params=params)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
temp_df = pd.read_excel(BytesIO(r.content), engine="openpyxl")
temp_df.columns = ['序号', '地区', '总交易额', '占市场', '股票交易额', '基金交易额', '债券交易额']
temp_df["总交易额"] = temp_df["总交易额"].str.replace(",", "")
temp_df["总交易额"] = pd.to_numeric(temp_df["总交易额"])
temp_df["占市场"] = pd.to_numeric(temp_df["占市场"])
temp_df["股票交易额"] = temp_df["股票交易额"].str.replace(",", "")
temp_df["股票交易额"] = pd.to_nume | ric(temp_df["股票交易额"], errors="coerce") | pandas.to_numeric |
from abc import ABC, abstractmethod
import matplotlib.pyplot as plt
from matplotlib import animation
from time import time
from datetime import timedelta
import numpy as np
import torch
import pandas as pd
class Trainer:
def __init__(self, env, env_test, algo, seed=0, num_steps=10**6, eval_interval=10**4, num_eval_episodes=1):
self.env = env
self.env_test = env_test
self.algo = algo
self.returns = {'step': [], 'return': [], 'success_rate':[]}
self.num_steps = num_steps
self.eval_interval = eval_interval
self.num_eval_episodes = num_eval_episodes
def train(self):
self.start_time = time()
t = 0
state = self.env.reset()
for steps in range(1, self.num_steps + 1):
state, t = self.algo.step(self.env, state, t, steps)
if self.algo.is_update(steps):
self.algo.update()
# Evaluate the learned policy each eval_interval
if steps % self.eval_interval == 0:
self.evaluate(steps)
self.save_gif() # save gif for final policy
def save_gif(self):
images = []
state = self.env_test.reset()
done = False
while(not done):
images.append(self.env_test.render(mode='rgb_array'))
action = self.algo.exploit(state)
state, reward, done, _ = self.env_test.step(action)
self.display_video(images)
def display_video(self, frames):
plt.figure(figsize=(8, 8), dpi=50)
patch = plt.imshow(frames[0])
plt.axis('off')
def animate(i):
patch.set_data(frames[i])
anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames), interval=50)
anim.save('env.gif', writer='PillowWriter')
def evaluate(self, steps):
returns = []
evaluates = []
for _ in range(self.num_eval_episodes):
evaluate_val = 1.0
state = self.env_test.reset()
eval_temp = self.env_test.evaluate_stl_formula() # Return 1.0.
evaluate_val = min(evaluate_val, eval_temp) # \Phi = G\phi
done = False
episode_return = 0.0
while (not done):
action = self.algo.exploit(state)
state, reward, done, _ = self.env_test.step(action)
eval_temp = self.env_test.evaluate_stl_formula() # Return 0.0, if the past state sequence does not satisfy the STL specification at the time after k=tau-1.
evaluate_val = min(evaluate_val, eval_temp) # \Phi = G\phi
episode_return += reward
evaluates.append(evaluate_val)
returns.append(episode_return)
mean_return = np.mean(returns)
success_rate = np.mean(evaluates)
self.returns['step'].append(steps)
self.returns['return'].append(mean_return)
self.returns['success_rate'].append(success_rate)
print(f'Num steps: {steps:<6} '
f'Return: {mean_return:<5.1f} '
f'Success Rate: {success_rate:<5.2f} '
f'Time: {self.time}')
if steps % 100000 == 0:
self.algo.backup_model(steps)
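    # Note on the success-rate bookkeeping in evaluate(): because the specification is
    # \Phi = G\phi ("globally / always phi"), an episode counts as a success only if
    # evaluate_stl_formula() returned 1.0 at every step, so the running min acts as a
    # logical AND over time. For example (hypothetical per-step values),
    # [1.0, 1.0, 0.0, 1.0] gives min = 0.0 -> failure, while [1.0, 1.0, 1.0] gives
    # min = 1.0 -> success.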
def plot(self):
#fig = plt.figure(figsize=(8, 6))
#plt.plot(self.returns['step'], self.returns['success_rate'])
#plt.xlabel('Steps', fontsize=24)
#plt.ylabel('Success Rate', fontsize=24)
#plt.tick_params(labelsize=18)
#plt.title(f'{self.env.unwrapped.spec.id}', fontsize=24)
#plt.tight_layout()
#fig.savefig("plot_test.png")
datasets = | pd.DataFrame(self.returns['return']) | pandas.DataFrame |
#%%
from initial_data_processing import ProcessSoccerData
from scraper import Scrape_Soccer_Data
import pandas as pd
import os
#%%
NO_PREV_MATCHES_TO_CALULATE_AVERAGE_FROM = 5
class Feature_Engineering:
def __init__(self, calc_features=False):
self.soccer_data = ProcessSoccerData()
self.dictionary_df = self.soccer_data.get_dictionary_df()
self.input_data_df = self.soccer_data.get_matches_df()
self.scraped_match_data_df = Feature_Engineering._get_scraped_match_data_df()
self.feature_df = self._calculate_feature_df(calc_features)
# transform the scraped match data json into a dataframe
@staticmethod
def _get_scraped_match_data_df():
scraped_data_dict = Scrape_Soccer_Data.read_data('../data/matches_data')
scraped_df = pd.DataFrame.from_records(scraped_data_dict).transpose().reset_index()
scraped_df.rename(columns = {'index':'Match_id'}, inplace = True)
scraped_df[['Match_id']] = scraped_df[['Match_id']].apply(pd.to_numeric)
return scraped_df
def _calculate_feature_df(self, calc_features):
path = '../data/calc_features.csv'
if not calc_features:
if os.path.exists(path): # if calc_features is false get the features from csv that have been calculated previously
return | pd.read_csv(path) | pandas.read_csv |
#
# ___ _ ____ ____
# / _ \ _ _ ___ ___| |_| _ \| __ )
# | | | | | | |/ _ \/ __| __| | | | _ \
# | |_| | |_| | __/\__ \ |_| |_| | |_) |
# \__\_\\__,_|\___||___/\__|____/|____/
#
# Copyright (c) 2014-2019 Appsicle
# Copyright (c) 2019-2020 QuestDB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import numpy as np
import pandas as pd
from pykit import (
create_table,
insert_values,
drop_table,
df_from_table
)
if __name__ == "__main__":
pd.set_option('display.width', 800)
| pd.set_option('max_columns', 4) | pandas.set_option |
import luigi
import os
import pandas as pd
from db import extract
from db import sql
from forecast import util
import shutil
import luigi.contrib.hadoop
from sqlalchemy import create_engine
from pysandag.database import get_connection_string
from pysandag import database
from db import log
class IncPopulation(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return None
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
engine = create_engine(get_connection_string("model_config.yml", 'output_database'))
db_connection_string = database.get_connection_string('model_config.yml', 'in_db')
sql_in_engine = create_engine(db_connection_string)
in_query = getattr(sql, 'max_run_id')
db_run_id = pd.read_sql(in_query, engine, index_col=None)
# db_run_id = log.new_run(name='inc_run_log', run_id=db_run_id['max'].iloc[0])
run_id = pd.Series([db_run_id['id'].iloc[0]])
run_id.to_hdf('temp/data.h5', 'run_id', mode='a')
dem_sim_rates = extract.create_df('dem_sim_rates', 'dem_sim_rates_table',
rate_id=self.dem_id, index=None)
dem_sim_rates.to_hdf('temp/data.h5', 'dem_sim_rates', mode='a')
econ_sim_rates = extract.create_df('econ_sim_rates', 'econ_sim_rates_table',
rate_id=self.econ_id, index=None)
econ_sim_rates.to_hdf('temp/data.h5', 'econ_sim_rates', mode='a')
tables = util.yaml_to_dict('model_config.yml', 'db_tables')
in_query = getattr(sql, 'inc_pop') % (tables['inc_pop_table'], run_id[0])
in_query2 = getattr(sql, 'inc_mil_hh_pop') % (tables['population_table'], dem_sim_rates.base_population_id[0])
pop = pd.read_sql(in_query, engine, index_col=['age', 'race_ethn', 'sex', 'mildep'])
pop_mil = | pd.read_sql(in_query2, sql_in_engine, index_col=['age', 'race_ethn', 'sex', 'mildep']) | pandas.read_sql |
# -*- coding: utf-8 -*-
"""
Test data
"""
# Imports
import pandas as pd
from edbo.feature_utils import build_experiment_index
# Build data sets from indices
def aryl_amination(aryl_halide='ohe', additive='ohe', base='ohe', ligand='ohe', subset=1):
"""
Load aryl amination data with different features.
"""
# SMILES index
index = pd.read_csv('data/aryl_amination/experiment_index.csv')
# Choose subset:
ar123 = ['FC(F)(F)c1ccc(Cl)cc1','FC(F)(F)c1ccc(Br)cc1','FC(F)(F)c1ccc(I)cc1']
ar456 = ['COc1ccc(Cl)cc1','COc1ccc(Br)cc1','COc1ccc(I)cc1']
ar789 = ['CCc1ccc(Cl)cc1','CCc1ccc(Br)cc1','CCc1ccc(I)cc1']
ar101112 = ['Clc1ccccn1','Brc1ccccn1','Ic1ccccn1']
ar131415 = ['Clc1cccnc1','Brc1cccnc1','Ic1cccnc1']
def get_subset(ar):
a = index[index['Aryl_halide_SMILES'] == ar[0]]
b = index[index['Aryl_halide_SMILES'] == ar[1]]
c = index[index['Aryl_halide_SMILES'] == ar[2]]
return pd.concat([a,b,c])
if subset == 1:
index = get_subset(ar123)
elif subset == 2:
index = get_subset(ar456)
elif subset == 3:
index = get_subset(ar789)
elif subset == 4:
index = get_subset(ar101112)
elif subset == 5:
index = get_subset(ar131415)
# Aryl halide features
if aryl_halide == 'dft':
aryl_features = pd.read_csv('data/aryl_amination/aryl_halide_dft.csv')
elif aryl_halide == 'mordred':
aryl_features = pd.read_csv('data/aryl_amination/aryl_halide_mordred.csv')
elif aryl_halide == 'ohe':
aryl_features = pd.read_csv('data/aryl_amination/aryl_halide_ohe.csv')
# Additive features
if additive == 'dft':
add_features = pd.read_csv('data/aryl_amination/additive_dft.csv')
elif additive == 'mordred':
add_features = pd.read_csv('data/aryl_amination/additive_mordred.csv')
elif additive == 'ohe':
add_features = pd.read_csv('data/aryl_amination/additive_ohe.csv')
# Base features
if base == 'dft':
base_features = pd.read_csv('data/aryl_amination/base_dft.csv')
elif base == 'mordred':
base_features = | pd.read_csv('data/aryl_amination/base_mordred.csv') | pandas.read_csv |
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
lreshape,
melt,
wide_to_long,
)
import pandas._testing as tm
class TestMelt:
def setup_method(self, method):
self.df = tm.makeTimeDataFrame()[:10]
self.df["id1"] = (self.df["A"] > 0).astype(np.int64)
self.df["id2"] = (self.df["B"] > 0).astype(np.int64)
self.var_name = "var"
self.value_name = "val"
self.df1 = DataFrame(
[
[1.067683, -1.110463, 0.20867],
[-1.321405, 0.368915, -1.055342],
[-0.807333, 0.08298, -0.873361],
]
)
self.df1.columns = [list("ABC"), list("abc")]
self.df1.columns.names = ["CAP", "low"]
def test_top_level_method(self):
result = melt(self.df)
assert result.columns.tolist() == ["variable", "value"]
def test_method_signatures(self):
tm.assert_frame_equal(self.df.melt(), melt(self.df))
tm.assert_frame_equal(
self.df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"]),
melt(self.df, id_vars=["id1", "id2"], value_vars=["A", "B"]),
)
tm.assert_frame_equal(
self.df.melt(var_name=self.var_name, value_name=self.value_name),
melt(self.df, var_name=self.var_name, value_name=self.value_name),
)
tm.assert_frame_equal(self.df1.melt(col_level=0), melt(self.df1, col_level=0))
def test_default_col_names(self):
result = self.df.melt()
assert result.columns.tolist() == ["variable", "value"]
result1 = self.df.melt(id_vars=["id1"])
assert result1.columns.tolist() == ["id1", "variable", "value"]
result2 = self.df.melt(id_vars=["id1", "id2"])
assert result2.columns.tolist() == ["id1", "id2", "variable", "value"]
def test_value_vars(self):
result3 = self.df.melt(id_vars=["id1", "id2"], value_vars="A")
assert len(result3) == 10
result4 = self.df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"])
expected4 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", "value"],
)
tm.assert_frame_equal(result4, expected4)
def test_value_vars_types(self):
# GH 15348
expected = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", "value"],
)
for type_ in (tuple, list, np.array):
result = self.df.melt(id_vars=["id1", "id2"], value_vars=type_(("A", "B")))
tm.assert_frame_equal(result, expected)
def test_vars_work_with_multiindex(self):
expected = DataFrame(
{
("A", "a"): self.df1[("A", "a")],
"CAP": ["B"] * len(self.df1),
"low": ["b"] * len(self.df1),
"value": self.df1[("B", "b")],
},
columns=[("A", "a"), "CAP", "low", "value"],
)
result = self.df1.melt(id_vars=[("A", "a")], value_vars=[("B", "b")])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"id_vars, value_vars, col_level, expected",
[
(
["A"],
["B"],
0,
DataFrame(
{
"A": {0: 1.067683, 1: -1.321405, 2: -0.807333},
"CAP": {0: "B", 1: "B", 2: "B"},
"value": {0: -1.110463, 1: 0.368915, 2: 0.08298},
}
),
),
(
["a"],
["b"],
1,
DataFrame(
{
"a": {0: 1.067683, 1: -1.321405, 2: -0.807333},
"low": {0: "b", 1: "b", 2: "b"},
"value": {0: -1.110463, 1: 0.368915, 2: 0.08298},
}
),
),
],
)
def test_single_vars_work_with_multiindex(
self, id_vars, value_vars, col_level, expected
):
result = self.df1.melt(id_vars, value_vars, col_level=col_level)
tm.assert_frame_equal(result, expected)
def test_tuple_vars_fail_with_multiindex(self):
# melt should fail with an informative error message if
# the columns have a MultiIndex and a tuple is passed
# for id_vars or value_vars.
tuple_a = ("A", "a")
list_a = [tuple_a]
tuple_b = ("B", "b")
list_b = [tuple_b]
msg = r"(id|value)_vars must be a list of tuples when columns are a MultiIndex"
for id_vars, value_vars in (
(tuple_a, list_b),
(list_a, tuple_b),
(tuple_a, tuple_b),
):
with pytest.raises(ValueError, match=msg):
self.df1.melt(id_vars=id_vars, value_vars=value_vars)
def test_custom_var_name(self):
result5 = self.df.melt(var_name=self.var_name)
assert result5.columns.tolist() == ["var", "value"]
result6 = self.df.melt(id_vars=["id1"], var_name=self.var_name)
assert result6.columns.tolist() == ["id1", "var", "value"]
result7 = self.df.melt(id_vars=["id1", "id2"], var_name=self.var_name)
assert result7.columns.tolist() == ["id1", "id2", "var", "value"]
result8 = self.df.melt(
id_vars=["id1", "id2"], value_vars="A", var_name=self.var_name
)
assert result8.columns.tolist() == ["id1", "id2", "var", "value"]
result9 = self.df.melt(
id_vars=["id1", "id2"], value_vars=["A", "B"], var_name=self.var_name
)
expected9 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
self.var_name: ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", self.var_name, "value"],
)
tm.assert_frame_equal(result9, expected9)
def test_custom_value_name(self):
result10 = self.df.melt(value_name=self.value_name)
assert result10.columns.tolist() == ["variable", "val"]
result11 = self.df.melt(id_vars=["id1"], value_name=self.value_name)
assert result11.columns.tolist() == ["id1", "variable", "val"]
result12 = self.df.melt(id_vars=["id1", "id2"], value_name=self.value_name)
assert result12.columns.tolist() == ["id1", "id2", "variable", "val"]
result13 = self.df.melt(
id_vars=["id1", "id2"], value_vars="A", value_name=self.value_name
)
assert result13.columns.tolist() == ["id1", "id2", "variable", "val"]
result14 = self.df.melt(
id_vars=["id1", "id2"], value_vars=["A", "B"], value_name=self.value_name
)
expected14 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
self.value_name: (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", self.value_name],
)
tm.assert_frame_equal(result14, expected14)
def test_custom_var_and_value_name(self):
result15 = self.df.melt(var_name=self.var_name, value_name=self.value_name)
assert result15.columns.tolist() == ["var", "val"]
result16 = self.df.melt(
id_vars=["id1"], var_name=self.var_name, value_name=self.value_name
)
assert result16.columns.tolist() == ["id1", "var", "val"]
result17 = self.df.melt(
id_vars=["id1", "id2"], var_name=self.var_name, value_name=self.value_name
)
assert result17.columns.tolist() == ["id1", "id2", "var", "val"]
result18 = self.df.melt(
id_vars=["id1", "id2"],
value_vars="A",
var_name=self.var_name,
value_name=self.value_name,
)
assert result18.columns.tolist() == ["id1", "id2", "var", "val"]
result19 = self.df.melt(
id_vars=["id1", "id2"],
value_vars=["A", "B"],
var_name=self.var_name,
value_name=self.value_name,
)
expected19 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
self.var_name: ["A"] * 10 + ["B"] * 10,
self.value_name: (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", self.var_name, self.value_name],
)
tm.assert_frame_equal(result19, expected19)
df20 = self.df.copy()
df20.columns.name = "foo"
result20 = df20.melt()
assert result20.columns.tolist() == ["foo", "value"]
def test_col_level(self):
res1 = self.df1.melt(col_level=0)
res2 = self.df1.melt(col_level="CAP")
assert res1.columns.tolist() == ["CAP", "value"]
assert res2.columns.tolist() == ["CAP", "value"]
def test_multiindex(self):
res = self.df1.melt()
assert res.columns.tolist() == ["CAP", "low", "value"]
@pytest.mark.parametrize(
"col",
[
pd.Series(pd.date_range("2010", periods=5, tz="US/Pacific")),
pd.Series(["a", "b", "c", "a", "d"], dtype="category"),
pd.Series([0, 1, 0, 0, 0]),
],
)
def test_pandas_dtypes(self, col):
# GH 15785
df = DataFrame(
{"klass": range(5), "col": col, "attr1": [1, 0, 0, 0, 0], "attr2": col}
)
expected_value = pd.concat([pd.Series([1, 0, 0, 0, 0]), col], ignore_index=True)
result = melt(
df, id_vars=["klass", "col"], var_name="attribute", value_name="value"
)
expected = DataFrame(
{
0: list(range(5)) * 2,
1: pd.concat([col] * 2, ignore_index=True),
2: ["attr1"] * 5 + ["attr2"] * 5,
3: expected_value,
}
)
expected.columns = ["klass", "col", "attribute", "value"]
tm.assert_frame_equal(result, expected)
def test_preserve_category(self):
# GH 15853
data = DataFrame({"A": [1, 2], "B": pd.Categorical(["X", "Y"])})
result = melt(data, ["B"], ["A"])
expected = DataFrame(
{"B": pd.Categorical(["X", "Y"]), "variable": ["A", "A"], "value": [1, 2]}
)
tm.assert_frame_equal(result, expected)
def test_melt_missing_columns_raises(self):
# GH-23575
# This test is to ensure that pandas raises an error if melting is
# attempted with column names absent from the dataframe
# Generate data
df = DataFrame(np.random.randn(5, 4), columns=list("abcd"))
# Try to melt with missing `value_vars` column name
msg = "The following '{Var}' are not present in the DataFrame: {Col}"
with pytest.raises(
KeyError, match=msg.format(Var="value_vars", Col="\\['C'\\]")
):
df.melt(["a", "b"], ["C", "d"])
# Try to melt with missing `id_vars` column name
with pytest.raises(KeyError, match=msg.format(Var="id_vars", Col="\\['A'\\]")):
df.melt(["A", "b"], ["c", "d"])
# Multiple missing
with pytest.raises(
KeyError,
match=msg.format(Var="id_vars", Col="\\['not_here', 'or_there'\\]"),
):
df.melt(["a", "b", "not_here", "or_there"], ["c", "d"])
# Multiindex melt fails if column is missing from multilevel melt
multi = df.copy()
multi.columns = [list("ABCD"), list("abcd")]
with pytest.raises(KeyError, match=msg.format(Var="id_vars", Col="\\['E'\\]")):
multi.melt([("E", "a")], [("B", "b")])
# Multiindex fails if column is missing from single level melt
with pytest.raises(
KeyError, match=msg.format(Var="value_vars", Col="\\['F'\\]")
):
multi.melt(["A"], ["F"], col_level=0)
def test_melt_mixed_int_str_id_vars(self):
# GH 29718
df = DataFrame({0: ["foo"], "a": ["bar"], "b": [1], "d": [2]})
result = melt(df, id_vars=[0, "a"], value_vars=["b", "d"])
expected = DataFrame(
{0: ["foo"] * 2, "a": ["bar"] * 2, "variable": list("bd"), "value": [1, 2]}
)
tm.assert_frame_equal(result, expected)
def test_melt_mixed_int_str_value_vars(self):
# GH 29718
df = DataFrame({0: ["foo"], "a": ["bar"]})
result = melt(df, value_vars=[0, "a"])
expected = DataFrame({"variable": [0, "a"], "value": ["foo", "bar"]})
tm.assert_frame_equal(result, expected)
def test_ignore_index(self):
# GH 17440
df = DataFrame({"foo": [0], "bar": [1]}, index=["first"])
result = melt(df, ignore_index=False)
expected = DataFrame(
{"variable": ["foo", "bar"], "value": [0, 1]}, index=["first", "first"]
)
tm.assert_frame_equal(result, expected)
def test_ignore_multiindex(self):
# GH 17440
index = pd.MultiIndex.from_tuples(
[("first", "second"), ("first", "third")], names=["baz", "foobar"]
)
df = DataFrame({"foo": [0, 1], "bar": [2, 3]}, index=index)
result = melt(df, ignore_index=False)
expected_index = pd.MultiIndex.from_tuples(
[("first", "second"), ("first", "third")] * 2, names=["baz", "foobar"]
)
expected = DataFrame(
{"variable": ["foo"] * 2 + ["bar"] * 2, "value": [0, 1, 2, 3]},
index=expected_index,
)
tm.assert_frame_equal(result, expected)
def test_ignore_index_name_and_type(self):
# GH 17440
index = pd.Index(["foo", "bar"], dtype="category", name="baz")
df = DataFrame({"x": [0, 1], "y": [2, 3]}, index=index)
result = melt(df, ignore_index=False)
expected_index = pd.Index(["foo", "bar"] * 2, dtype="category", name="baz")
expected = DataFrame(
{"variable": ["x", "x", "y", "y"], "value": [0, 1, 2, 3]},
index=expected_index,
)
tm.assert_frame_equal(result, expected)
def test_melt_with_duplicate_columns(self):
# GH#41951
df = DataFrame([["id", 2, 3]], columns=["a", "b", "b"])
result = df.melt(id_vars=["a"], value_vars=["b"])
expected = DataFrame(
[["id", "b", 2], ["id", "b", 3]], columns=["a", "variable", "value"]
)
tm.assert_frame_equal(result, expected)
class TestLreshape:
def test_pairs(self):
data = {
"birthdt": [
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
],
"birthwt": [1766, 3301, 1454, 3139, 4133],
"id": [101, 102, 103, 104, 105],
"sex": ["Male", "Female", "Female", "Female", "Female"],
"visitdt1": [
"11jan2009",
"22dec2008",
"04jan2009",
"29dec2008",
"20jan2009",
],
"visitdt2": ["21jan2009", np.nan, "22jan2009", "31dec2008", "03feb2009"],
"visitdt3": ["05feb2009", np.nan, np.nan, "02jan2009", "15feb2009"],
"wt1": [1823, 3338, 1549, 3298, 4306],
"wt2": [2011.0, np.nan, 1892.0, 3338.0, 4575.0],
"wt3": [2293.0, np.nan, np.nan, 3377.0, 4805.0],
}
df = DataFrame(data)
spec = {
"visitdt": [f"visitdt{i:d}" for i in range(1, 4)],
"wt": [f"wt{i:d}" for i in range(1, 4)],
}
result = lreshape(df, spec)
exp_data = {
"birthdt": [
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"21dec2008",
"11jan2009",
],
"birthwt": [
1766,
3301,
1454,
3139,
4133,
1766,
1454,
3139,
4133,
1766,
3139,
4133,
],
"id": [101, 102, 103, 104, 105, 101, 103, 104, 105, 101, 104, 105],
"sex": [
"Male",
"Female",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
],
"visitdt": [
"11jan2009",
"22dec2008",
"04jan2009",
"29dec2008",
"20jan2009",
"21jan2009",
"22jan2009",
"31dec2008",
"03feb2009",
"05feb2009",
"02jan2009",
"15feb2009",
],
"wt": [
1823.0,
3338.0,
1549.0,
3298.0,
4306.0,
2011.0,
1892.0,
3338.0,
4575.0,
2293.0,
3377.0,
4805.0,
],
}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
result = lreshape(df, spec, dropna=False)
exp_data = {
"birthdt": [
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
],
"birthwt": [
1766,
3301,
1454,
3139,
4133,
1766,
3301,
1454,
3139,
4133,
1766,
3301,
1454,
3139,
4133,
],
"id": [
101,
102,
103,
104,
105,
101,
102,
103,
104,
105,
101,
102,
103,
104,
105,
],
"sex": [
"Male",
"Female",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
"Female",
"Female",
],
"visitdt": [
"11jan2009",
"22dec2008",
"04jan2009",
"29dec2008",
"20jan2009",
"21jan2009",
np.nan,
"22jan2009",
"31dec2008",
"03feb2009",
"05feb2009",
np.nan,
np.nan,
"02jan2009",
"15feb2009",
],
"wt": [
1823.0,
3338.0,
1549.0,
3298.0,
4306.0,
2011.0,
np.nan,
1892.0,
3338.0,
4575.0,
2293.0,
np.nan,
np.nan,
3377.0,
4805.0,
],
}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = lreshape(df, spec, dropna=False, label="foo")
spec = {
"visitdt": [f"visitdt{i:d}" for i in range(1, 3)],
"wt": [f"wt{i:d}" for i in range(1, 4)],
}
msg = "All column lists must be same length"
with pytest.raises(ValueError, match=msg):
lreshape(df, spec)
class TestWideToLong:
def test_simple(self):
np.random.seed(123)
x = np.random.randn(3)
df = DataFrame(
{
"A1970": {0: "a", 1: "b", 2: "c"},
"A1980": {0: "d", 1: "e", 2: "f"},
"B1970": {0: 2.5, 1: 1.2, 2: 0.7},
"B1980": {0: 3.2, 1: 1.3, 2: 0.1},
"X": dict(zip(range(3), x)),
}
)
df["id"] = df.index
exp_data = {
"X": x.tolist() + x.tolist(),
"A": ["a", "b", "c", "d", "e", "f"],
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": [1970, 1970, 1970, 1980, 1980, 1980],
"id": [0, 1, 2, 0, 1, 2],
}
expected = DataFrame(exp_data)
expected = expected.set_index(["id", "year"])[["X", "A", "B"]]
result = wide_to_long(df, ["A", "B"], i="id", j="year")
tm.assert_frame_equal(result, expected)
def test_stubs(self):
# GH9204 wide_to_long call should not modify 'stubs' list
df = DataFrame([[0, 1, 2, 3, 8], [4, 5, 6, 7, 9]])
df.columns = ["id", "inc1", "inc2", "edu1", "edu2"]
stubs = ["inc", "edu"]
wide_to_long(df, stubs, i="id", j="age")
assert stubs == ["inc", "edu"]
def test_separating_character(self):
# GH14779
np.random.seed(123)
x = np.random.randn(3)
df = DataFrame(
{
"A.1970": {0: "a", 1: "b", 2: "c"},
"A.1980": {0: "d", 1: "e", 2: "f"},
"B.1970": {0: 2.5, 1: 1.2, 2: 0.7},
"B.1980": {0: 3.2, 1: 1.3, 2: 0.1},
"X": dict(zip(range(3), x)),
}
)
df["id"] = df.index
exp_data = {
"X": x.tolist() + x.tolist(),
"A": ["a", "b", "c", "d", "e", "f"],
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": [1970, 1970, 1970, 1980, 1980, 1980],
"id": [0, 1, 2, 0, 1, 2],
}
expected = DataFrame(exp_data)
expected = expected.set_index(["id", "year"])[["X", "A", "B"]]
result = wide_to_long(df, ["A", "B"], i="id", j="year", sep=".")
tm.assert_frame_equal(result, expected)
def test_escapable_characters(self):
np.random.seed(123)
x = np.random.randn(3)
df = DataFrame(
{
"A(quarterly)1970": {0: "a", 1: "b", 2: "c"},
"A(quarterly)1980": {0: "d", 1: "e", 2: "f"},
"B(quarterly)1970": {0: 2.5, 1: 1.2, 2: 0.7},
"B(quarterly)1980": {0: 3.2, 1: 1.3, 2: 0.1},
"X": dict(zip(range(3), x)),
}
)
df["id"] = df.index
exp_data = {
"X": x.tolist() + x.tolist(),
"A(quarterly)": ["a", "b", "c", "d", "e", "f"],
"B(quarterly)": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": [1970, 1970, 1970, 1980, 1980, 1980],
"id": [0, 1, 2, 0, 1, 2],
}
expected = DataFrame(exp_data)
expected = expected.set_index(["id", "year"])[
["X", "A(quarterly)", "B(quarterly)"]
]
result = wide_to_long(df, ["A(quarterly)", "B(quarterly)"], i="id", j="year")
tm.assert_frame_equal(result, expected)
def test_unbalanced(self):
# test that we can have a varying amount of time variables
df = DataFrame(
{
"A2010": [1.0, 2.0],
"A2011": [3.0, 4.0],
"B2010": [5.0, 6.0],
"X": ["X1", "X2"],
}
)
df["id"] = df.index
exp_data = {
"X": ["X1", "X2", "X1", "X2"],
"A": [1.0, 2.0, 3.0, 4.0],
"B": [5.0, 6.0, np.nan, np.nan],
"id": [0, 1, 0, 1],
"year": [2010, 2010, 2011, 2011],
}
expected = DataFrame(exp_data)
expected = expected.set_index(["id", "year"])[["X", "A", "B"]]
result = wide_to_long(df, ["A", "B"], i="id", j="year")
tm.assert_frame_equal(result, expected)
def test_character_overlap(self):
# Test we handle overlapping characters in both id_vars and value_vars
df = DataFrame(
{
"A11": ["a11", "a22", "a33"],
"A12": ["a21", "a22", "a23"],
"B11": ["b11", "b12", "b13"],
"B12": ["b21", "b22", "b23"],
"BB11": [1, 2, 3],
"BB12": [4, 5, 6],
"BBBX": [91, 92, 93],
"BBBZ": [91, 92, 93],
}
)
df["id"] = df.index
expected = DataFrame(
{
"BBBX": [91, 92, 93, 91, 92, 93],
"BBBZ": [91, 92, 93, 91, 92, 93],
"A": ["a11", "a22", "a33", "a21", "a22", "a23"],
"B": ["b11", "b12", "b13", "b21", "b22", "b23"],
"BB": [1, 2, 3, 4, 5, 6],
"id": [0, 1, 2, 0, 1, 2],
"year": [11, 11, 11, 12, 12, 12],
}
)
expected = expected.set_index(["id", "year"])[["BBBX", "BBBZ", "A", "B", "BB"]]
result = wide_to_long(df, ["A", "B", "BB"], i="id", j="year")
tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1))
def test_invalid_separator(self):
# if an invalid separator is supplied a empty data frame is returned
sep = "nope!"
df = DataFrame(
{
"A2010": [1.0, 2.0],
"A2011": [3.0, 4.0],
"B2010": [5.0, 6.0],
"X": ["X1", "X2"],
}
)
df["id"] = df.index
exp_data = {
"X": "",
"A2010": [],
"A2011": [],
"B2010": [],
"id": [],
"year": [],
"A": [],
"B": [],
}
expected = DataFrame(exp_data).astype({"year": "int"})
expected = expected.set_index(["id", "year"])[
["X", "A2010", "A2011", "B2010", "A", "B"]
]
expected.index = expected.index.set_levels([0, 1], level=0)
result = wide_to_long(df, ["A", "B"], i="id", j="year", sep=sep)
tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1))
def test_num_string_disambiguation(self):
# Test that we can disambiguate number value_vars from
# string value_vars
df = DataFrame(
{
"A11": ["a11", "a22", "a33"],
"A12": ["a21", "a22", "a23"],
"B11": ["b11", "b12", "b13"],
"B12": ["b21", "b22", "b23"],
"BB11": [1, 2, 3],
"BB12": [4, 5, 6],
"Arating": [91, 92, 93],
"Arating_old": [91, 92, 93],
}
)
df["id"] = df.index
expected = DataFrame(
{
"Arating": [91, 92, 93, 91, 92, 93],
"Arating_old": [91, 92, 93, 91, 92, 93],
"A": ["a11", "a22", "a33", "a21", "a22", "a23"],
"B": ["b11", "b12", "b13", "b21", "b22", "b23"],
"BB": [1, 2, 3, 4, 5, 6],
"id": [0, 1, 2, 0, 1, 2],
"year": [11, 11, 11, 12, 12, 12],
}
)
expected = expected.set_index(["id", "year"])[
["Arating", "Arating_old", "A", "B", "BB"]
]
result = wide_to_long(df, ["A", "B", "BB"], i="id", j="year")
tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1))
def test_invalid_suffixtype(self):
# If all stubs names end with a string, but a numeric suffix is
# assumed, an empty data frame is returned
df = DataFrame(
{
"Aone": [1.0, 2.0],
"Atwo": [3.0, 4.0],
"Bone": [5.0, 6.0],
"X": ["X1", "X2"],
}
)
df["id"] = df.index
exp_data = {
"X": "",
"Aone": [],
"Atwo": [],
"Bone": [],
"id": [],
"year": [],
"A": [],
"B": [],
}
expected = DataFrame(exp_data).astype({"year": "int"})
expected = expected.set_index(["id", "year"])
expected.index = expected.index.set_levels([0, 1], level=0)
result = wide_to_long(df, ["A", "B"], i="id", j="year")
tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1))
def test_multiple_id_columns(self):
# Taken from http://www.ats.ucla.edu/stat/stata/modules/reshapel.htm
df = DataFrame(
{
"famid": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"birth": [1, 2, 3, 1, 2, 3, 1, 2, 3],
"ht1": [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
"ht2": [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9],
}
)
expected = DataFrame(
{
"ht": [
2.8,
3.4,
2.9,
3.8,
2.2,
2.9,
2.0,
3.2,
1.8,
2.8,
1.9,
2.4,
2.2,
3.3,
2.3,
3.4,
2.1,
2.9,
],
"famid": [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3],
"birth": [1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3],
"age": [1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
}
)
expected = expected.set_index(["famid", "birth", "age"])[["ht"]]
result = wide_to_long(df, "ht", i=["famid", "birth"], j="age")
tm.assert_frame_equal(result, expected)
def test_non_unique_idvars(self):
# GH16382
# Raise an error message if non unique id vars (i) are passed
df = DataFrame(
{"A_A1": [1, 2, 3, 4, 5], "B_B1": [1, 2, 3, 4, 5], "x": [1, 1, 1, 1, 1]}
)
msg = "the id variables need to uniquely identify each row"
with pytest.raises(ValueError, match=msg):
wide_to_long(df, ["A_A", "B_B"], i="x", j="colname")
def test_cast_j_int(self):
df = DataFrame(
{
"actor_1": ["CCH Pounder", "<NAME>", "<NAME>"],
"actor_2": ["<NAME>", "<NAME>", "<NAME>"],
"actor_fb_likes_1": [1000.0, 40000.0, 11000.0],
"actor_fb_likes_2": [936.0, 5000.0, 393.0],
"title": ["Avatar", "Pirates of the Caribbean", "Spectre"],
}
)
expected = DataFrame(
{
"actor": [
"CCH Pounder",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
"actor_fb_likes": [1000.0, 40000.0, 11000.0, 936.0, 5000.0, 393.0],
"num": [1, 1, 1, 2, 2, 2],
"title": [
"Avatar",
"Pirates of the Caribbean",
"Spectre",
"Avatar",
"Pirates of the Caribbean",
"Spectre",
],
}
).set_index(["title", "num"])
result = wide_to_long(
df, ["actor", "actor_fb_likes"], i="title", j="num", sep="_"
)
tm.assert_frame_equal(result, expected)
def test_identical_stubnames(self):
df = DataFrame(
{
"A2010": [1.0, 2.0],
"A2011": [3.0, 4.0],
"B2010": [5.0, 6.0],
"A": ["X1", "X2"],
}
)
msg = "stubname can't be identical to a column name"
with pytest.raises(ValueError, match=msg):
wide_to_long(df, ["A", "B"], i="A", j="colname")
def test_nonnumeric_suffix(self):
df = DataFrame(
{
"treatment_placebo": [1.0, 2.0],
"treatment_test": [3.0, 4.0],
"result_placebo": [5.0, 6.0],
"A": ["X1", "X2"],
}
)
expected = DataFrame(
{
"A": ["X1", "X2", "X1", "X2"],
"colname": ["placebo", "placebo", "test", "test"],
"result": [5.0, 6.0, np.nan, np.nan],
"treatment": [1.0, 2.0, 3.0, 4.0],
}
)
expected = expected.set_index(["A", "colname"])
result = wide_to_long(
df, ["result", "treatment"], i="A", j="colname", suffix="[a-z]+", sep="_"
)
tm.assert_frame_equal(result, expected)
def test_mixed_type_suffix(self):
df = DataFrame(
{
"A": ["X1", "X2"],
"result_1": [0, 9],
"result_foo": [5.0, 6.0],
"treatment_1": [1.0, 2.0],
"treatment_foo": [3.0, 4.0],
}
)
expected = DataFrame(
{
"A": ["X1", "X2", "X1", "X2"],
"colname": ["1", "1", "foo", "foo"],
"result": [0.0, 9.0, 5.0, 6.0],
"treatment": [1.0, 2.0, 3.0, 4.0],
}
).set_index(["A", "colname"])
result = wide_to_long(
df, ["result", "treatment"], i="A", j="colname", suffix=".+", sep="_"
)
tm.assert_frame_equal(result, expected)
def test_float_suffix(self):
df = DataFrame(
{
"treatment_1.1": [1.0, 2.0],
"treatment_2.1": [3.0, 4.0],
"result_1.2": [5.0, 6.0],
"result_1": [0, 9],
"A": ["X1", "X2"],
}
)
expected = DataFrame(
{
"A": ["X1", "X2", "X1", "X2", "X1", "X2", "X1", "X2"],
"colname": [1.2, 1.2, 1.0, 1.0, 1.1, 1.1, 2.1, 2.1],
"result": [5.0, 6.0, 0.0, 9.0, np.nan, np.nan, np.nan, np.nan],
"treatment": [np.nan, np.nan, np.nan, np.nan, 1.0, 2.0, 3.0, 4.0],
}
)
expected = expected.set_index(["A", "colname"])
result = wide_to_long(
df, ["result", "treatment"], i="A", j="colname", suffix="[0-9.]+", sep="_"
)
tm.assert_frame_equal(result, expected)
def test_col_substring_of_stubname(self):
# GH22468
# Don't raise ValueError when a column name is a substring
# of a stubname that's been passed as a string
wide_data = {
"node_id": {0: 0, 1: 1, 2: 2, 3: 3, 4: 4},
"A": {0: 0.80, 1: 0.0, 2: 0.25, 3: 1.0, 4: 0.81},
"PA0": {0: 0.74, 1: 0.56, 2: 0.56, 3: 0.98, 4: 0.6},
"PA1": {0: 0.77, 1: 0.64, 2: 0.52, 3: 0.98, 4: 0.67},
"PA3": {0: 0.34, 1: 0.70, 2: 0.52, 3: 0.98, 4: 0.67},
}
wide_df = DataFrame.from_dict(wide_data)
expected = | wide_to_long(wide_df, stubnames=["PA"], i=["node_id", "A"], j="time") | pandas.wide_to_long |
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.metrics import f1_score
import pickle
from sklearn.metrics import classification_report, roc_curve
import os
import matplotlib.pyplot as plt
import seaborn as sns
import sys
only = None
if len(sys.argv) >= 2:
only = sys.argv[1].split(',')
print('Only', only)
sns.set(style='ticks', palette='Set2')
sns.despine()
def myplot(y, y_pred, y_pred_round, title, outpath):
plt.figure()
df = pd.DataFrame({
'y': y,
'y_pred': y_pred,
'y_pred_rnd': y_pred_round
})
# kde
df[['y', 'y_pred']].plot(kind='kde', title=title+'_kde')
plt.tight_layout(pad=0.5)
plt.savefig(os.path.join(outpath, title+'_kde.png'))
plt.close()
# roc curve
fpr, tpr, thresholds = roc_curve(df['y'], df['y_pred'])
plt.figure()
pd.DataFrame({
'tpr': tpr,
'fpr': fpr,
'thresholds': thresholds
}).plot(x='fpr', y='tpr', title=title+'_roc')
plt.tight_layout(pad=0.5)
plt.savefig(os.path.join(outpath, title+'_roc.png'))
plt.close()
# correct
correct = df.query('y == y_pred_rnd')['y_pred'].rename('correct')
plt.figure()
correct.plot(kind='kde', title=title+'_kde', legend=True)
plt.tight_layout(pad=0.5)
# plt.savefig(os.path.join(outpath, title+'_kde_correct.png'))
# plt.close()
# errors
errors = df.query('y != y_pred_rnd')['y_pred'].rename('errors')
# plt.figure()
# errors.plot(kind='density', title=title+'errors kde')
errors.plot(kind='density', title=title+'kde', legend=True)
plt.tight_layout(pad=0.5)
# plt.savefig(os.path.join(outpath, title+'_kde_errors.png'))
plt.savefig(os.path.join(outpath, title+'_kde_correct_errors.png'))
plt.close()
plt.close('all')
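# Usage note (added): myplot is called once per task further below as
#   myplot(y_test, y_pred, y_pred_round, task['name'], outpath)
# and writes <title>_kde.png, <title>_roc.png and <title>_kde_correct_errors.png.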
outpath = 'output/feature_ablation_study'
os.makedirs(outpath, exist_ok=True)
print('loading dataset...')
dataset = pd.read_pickle('./data/nil_dataset.pickle')
print('loaded...')
tasks = [
{
'name': 'aida_under_cross_max',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
]
},
{
'name': 'aida_under_all_max',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'bi_stats_10_max',
]
},
{
'name': 'aida_all_max',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'no',
'features': [
'cross_stats_10_max',
'bi_stats_10_max',
]
},
{
'name': 'aida_under_all_max_levenshtein',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'bi_stats_10_max',
'cross_levenshtein'
# no bi levenshtein
]
},
{
'name': 'aida_under_all_max_jaccard',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'bi_stats_10_max',
'cross_jaccard'
# no bi levenshtein
]
},
{
'name': 'aida_under_cross_max_jaccard',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'cross_jaccard'
# no bi levenshtein
]
},
{
'name': 'aida_under_all_max_stdev4',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'cross_stats_4_stdev',
'bi_stats_10_max',
'bi_stats_4_stdev',
]
},
{
'name': 'aida_under_all_max_stdev10',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'cross_stats_10_stdev',
'bi_stats_10_max',
'bi_stats_10_stdev',
]
},
{
'name': 'aida_under_all_max_stats10',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'cross_stats_10_mean',
'cross_stats_10_median',
'cross_stats_10_stdev',
'bi_stats_10_max',
'bi_stats_10_mean',
'bi_stats_10_median',
'bi_stats_10_stdev',
]
},
{
'name': 'aida_under_all_max_ner_wiki',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'bi_stats_10_max',
'ner_per',
'ner_loc',
'ner_org',
'ner_misc',
'wiki_per_cross',
'wiki_loc_cross',
'wiki_org_cross',
'wiki_misc_cross',
]
},
{
'name': 'aida_under_all_max_ner_wiki_stdev4',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'cross_stats_4_stdev',
'bi_stats_10_max',
'bi_stats_4_stdev',
'ner_per',
'ner_loc',
'ner_org',
'ner_misc',
'wiki_per_cross',
'wiki_loc_cross',
'wiki_org_cross',
'wiki_misc_cross',
]
},
{
'name': 'aida_under_all_max_ner_wiki_stdev4_levenshtein',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'cross_stats_4_stdev',
'bi_stats_10_max',
'bi_stats_4_stdev',
'cross_levenshtein',
'ner_per',
'ner_loc',
'ner_org',
'ner_misc',
'wiki_per_cross',
'wiki_loc_cross',
'wiki_org_cross',
'wiki_misc_cross',
]
},
{
'name': 'aida_under_all_max_ner_wiki_levenshtein',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'bi_stats_10_max',
'cross_levenshtein',
'ner_per',
'ner_loc',
'ner_org',
'ner_misc',
'wiki_per_cross',
'wiki_loc_cross',
'wiki_org_cross',
'wiki_misc_cross',
]
},
{
'name': 'aida_under_all_max_ner_wiki_jaccard',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'bi_stats_10_max',
'cross_jaccard',
'ner_per',
'ner_loc',
'ner_org',
'ner_misc',
'wiki_per_cross',
'wiki_loc_cross',
'wiki_org_cross',
'wiki_misc_cross',
]
},
{
'name': 'aida_under_all_max_ner_wiki_levenshtein_jaccard',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'bi_stats_10_max',
'cross_levenshtein',
'cross_jaccard',
'ner_per',
'ner_loc',
'ner_org',
'ner_misc',
'wiki_per_cross',
'wiki_loc_cross',
'wiki_org_cross',
'wiki_misc_cross',
]
},
{
'name': 'aida_under_all_max_levenshtein_jaccard',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'bi_stats_10_max',
'cross_levenshtein',
'cross_jaccard'
# no bi levenshtein
]
},
{
'name': 'aida_under_all_max_stdev4_levenshtein',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'cross_stats_4_stdev',
'bi_stats_10_max',
'bi_stats_4_stdev',
'cross_levenshtein'
]
},
{
'name': 'aida_under_all_max_stdev4_jaccard',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'cross_stats_4_stdev',
'bi_stats_10_max',
'bi_stats_4_stdev',
'cross_jaccard'
]
},
{
'name': 'aida_under_all_max_stdev4_levenshtein_jaccard',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'cross_stats_4_stdev',
'bi_stats_10_max',
'bi_stats_4_stdev',
'cross_levenshtein',
'cross_jaccard'
]
},
{
'name': 'aida_under_all_max_stats10_levenshtein',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'cross_stats_10_mean',
'cross_stats_10_median',
'cross_stats_10_stdev',
'bi_stats_10_max',
'bi_stats_10_mean',
'bi_stats_10_median',
'bi_stats_10_stdev',
'cross_levenshtein'
]
},
{
'name': 'aida_under_all_max_stats10_jaccard',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'cross_stats_10_mean',
'cross_stats_10_median',
'cross_stats_10_stdev',
'bi_stats_10_max',
'bi_stats_10_mean',
'bi_stats_10_median',
'bi_stats_10_stdev',
'cross_jaccard'
]
},
{
'name': 'aida_under_all_max_stats10_levenshtein_jaccard',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'cross_stats_10_mean',
'cross_stats_10_median',
'cross_stats_10_stdev',
'bi_stats_10_max',
'bi_stats_10_mean',
'bi_stats_10_median',
'bi_stats_10_stdev',
'cross_levenshtein',
'cross_jaccard'
]
},
{
'name': 'aida_under_all_max_stats10_levenshtein_ner_wiki',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'cross_stats_10_mean',
'cross_stats_10_median',
'cross_stats_10_stdev',
'bi_stats_10_max',
'bi_stats_10_mean',
'bi_stats_10_median',
'bi_stats_10_stdev',
'cross_levenshtein',
'ner_per',
'ner_loc',
'ner_org',
'ner_misc',
'wiki_per_cross',
'wiki_loc_cross',
'wiki_org_cross',
'wiki_misc_cross',
]
},
{
'name': 'aida_under_all_max_stats10_ner_wiki',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'cross_stats_10_mean',
'cross_stats_10_median',
'cross_stats_10_stdev',
'bi_stats_10_max',
'bi_stats_10_mean',
'bi_stats_10_median',
'bi_stats_10_stdev',
'ner_per',
'ner_loc',
'ner_org',
'ner_misc',
'wiki_per_cross',
'wiki_loc_cross',
'wiki_org_cross',
'wiki_misc_cross',
]
},
{
'name': 'aida_all_max_stats10_ner_wiki',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'no',
'features': [
'cross_stats_10_max',
'cross_stats_10_mean',
'cross_stats_10_median',
'cross_stats_10_stdev',
'bi_stats_10_max',
'bi_stats_10_mean',
'bi_stats_10_median',
'bi_stats_10_stdev',
'ner_per',
'ner_loc',
'ner_org',
'ner_misc',
'wiki_per_cross',
'wiki_loc_cross',
'wiki_org_cross',
'wiki_misc_cross',
]
},
{
'name': 'aida_under_cross_max_stats10_ner_wiki',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'cross_stats_10_mean',
'cross_stats_10_median',
'cross_stats_10_stdev',
'ner_per',
'ner_loc',
'ner_org',
'ner_misc',
'wiki_per_cross',
'wiki_loc_cross',
'wiki_org_cross',
'wiki_misc_cross',
]
},
{
'name': 'aida_under_all_max_stats10_ner_wiki_jaccard',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'cross_stats_10_max',
'cross_stats_10_mean',
'cross_stats_10_median',
'cross_stats_10_stdev',
'bi_stats_10_max',
'bi_stats_10_mean',
'bi_stats_10_median',
'bi_stats_10_stdev',
'cross_jaccard',
'ner_per',
'ner_loc',
'ner_org',
'ner_misc',
'wiki_per_cross',
'wiki_loc_cross',
'wiki_org_cross',
'wiki_misc_cross',
]
},
################## bi
{
'name': 'aida_under_bi_max',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
],
'y': 'y_bi',
},
{
'name': 'aida_under_bi_max_levenshtein',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
'bi_levenshtein'
# no cross levenshtein
],
'y': 'y_bi',
},
{
'name': 'aida_under_bi_max_jaccard',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
'bi_jaccard'
# no cross levenshtein
],
'y': 'y_bi',
},
{
'name': 'aida_under_bi_max_stdev4',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
'bi_stats_4_stdev',
# no cross levenshtein
],
'y': 'y_bi',
},
{
'name': 'aida_under_bi_max_stdev10',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
'bi_stats_10_stdev',
# no cross levenshtein
],
'y': 'y_bi',
},
{
'name': 'aida_under_bi_max_stats10',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
'bi_stats_10_mean',
'bi_stats_10_median',
'bi_stats_10_stdev',
# no cross levenshtein
],
'y': 'y_bi',
},
{
'name': 'aida_under_bi_max_ner_wiki',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
'ner_per',
'ner_loc',
'ner_org',
'ner_misc',
'wiki_per_bi',
'wiki_loc_bi',
'wiki_org_bi',
'wiki_misc_bi',
# no cross levenshtein
],
'y': 'y_bi',
},
{
'name': 'aida_under_bi_max_ner_wiki_stdev4',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
'bi_stats_4_stdev',
'ner_per',
'ner_loc',
'ner_org',
'ner_misc',
'wiki_per_bi',
'wiki_loc_bi',
'wiki_org_bi',
'wiki_misc_bi',
# no cross levenshtein
],
'y': 'y_bi',
},
{
'name': 'aida_under_bi_max_ner_wiki_levenshtein',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
'bi_levenshtein',
'ner_per',
'ner_loc',
'ner_org',
'ner_misc',
'wiki_per_bi',
'wiki_loc_bi',
'wiki_org_bi',
'wiki_misc_bi',
# no cross levenshtein
],
'y': 'y_bi',
},
{
'name': 'aida_under_bi_max_ner_wiki_jaccard',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
'bi_jaccard',
'ner_per',
'ner_loc',
'ner_org',
'ner_misc',
'wiki_per_bi',
'wiki_loc_bi',
'wiki_org_bi',
'wiki_misc_bi',
# no cross levenshtein
],
'y': 'y_bi',
},
{
'name': 'aida_under_bi_max_ner_wiki_levenshtein_jaccard',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
'bi_levenshtein',
'bi_jaccard',
'ner_per',
'ner_loc',
'ner_org',
'ner_misc',
'wiki_per_bi',
'wiki_loc_bi',
'wiki_org_bi',
'wiki_misc_bi',
# no cross levenshtein
],
'y': 'y_bi',
},
{
'name': 'aida_under_bi_max_levenshtein_jaccard',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
'bi_levenshtein',
'bi_jaccard',
# no cross levenshtein
],
'y': 'y_bi',
},
{
'name': 'aida_under_bi_max_stdev4_levenshtein',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
'bi_stats_4_stdev',
'bi_levenshtein',
# no cross levenshtein
],
'y': 'y_bi',
},
{
'name': 'aida_under_bi_max_stdev4_jaccard',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
'bi_stats_4_stdev',
'bi_jaccard',
# no cross levenshtein
],
'y': 'y_bi',
},
{
'name': 'aida_under_bi_max_stdev4_levenshtein_jaccard',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
'bi_stats_4_stdev',
'bi_levenshtein',
'bi_jaccard'
# no cross levenshtein
],
'y': 'y_bi',
},
{
'name': 'aida_under_bi_max_stats10_levenshtein',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
'bi_stats_10_mean',
'bi_stats_10_median',
'bi_stats_10_stdev',
'bi_levenshtein'
# no cross levenshtein
],
'y': 'y_bi',
},
{
'name': 'aida_under_bi_max_stats10_jaccard',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
'bi_stats_10_mean',
'bi_stats_10_median',
'bi_stats_10_stdev',
'bi_jaccard'
# no cross levenshtein
],
'y': 'y_bi',
},
{
'name': 'aida_under_bi_max_stats10_levenshtein_jaccard',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
'bi_stats_10_mean',
'bi_stats_10_median',
'bi_stats_10_stdev',
'bi_levenshtein',
'bi_jaccard'
# no cross levenshtein
],
'y': 'y_bi',
},
{
'name': 'aida_under_bi_max_stats10_levenshtein_ner_wiki',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
'bi_stats_10_mean',
'bi_stats_10_median',
'bi_stats_10_stdev',
'bi_levenshtein',
'ner_per',
'ner_loc',
'ner_org',
'ner_misc',
'wiki_per_bi',
'wiki_loc_bi',
'wiki_org_bi',
'wiki_misc_bi',
# no cross levenshtein
],
'y': 'y_bi',
},
{
'name': 'aida_under_bi_max_stats10_jaccard_ner_wiki',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
'bi_stats_10_mean',
'bi_stats_10_median',
'bi_stats_10_stdev',
'bi_jaccard',
'ner_per',
'ner_loc',
'ner_org',
'ner_misc',
'wiki_per_bi',
'wiki_loc_bi',
'wiki_org_bi',
'wiki_misc_bi',
# no cross levenshtein
],
'y': 'y_bi',
},
{
'name': 'aida_under_bi_max_stdev4_levenshtein_ner_wiki',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
'bi_stats_4_stdev',
'bi_levenshtein',
'ner_per',
'ner_loc',
'ner_org',
'ner_misc',
'wiki_per_bi',
'wiki_loc_bi',
'wiki_org_bi',
'wiki_misc_bi',
# no cross levenshtein
],
'y': 'y_bi',
},
{
'name': 'aida_under_bi_max_stats10_ner_wiki',
'train': ['AIDA-YAGO2_train_ner'],
'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
'sampling': 'undersample',
'features': [
'bi_stats_10_max',
'bi_stats_10_mean',
'bi_stats_10_median',
'bi_stats_10_stdev',
'ner_per',
'ner_loc',
'ner_org',
'ner_misc',
'wiki_per_bi',
'wiki_loc_bi',
'wiki_org_bi',
'wiki_misc_bi',
# no cross levenshtein
],
'y': 'y_bi',
},
]
# assert no duplicates
vc = pd.DataFrame([task['name'] for task in tasks]).value_counts()
if not (vc <= 1).all():
print('!' * 30)
print('Duplicates:')
print('!' * 30)
print(vc[vc > 1])
raise Exception('duplicate task!')
csv_report = pd.DataFrame()
if only is not None:
tasks = [t for t in tasks if t['name'] in only]
current_report = None
if os.path.isfile(os.path.join(outpath, 'feature_ablation_summary.csv')):
current_report = pd.read_csv(os.path.join(outpath, 'feature_ablation_summary.csv'), index_col=0)
for task in tasks:
print('-'*30)
print(task['name'])
if current_report is not None and task['name'] in current_report.index:
print('skipping (already in summary report)...')
continue
y_whom = 'y_cross'
if 'y' in task:
y_whom = task['y']
train_df = dataset[dataset['src'].isin(task['train'])]
if isinstance(task['test'], list):
test_df = dataset[dataset['src'].isin(task['test'])]
elif isinstance(task['test'], float):
train_df, test_df = train_test_split(train_df, test_size = task['test'], random_state = 1234)
else:
raise Exception()
train_df_shape_original = train_df.shape[0]
test_df_shape_original = test_df.shape[0]
train_df = train_df[train_df[task['features']].notna().all(axis=1)]
test_df = test_df[test_df[task['features']].notna().all(axis=1)]
train_df_shape_notna = train_df.shape[0]
test_df_shape_notna = test_df.shape[0]
if task['sampling'] == 'undersample':
print('undersampling...')
train_df_0 = train_df.query(f'{y_whom} == 0')
train_df_1 = train_df.query(f'{y_whom} == 1')
train_df_1 = train_df_1.sample(frac=1).iloc[:train_df_0.shape[0]]
train_df = pd.concat([train_df_0, train_df_1]).sample(frac=1)
elif task['sampling'] == 'no':
pass
else:
raise Exception()
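# Design note (added, not used in the original runs): an alternative to manual
# undersampling is to keep the full training set and let the classifier reweight
# classes, e.g. LogisticRegression(random_state=1234, max_iter=200, class_weight='balanced');
# the manual approach above keeps the two classes exactly balanced in the training split.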
train_df_shape_actual = train_df.shape[0]
test_df_shape_actual = test_df.shape[0]
df_size_report = pd.DataFrame({
'train': [train_df_shape_original, train_df_shape_notna, train_df_shape_actual],
'test': [test_df_shape_original, test_df_shape_notna, test_df_shape_actual]
}, index=['original', 'notna', 'actual']).to_markdown()
print(df_size_report)
print(pd.DataFrame(train_df[y_whom].value_counts()).to_markdown())
X_train = train_df[task['features']].values
y_train = train_df[y_whom].values
X_test = test_df[task['features']].values
y_test = test_df[y_whom].values
# model
clf = make_pipeline(
StandardScaler(),
LogisticRegression(random_state=1234, max_iter=200)
)
clf.fit(X_train, y_train)
y_pred = clf.predict_proba(X_test)[:, 1]
y_pred_round = np.round(y_pred)
test_df['y_pred_round'] = y_pred_round
test_df['y_pred'] = y_pred
bi_baseline = test_df.query('bi_labels == bi_best_candidate or Wikipedia_title == bi_best_candidate_title').shape[0]
cross_baseline = test_df.query('cross_labels == cross_best_candidate or Wikipedia_title == cross_best_candidate_title').shape[0]
bi_acc = test_df.query('(y_pred_round == 1 and (bi_labels == bi_best_candidate or Wikipedia_title == bi_best_candidate_title)) or (bi_labels == -1 and y_pred_round == 0)').shape[0]
cross_acc = test_df.query('(y_pred_round == 1 and (cross_labels == cross_best_candidate or Wikipedia_title == cross_best_candidate_title)) or (cross_labels == -1 and y_pred_round == 0)').shape[0]
bi_acc_correcting_nel = test_df.query(
'(y_pred_round == 1 and (bi_labels == bi_best_candidate or Wikipedia_title == bi_best_candidate_title))'
' or (bi_labels != bi_best_candidate and y_pred_round == 0)').shape[0]
cross_acc_correcting_nel = test_df.query(
'(y_pred_round == 1 and '
'(cross_labels == cross_best_candidate or Wikipedia_title == cross_best_candidate_title))'
' or (cross_labels != cross_best_candidate and y_pred_round == 0)').shape[0]
_classification_report = classification_report(y_test, y_pred_round)
# oracle corrects in [0.25, 0.75]
# TODO maybe look for a better way to get them (e.g. correct-error kde intersections ?)
tl = 0.25
th = 0.75
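# Illustration (added): with tl=0.25 and th=0.75, predictions of 0.93 or 0.08 are kept
# and scored automatically, while 0.55 falls inside the band, is dropped from the
# oracle frame below, and counts towards the ratio routed to a human validator.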
oracle_df = pd.DataFrame({
'y_test': y_test,
'y_pred': y_pred,
'y_pred_round': y_pred_round
})
oracle_original_shape = oracle_df.shape[0]
oracle_df = oracle_df.query(f'y_pred <= {tl} or y_pred >= {th}')
_classification_report_oracle = classification_report(oracle_df['y_test'], oracle_df['y_pred_round'])
test_df_oracle = test_df.query(f'y_pred <= {tl} or y_pred >= {th}')
bi_acc_oracle = test_df_oracle.query('(y_pred_round == 1 and (bi_labels == bi_best_candidate or Wikipedia_title == bi_best_candidate_title)) or (bi_labels == -1 and y_pred_round == 0)').shape[0]
cross_acc_oracle = test_df_oracle.query('(y_pred_round == 1 and (cross_labels == cross_best_candidate or Wikipedia_title == cross_best_candidate_title)) or (cross_labels == -1 and y_pred_round == 0)').shape[0]
_f1_0 = f1_score(y_test, y_pred_round, pos_label=0)
_f1_1 = f1_score(y_test, y_pred_round, pos_label=1)
_macro_avg_f1 = (_f1_0 + _f1_1) / 2
_f1_0_oracle = f1_score(oracle_df['y_test'], oracle_df['y_pred_round'], pos_label=0)
_f1_1_oracle = f1_score(oracle_df['y_test'], oracle_df['y_pred_round'], pos_label=1)
_macro_avg_f1_oracle = (_f1_0_oracle + _f1_1_oracle) / 2
csv_report = csv_report.append({
'name': task['name'],
'bi_baseline': bi_baseline / test_df_shape_actual,
'cross_baseline': cross_baseline / test_df_shape_actual,
'bi_acc': bi_acc / test_df_shape_actual,
'cross_acc': cross_acc / test_df_shape_actual,
'bi_acc_adjusted': bi_acc / test_df_shape_original,
'cross_acc_adjusted': cross_acc / test_df_shape_original,
'bi_acc_correcting_nel': bi_acc_correcting_nel / test_df_shape_actual,
'cross_acc_correcting_nel': cross_acc_correcting_nel / test_df_shape_actual,
'0-f1': _f1_0,
'1-f1': _f1_1,
'macro-avg-f1': _macro_avg_f1,
'oracle_ratio': 1 - (oracle_df.shape[0] / oracle_original_shape),
'bi_acc_oracle': bi_acc_oracle / test_df_oracle.shape[0],
'cross_acc_oracle': cross_acc_oracle / test_df_oracle.shape[0],
'0-f1-oracle': _f1_0_oracle,
'1-f1-oracle': _f1_1_oracle,
'macro-avg-f1-oracle': _macro_avg_f1_oracle,
}, ignore_index=True)
print(_classification_report)
print('-- Performances over test set:', task['test'], '--')
print('Bi baseline:', bi_baseline / test_df_shape_actual)
print('Cross baseline:', cross_baseline / test_df_shape_actual)
print('Bi acc:', bi_acc / test_df_shape_actual)
print('Cross acc:', cross_acc / test_df_shape_actual)
print('Bi acc adjusted:', bi_acc / test_df_shape_original)
print('Cross acc adjusted:', cross_acc / test_df_shape_original)
print(f'-- Oracle HITL evaluation when y_pred in [{tl}, {th}]')
print('Ratio to human validator:', 1 - (oracle_df.shape[0] / oracle_original_shape))
print(_classification_report_oracle)
print('Bi acc oracle:', bi_acc_oracle / test_df_oracle.shape[0])
print('Cross acc oracle:', cross_acc_oracle / test_df_oracle.shape[0])
with open(os.path.join(outpath, task['name']+'_report.txt'), 'w') as fd:
print(pd.DataFrame(train_df[y_whom].value_counts()).to_markdown(), file=fd)
print(df_size_report, file=fd)
print(_classification_report, file=fd)
print('-- Performances over test set:', task['test'], '--', file=fd)
print('Bi baseline:', bi_baseline / test_df_shape_actual, file=fd)
print('Cross baseline:', cross_baseline / test_df_shape_actual, file=fd)
print('Bi acc:', bi_acc / test_df_shape_actual, file=fd)
print('Cross acc:', cross_acc / test_df_shape_actual, file=fd)
print('Bi acc adjusted:', bi_acc / test_df_shape_original, file=fd)
print('Cross acc adjusted:', cross_acc / test_df_shape_original, file=fd)
print(f'-- Oracle HITL evaluation when y_pred in [{tl}, {th}]', file=fd)
print('Ratio to human validator:', 1 - (oracle_df.shape[0] / oracle_original_shape), file=fd)
print(_classification_report_oracle, file=fd)
print('Bi acc oracle:', bi_acc_oracle / test_df_oracle.shape[0], file=fd)
print('Cross acc oracle:', cross_acc_oracle / test_df_oracle.shape[0], file=fd)
with open(os.path.join(outpath, task['name']+'_model.pickle'), 'wb') as fd:
pickle.dump(clf, fd)
myplot(y_test, y_pred, y_pred_round, task['name'], outpath)
print('-'*30)
# if only is not None:
# csv_report_old = pd.read_csv(os.path.join(outpath, 'feature_ablation_summary.csv'), index_col=0)
# csv_report_old = csv_report_old[~csv_report_old['name'].isin(csv_report['name'].unique())]
# csv_report = pd.concat([csv_report_old, csv_report])
csv_report = csv_report.set_index('name')
csv_report = (csv_report*100).round(decimals=1)
if current_report is not None:
current_report = current_report[~current_report.index.isin(csv_report.index)]
csv_report = | pd.concat([current_report, csv_report]) | pandas.concat |
from datetime import datetime
from os import system
import pandas as pd
import json
def merge_mysql_csv():
mysql_gdax = pd.read_csv('/home/bitnami/backfire/data/resources/gdax_mysql.csv')
most_recent_date = datetime.strptime(mysql_gdax.time.max(), '%Y-%m-%d %H:%M:%S')
mysql_gdax = mysql_gdax[pd.to_datetime(mysql_gdax['time']) < most_recent_date]
old_gdax = pd.read_csv('/home/bitnami/backfire/data/resources/coinbase_fixed_2017-01-01_current.csv')
old_gdax = | pd.concat([old_gdax, mysql_gdax]) | pandas.concat |
import random
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
NaT,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestDataFrameSortValues:
def test_sort_values(self):
frame = DataFrame(
[[1, 1, 2], [3, 1, 0], [4, 5, 6]], index=[1, 2, 3], columns=list("ABC")
)
# by column (axis=0)
sorted_df = frame.sort_values(by="A")
indexer = frame["A"].argsort().values
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=["A"], ascending=[False])
tm.assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=["B", "C"])
expected = frame.loc[[2, 1, 3]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=["B", "C"], ascending=False)
tm.assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=["B", "A"], ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=2, inplace=True)
# by row (axis=1): GH#10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis="columns")
expected = frame.reindex(columns=["B", "A", "C"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
msg = r"Length of ascending \(5\) != length of by \(2\)"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=0, ascending=[True] * 5)
def test_sort_values_by_empty_list(self):
# https://github.com/pandas-dev/pandas/issues/40258
expected = DataFrame({"a": [1, 4, 2, 5, 3, 6]})
result = expected.sort_values(by=[])
tm.assert_frame_equal(result, expected)
assert result is not expected
def test_sort_values_inplace(self):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True)
assert return_value is None
expected = frame.sort_values(by="A")
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by=1, axis=1, inplace=True)
assert return_value is None
expected = frame.sort_values(by=1, axis=1)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", ascending=False, inplace=True)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True
)
assert return_value is None
expected = frame.sort_values(by=["A", "B"], ascending=False)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
result = frame.sort_values(by=["A", "B"])
indexer = np.lexsort((frame["B"], frame["A"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["A", "B"], ascending=False)
indexer = np.lexsort(
(frame["B"].rank(ascending=False), frame["A"].rank(ascending=False))
)
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["B", "A"])
indexer = np.lexsort((frame["A"], frame["B"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
def test_sort_values_multicolumn_uint64(self):
# GH#9918
# uint64 multicolumn sort
df = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
}
)
df["a"] = df["a"].astype(np.uint64)
result = df.sort_values(["a", "b"])
expected = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
},
index=pd.Index([1, 0]),
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nan(self):
# GH#3917
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]}
)
# sort one column only
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{"A": [np.nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, np.nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3],
)
sorted_df = df.sort_values(["A"], na_position="first", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=["B", "A"])
sorted_df = df.sort_values(by=1, axis=1, na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{"A": [1, 1, 2, 4, 6, 8, np.nan], "B": [2, 9, np.nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2],
)
sorted_df = df.sort_values(["A", "B"])
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 2, 9, np.nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], ascending=[1, 0], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{"A": [8, 6, 4, 2, 1, 1, np.nan], "B": [4, 5, 5, np.nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2],
)
sorted_df = df.sort_values(["A", "B"], ascending=[0, 1], na_position="last")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_descending_sort(self):
# GH#6399
df = DataFrame(
[[2, "first"], [2, "second"], [1, "a"], [1, "b"]],
columns=["sort_col", "order"],
)
sorted_df = df.sort_values(by="sort_col", kind="mergesort", ascending=False)
tm.assert_frame_equal(df, sorted_df)
@pytest.mark.parametrize(
"expected_idx_non_na, ascending",
[
[
[3, 4, 5, 0, 1, 8, 6, 9, 7, 10, 13, 14],
[True, True],
],
[
[0, 3, 4, 5, 1, 8, 6, 7, 10, 13, 14, 9],
[True, False],
],
[
[9, 7, 10, 13, 14, 6, 8, 1, 3, 4, 5, 0],
[False, True],
],
[
[7, 10, 13, 14, 9, 6, 8, 1, 0, 3, 4, 5],
[False, False],
],
],
)
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_sort_values_stable_multicolumn_sort(
self, expected_idx_non_na, ascending, na_position
):
# GH#38426 Clarify sort_values with mult. columns / labels is stable
df = DataFrame(
{
"A": [1, 2, np.nan, 1, 1, 1, 6, 8, 4, 8, 8, np.nan, np.nan, 8, 8],
"B": [9, np.nan, 5, 2, 2, 2, 5, 4, 5, 3, 4, np.nan, np.nan, 4, 4],
}
)
# All rows with NaN in col "B" only have unique values in "A", therefore,
# only the rows with NaNs in "A" have to be treated individually:
expected_idx = (
[11, 12, 2] + expected_idx_non_na
if na_position == "first"
else expected_idx_non_na + [2, 11, 12]
)
expected = df.take(expected_idx)
sorted_df = df.sort_values(
["A", "B"], ascending=ascending, na_position=na_position
)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_categorial(self):
# GH#16793
df = DataFrame({"x": Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)})
expected = df.copy()
sorted_df = df.sort_values("x", kind="mergesort")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_datetimes(self):
# GH#3461, argsort / lexsort differences for a datetime column
df = DataFrame(
["a", "a", "a", "b", "c", "d", "e", "f", "g"],
columns=["A"],
index=date_range("20130101", periods=9),
)
dts = [
Timestamp(x)
for x in [
"2004-02-11",
"2004-01-21",
"2004-01-26",
"2005-09-20",
"2010-10-04",
"2009-05-12",
"2008-11-12",
"2010-09-28",
"2010-09-28",
]
]
df["B"] = dts[::2] + dts[1::2]
df["C"] = 2.0
df["A1"] = 3.0
df1 = df.sort_values(by="A")
df2 = df.sort_values(by=["A"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["B"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["C", "B"])
tm.assert_frame_equal(df1, df2)
def test_sort_values_frame_column_inplace_sort_exception(self, float_frame):
s = float_frame["A"]
with pytest.raises(ValueError, match="This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_values_nat_values_in_int_column(self):
# GH#14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
int_values = (2, int(NaT.value))
float_values = (2.0, -1.797693e308)
df = DataFrame(
{"int": int_values, "float": float_values}, columns=["int", "float"]
)
df_reversed = DataFrame(
{"int": int_values[::-1], "float": float_values[::-1]},
columns=["int", "float"],
index=[1, 0],
)
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(
{"datetime": [Timestamp("2016-01-01"), NaT], "float": float_values},
columns=["datetime", "float"],
)
df_reversed = DataFrame(
{"datetime": [NaT, Timestamp("2016-01-01")], "float": float_values[::-1]},
columns=["datetime", "float"],
index=[1, 0],
)
df_sorted = df.sort_values(["datetime", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["datetime", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df)
# Ascending should not affect the results.
df_sorted = df.sort_values(["datetime", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
def test_sort_nat(self):
# GH 16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_na_position_with_categories(self):
# GH#22556
# Positioning missing value properly when column is Categorical.
categories = ["A", "B", "C"]
category_indices = [0, 2, 4]
list_of_nans = [np.nan, np.nan]
na_indices = [1, 3]
na_position_first = "first"
na_position_last = "last"
column_name = "c"
reversed_categories = sorted(categories, reverse=True)
reversed_category_indices = sorted(category_indices, reverse=True)
reversed_na_indices = sorted(na_indices)
df = DataFrame(
{
column_name: Categorical(
["A", np.nan, "B", np.nan, "C"], categories=categories, ordered=True
)
}
)
# sort ascending with na first
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + categories, categories=categories, ordered=True
)
},
index=na_indices + category_indices,
)
tm.assert_frame_equal(result, expected)
# sort ascending with na last
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
categories + list_of_nans, categories=categories, ordered=True
)
},
index=category_indices + na_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na first
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + reversed_categories,
categories=categories,
ordered=True,
)
},
index=reversed_na_indices + reversed_category_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na last
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
reversed_categories + list_of_nans,
categories=categories,
ordered=True,
)
},
index=reversed_category_indices + reversed_na_indices,
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nat(self):
# GH#16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_na_position_with_categories_raises(self):
df = DataFrame(
{
"c": Categorical(
["A", np.nan, "B", np.nan, "C"],
categories=["A", "B", "C"],
ordered=True,
)
}
)
with pytest.raises(ValueError, match="invalid na_position: bad_position"):
df.sort_values(by="c", ascending=False, na_position="bad_position")
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ignore_index, output_index",
[
({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, False, [2, 1, 0]),
(
{"A": [1, 2, 3], "B": [2, 3, 4]},
{"A": [3, 2, 1], "B": [4, 3, 2]},
True,
[0, 1, 2],
),
(
{"A": [1, 2, 3], "B": [2, 3, 4]},
{"A": [3, 2, 1], "B": [4, 3, 2]},
False,
[2, 1, 0],
),
],
)
def test_sort_values_ignore_index(
self, inplace, original_dict, sorted_dict, ignore_index, output_index
):
# GH 30114
df = DataFrame(original_dict)
expected = DataFrame(sorted_dict, index=output_index)
kwargs = {"ignore_index": ignore_index, "inplace": inplace}
if inplace:
result_df = df.copy()
result_df.sort_values("A", ascending=False, **kwargs)
else:
result_df = df.sort_values("A", ascending=False, **kwargs)
tm.assert_frame_equal(result_df, expected)
tm.assert_frame_equal(df, DataFrame(original_dict))
def test_sort_values_nat_na_position_default(self):
# GH 13230
expected = DataFrame(
{
"A": [1, 2, 3, 4, 4],
"date": pd.DatetimeIndex(
[
"2010-01-01 09:00:00",
"2010-01-01 09:00:01",
"2010-01-01 09:00:02",
"2010-01-01 09:00:03",
"NaT",
]
),
}
)
result = expected.sort_values(["A", "date"])
tm.assert_frame_equal(result, expected)
def test_sort_values_item_cache(self, using_array_manager):
# previous behavior incorrect retained an invalid _item_cache entry
df = DataFrame(np.random.randn(4, 3), columns=["A", "B", "C"])
df["D"] = df["A"] * 2
ser = df["A"]
if not using_array_manager:
assert len(df._mgr.blocks) == 2
df.sort_values(by="A")
ser.values[0] = 99
assert df.iloc[0, 0] == df["A"][0]
def test_sort_values_reshaping(self):
# GH 39426
values = list(range(21))
expected = DataFrame([values], columns=values)
df = expected.sort_values(expected.index[0], axis=1, ignore_index=True)
tm.assert_frame_equal(df, expected)
class TestDataFrameSortKey: # test key sorting (issue 27237)
def test_sort_values_inplace_key(self, sort_by_key):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True, key=sort_by_key)
assert return_value is None
expected = frame.sort_values(by="A", key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=1, axis=1, inplace=True, key=sort_by_key
)
assert return_value is None
expected = frame.sort_values(by=1, axis=1, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by="A", ascending=False, inplace=True, key=sort_by_key
)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True, key=sort_by_key
)
expected = frame.sort_values(by=["A", "B"], ascending=False, key=sort_by_key)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_key(self):
df = DataFrame(np.array([0, 5, np.nan, 3, 2, np.nan]))
result = df.sort_values(0)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(0, key=lambda x: x + 5)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(0, key=lambda x: -x, ascending=False)
expected = df.iloc[[0, 4, 3, 1, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_by_key(self):
df = DataFrame(
{
"a": np.array([0, 3, np.nan, 3, 2, np.nan]),
"b": np.array([0, 2, np.nan, 5, 2, np.nan]),
}
)
result = df.sort_values("a", key=lambda x: -x)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a", "b"], key=lambda x: -x)
expected = df.iloc[[3, 1, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a", "b"], key=lambda x: -x, ascending=False)
expected = df.iloc[[0, 4, 1, 3, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_by_key_by_name(self):
df = DataFrame(
{
"a": np.array([0, 3, np.nan, 3, 2, np.nan]),
"b": np.array([0, 2, np.nan, 5, 2, np.nan]),
}
)
def key(col):
if col.name == "a":
return -col
else:
return col
result = df.sort_values(by="a", key=key)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a"], key=key)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by="b", key=key)
expected = df.iloc[[0, 1, 4, 3, 2, 5]]
tm.assert_frame_equal(result, expected)
result = df.sort_values(by=["a", "b"], key=key)
expected = df.iloc[[1, 3, 4, 0, 2, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_values_key_string(self):
df = DataFrame(np.array([["hello", "goodbye"], ["hello", "Hello"]]))
result = df.sort_values(1)
expected = df[::-1]
tm.assert_frame_equal(result, expected)
result = df.sort_values([0, 1], key=lambda col: col.str.lower())
tm.assert_frame_equal(result, df)
result = df.sort_values(
[0, 1], key=lambda col: col.str.lower(), ascending=False
)
expected = df.sort_values(1, key=lambda col: col.str.lower(), ascending=False)
tm.assert_frame_equal(result, expected)
def test_sort_values_key_empty(self, sort_by_key):
df = DataFrame(np.array([]))
df.sort_values(0, key=sort_by_key)
df.sort_index(key=sort_by_key)
def test_changes_length_raises(self):
df = DataFrame({"A": [1, 2, 3]})
with pytest.raises(ValueError, match="change the shape"):
df.sort_values("A", key=lambda x: x[:1])
def test_sort_values_key_axes(self):
df = DataFrame({0: ["Hello", "goodbye"], 1: [0, 1]})
result = df.sort_values(0, key=lambda col: col.str.lower())
expected = df[::-1]
tm.assert_frame_equal(result, expected)
result = df.sort_values(1, key=lambda col: -col)
expected = df[::-1]
tm.assert_frame_equal(result, expected)
def test_sort_values_key_dict_axis(self):
df = DataFrame({0: ["Hello", 0], 1: ["goodbye", 1]})
result = df.sort_values(0, key=lambda col: col.str.lower(), axis=1)
expected = df.loc[:, ::-1]
tm.assert_frame_equal(result, expected)
result = df.sort_values(1, key=lambda col: -col, axis=1)
expected = df.loc[:, ::-1]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_sort_values_key_casts_to_categorical(self, ordered):
# https://github.com/pandas-dev/pandas/issues/36383
categories = ["c", "b", "a"]
df = DataFrame({"x": [1, 1, 1], "y": ["a", "b", "c"]})
def sorter(key):
if key.name == "y":
return pd.Series(
Categorical(key, categories=categories, ordered=ordered)
)
return key
result = df.sort_values(by=["x", "y"], key=sorter)
expected = DataFrame(
{"x": [1, 1, 1], "y": ["c", "b", "a"]}, index=pd.Index([2, 1, 0])
)
tm.assert_frame_equal(result, expected)
@pytest.fixture
def df_none():
return DataFrame(
{
"outer": ["a", "a", "a", "b", "b", "b"],
"inner": [1, 2, 2, 2, 1, 1],
"A": np.arange(6, 0, -1),
("B", 5): ["one", "one", "two", "two", "one", "one"],
}
)
@pytest.fixture(params=[["outer"], ["outer", "inner"]])
def df_idx(request, df_none):
levels = request.param
return df_none.set_index(levels)
@pytest.fixture(
params=[
"inner", # index level
["outer"], # list of index level
"A", # column
[("B", 5)], # list of column
["inner", "outer"], # two index levels
[("B", 5), "outer"], # index level and column
["A", ("B", 5)], # Two columns
["inner", "outer"], # two index levels and column
]
)
def sort_names(request):
return request.param
@pytest.fixture(params=[True, False])
def ascending(request):
return request.param
class TestSortValuesLevelAsStr:
def test_sort_index_level_and_column_label(
self, df_none, df_idx, sort_names, ascending
):
# GH#14353
# Get index levels from df_idx
levels = df_idx.index.names
# Compute expected by sorting on columns and the setting index
expected = df_none.sort_values(
by=sort_names, ascending=ascending, axis=0
).set_index(levels)
# Compute result sorting on mix on columns and index levels
result = df_idx.sort_values(by=sort_names, ascending=ascending, axis=0)
tm.assert_frame_equal(result, expected)
def test_sort_column_level_and_index_label(
self, df_none, df_idx, sort_names, ascending
):
# GH#14353
# Get levels from df_idx
levels = df_idx.index.names
# Compute expected by sorting on axis=0, setting index levels, and then
# transposing. For some cases this will result in a frame with
# multiple column levels
expected = (
df_none.sort_values(by=sort_names, ascending=ascending, axis=0)
.set_index(levels)
.T
)
# Compute result by transposing and sorting on axis=1.
result = df_idx.T.sort_values(by=sort_names, ascending=ascending, axis=1)
tm.assert_frame_equal(result, expected)
def test_sort_values_pos_args_deprecation(self):
# https://github.com/pandas-dev/pandas/issues/41485
df = DataFrame({"a": [1, 2, 3]})
msg = (
r"In a future version of pandas all arguments of DataFrame\.sort_values "
r"except for the argument 'by' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = df.sort_values("a", 0)
expected = | DataFrame({"a": [1, 2, 3]}) | pandas.DataFrame |
import os
import glob
import psycopg2
import psycopg2.extras
import pandas as pd
from sql_queries import *
def process_song_file(cur, filepath):
"""
Reads a raw song JSON file and splits its records into the songs and
artists tables
:param cur: Postgres cursor
:param filepath: A path to a file to process
:return: void
"""
# open song file
df = pd.read_json(filepath, lines=True)
# ideally should be added in batch, iterative approach is just for simplicity
# not sure if it's a good option to store data in array, just testing batch insert
songs = []
artists = []
for index, row in df.iterrows():
songs.append((row.song_id, row.title, row.artist_id, row.year, row.duration))
artists.append((row.artist_id, row.artist_name, row.artist_location,
row.artist_latitude, row.artist_longitude))
psycopg2.extras.execute_batch(cur, song_table_insert, songs)
psycopg2.extras.execute_batch(cur, artist_table_insert, artists)
def process_log_file(cur, filepath):
"""
This function is responsible for splitting raw log data and saving it
in different postgres tables
:param cur: Postgres cursor
:param filepath: A path to a file to process
:return: void
"""
# open log file
df = pd.read_json(filepath, lines=True)
# filter by NextSong action
df = df[df.page == 'NextSong']
# convert timestamp column to datetime
df['ts'] = pd.to_datetime(df['ts'], unit='ms')
t = df.copy()
# insert time data records
time_data = (t.ts, t.ts.dt.hour, t.ts.dt.day, t.ts.dt.dayofweek,
t.ts.dt.month, t.ts.dt.year, t.ts.dt.weekday)
column_labels = ['start_time', 'hour', 'day', 'week',
'month', 'year', 'weekday']
time_df = | pd.DataFrame(columns=column_labels) | pandas.DataFrame |
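# Hedged usage sketch (added, not part of the original ETL file): one way the two
# helpers above might be driven. The connection string, database name and data
# directories are assumptions, not values taken from this file.
def process_data(cur, conn, filepath, func):
    # walk the directory tree and apply `func` to every JSON file found
    all_files = []
    for root, dirs, files in os.walk(filepath):
        all_files.extend(glob.glob(os.path.join(root, '*.json')))
    for datafile in all_files:
        func(cur, datafile)
        conn.commit()
# conn = psycopg2.connect('host=127.0.0.1 dbname=sparkifydb user=student password=student')
# cur = conn.cursor()
# process_data(cur, conn, filepath='data/song_data', func=process_song_file)
# process_data(cur, conn, filepath='data/log_data', func=process_log_file)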
#!/usr/bin/env python3
import pandas as pd
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.metrics import accuracy_score
def nonconvex_clusters():
return | pd.DataFrame() | pandas.DataFrame |
#############################################################
# Begin defining Dash app layout
# code sections
# 1 Environment setup
# 2 Setup Dataframes
# 3 Define Useful Functions
# 4 Heatmap UI controls
# 5 Curves plot UI controls
# 6 Navbar definition
# 7 Blank figure to display during initial app loading
# 8 Overall app layout
# 9 Dynamic UI callbacks
# 10 Callback for Updating Heat Map Figure
# 11 Callback for Adding Rows to curve_plot_df (dataframe defining which curves to plot)
# 12 Callback for Updating Curves Plot Figure
# 13 Callback for Updating the first Epidemiology Sandbox Figure
# 14 Callbacks to Update UI of the Second Epidemiology Sandbox
# 15 Callback for Updating the Second Epidemiology Sandbox Figure
import time
import os
import platform
import json
import pickle
import base64
from urllib.request import urlopen
import boto3
import pandas as pd
import numpy as np
from scipy.integrate import solve_ivp
import matplotlib, matplotlib.cm as cm
import datetime as dt
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_bootstrap_components as dbc
from plotly import subplots
from plotly import graph_objects as go
#################################################################
# 1 Environment Setup
# setup global variables
proj_path = ""
print("The execution environment is: ")
print(platform.release())
if os.name == "nt":
# running on my local Windows machine
ENV = "local"
os.chdir("C:/Users/adiad/Anaconda3/envs/CovidApp36/covidapp/")
elif "microsoft" in platform.release():
# example: "4.19.104-microsoft-standard"
# running on wsl in a docker container I made
ENV = "docker-wsl"
proj_path = "/docker_app/"
elif ("aws" in platform.release()) | ("amzn" in platform.release()):
# examples: "4.4.0-1074-aws", "4.14.158-129.185.amzn2.x86_64"
# running in a docker container I made
ENV = "docker-aws"
else:
# running on heroku server
ENV = "heroku"
if ENV in ["docker-aws", "heroku"]:
# download covid data from aws s3 bucket
os.environ["AWS_CONFIG_FILE"] = proj_path + "secret_credentials/config"
os.environ["AWS_SHARED_CREDENTIALS_FILE"] = proj_path + "secret_credentials/credentials"
bucket_name = "my-covid-data-7918"
local_file_path = proj_path + "data_clean/"
covid_filenames = ["Johns_Hopkins_Clean.pkl", "init_heatmap.pkl"]
s3 = boto3.client("s3")
for file_name in covid_filenames:
os.remove(local_file_path + file_name)
s3.download_file(bucket_name, file_name, local_file_path + file_name)
print("Finished downloading covid data from AWS.")
# set graphic elements & color palette
invis = "rgba(0,0,0,0)"
update_jh_data = True # controls whether Johns Hopkins data will be updated
data_path = proj_path + "data_clean/"
secrets_path = proj_path + "secret_credentials/"
# setting up images for rendering
image_path = proj_path + "images/"
cross_icon_image = base64.b64encode(open(image_path + "icon.png", "rb").read())
herd_immunity_image = base64.b64encode(open(image_path + "Herd_Immunity_Fig.png", "rb").read())
# get mapbox token
token = open(secrets_path + ".mapbox_token").read()
# read US county geojson file
# from: https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json
with open(data_path + "us_county_geo.json") as f:
us_counties_json = json.load(f)
# read US state geojson file
# from: https://eric.clst.org/tech/usgeojson/ (States with 5m resolution)
with open(data_path + "us_states_geo.json") as f:
us_states_json = json.load(f)
# read China province geojson file
# from: https://github.com/secsilm/plotly-choropleth-mapbox-demo/blob/master/china_province.geojson
with open(data_path + "china_province_geo2.json") as f:
china_json = json.load(f)
# read Australia state geojson file
# from: https://github.com/rowanhogan/australian-states/blob/master/states.geojson
with open(data_path + "australia_state_geo2.json") as f:
australia_json = json.load(f)
# read Canadian geojson file
# from: https://download2.exploratory.io/maps/canada_provinces.zip
with open(data_path + "canada_provinces_geo.json") as f:
canada_json = json.load(f)
# read world geojson file
# from: https://github.com/datasets/geo-countries/blob/master/data/countries.geojson
with open(data_path + "all_countries_geo.json") as f:
world_json = json.load(f)
# read initial heatmap figure file
with open(data_path + "init_heatmap.pkl", "rb") as f:
init_heatmap = pickle.load(f)
#################################################################
# 2 Setup Dataframes
# read dataframes from pickle files
df = pd.read_pickle(data_path + "Johns_Hopkins_Clean.pkl")
# add Active variables
def add_active_col(var_suffix, df):
confirmed = df["Confirmed" + var_suffix].values
recovered = np.clip(df["Recovered" + var_suffix].values, 0, None)
deaths = np.clip(df["Deaths" + var_suffix].values, 0, None)
df["Active" + var_suffix] = confirmed - recovered - deaths
# correct occurrences where Recovered + Deaths > Confirmed
# (where negative value rolls back to an enormous positive value)
mask = ((recovered + deaths) > confirmed)
df.loc[mask, "Active" + var_suffix] = 0
return df
df = add_active_col("", df)
df = add_active_col("PerDate", df)
df = add_active_col("PerCapita", df)
df = add_active_col("PerDatePerCapita", df)
# define a dataframe that defines which geographic areas to plot infection curves
curve_plot_data = [[0, "United States of America", "New York", "nan"],
[1, "United States of America", "Massachusetts", "nan"],
[2, "United States of America", "Indiana", "nan"]]
curve_plot_cols = ["Row ID", "Country/Region", "Province/State", "County"]
curve_plot_df = pd.DataFrame(curve_plot_data, columns=curve_plot_cols)
# define a dataframe that defines the dynamic parameter values for the simulation
# in sandbox 2
sandbox2_df = pd.DataFrame([[0, 14, 3.0, True, True], \
[50, 14, 1.5, False, True]], \
columns=["t", "d", "r", "In Base", "In Alt"])
#################################################################
# 3 Define Useful Functions
# converts numpy's datetime64 dtype (used by pandas) to datetime.datetime()
def numpy_dt64_to_dt(dt64):
day_timestamp_dt = (dt64 - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')
day_dt = dt.datetime.utcfromtimestamp(day_timestamp_dt)
return day_dt
# converts numpy's datetime64 dtype (used by pandas) to a string
def numpy_dt64_to_str(dt64):
day_dt = numpy_dt64_to_dt(dt64)
return day_dt.strftime("%b %d")
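# Example (hypothetical, not executed by the app):
# numpy_dt64_to_str(np.datetime64("2020-04-15")) returns "Apr 15"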
# Define function for predicting epidemic, used in sandboxes
# assuming 1 person is infected in the whole population of size N
# and the params d & r are provided in a nested list arranged as:
# [[t0, d0, r0], [t1, d1, r1], ...]
# where t1, t2, etc. represent the times when new values of d & r take effect
# dur defines the number of days to simulate (np.inf runs until the epidemic burns out)
def predict_sir(N, params_t, dur):
# define a function which returns the SIR derivatives
# [dS/dt, dI/dt, dR/dt] = [-beta*S*I/N, beta*S*I/N - gamma*I, gamma*I]
def SIR(t, y):
S = y[0]
I = y[1]
R = y[2]
return [-beta*S*I/N, beta*S*I/N-gamma*I, gamma*I]
# define a function which extracts individual parameters given the time index
def get_params(t_ind):
# get basic parameters
t = params_t[t_ind][0]
d = params_t[t_ind][1]
r = params_t[t_ind][2]
# derive exponential function parameters
gamma = 1 / d
beta = r * gamma
return t, gamma, beta
# simulated population sub-group sizes
sir_init_pop = [N - 1, 1, 0] # [S, I, R]
# set initial values for loop variables
epidemic_stopped = False
n_days = 0
continue_calc = True
removed = 0
n_periods = len(params_t)
period_ind = 0
t_period_loop = params_t[0][1] # sim will pause to check termination criterion
t_start, gamma, beta = get_params(period_ind)
if n_periods == 1:
t_period_end = t_period_loop
else:
period_ind_max = n_periods - 1
t_end, ignore1, ignore2 = get_params(period_ind + 1)
t_period_end = t_end
while continue_calc:
# predict SIR for loop period days
predict_period_sir = solve_ivp(SIR, [0, t_period_end], sir_init_pop, \
t_eval=np.arange(0, t_period_end, 1))
# append loop results to previous results
if removed == 0:
t = predict_period_sir["t"]
s = predict_period_sir["y"][0]
i = predict_period_sir["y"][1]
r = predict_period_sir["y"][2]
else:
# segmenting the sim into periods causes the first day's prediction
# to be a repeat of the results from the last loop's last day, so
# drop the first day
t = np.concatenate((t, t_start - 1 + predict_period_sir["t"][1:]))
s = np.concatenate((s, predict_period_sir["y"][0][1:]))
i = np.concatenate((i, predict_period_sir["y"][1][1:]))
r = np.concatenate((r, predict_period_sir["y"][2][1:]))
# update loop variables with new period results
n_days = len(t)
removed = r[-1]
sir_init_pop = [s[-1], i[-1], r[-1]]
# look for epidemic burnout
period_i = predict_period_sir["y"][1]
if period_i[-1] < period_i[0]:
# infected population is shrinking
if (period_i[0] - period_i[-1]) < 1:
# change in the size of the infected population
# over the loop period is < 1
epidemic_stopped = True
if n_periods > 1:
if period_ind_max > period_ind + 1:
# simulate the next period until its end
period_ind += 1
t_start, gamma, beta = get_params(period_ind)
t_end, ignore1, ignore2 = get_params(period_ind + 1)
t_period_end = t_end - t_start + 1
elif period_ind_max > period_ind:
# simulate the last period until termination criteria are met
period_ind += 1
t_start, gamma, beta = get_params(period_ind)
t_period_end = params_t[period_ind][1]
else:
# continue simulating the last period until termination criteria are met
t_start = t[-1] + 1
else:
# continue simulating the only period until termination criteria are met
t_start = t[-1] + 1
# determine whether to continue looping
if np.isinf(dur):
continue_calc = not epidemic_stopped
else:
continue_calc = (dur > n_days)
# trim results to desired duration
if len(t) > dur:
t = t[:dur + 1]
s = s[:dur + 1]
i = i[:dur + 1]
r = r[:dur + 1]
return np.column_stack((t, s, i, r))
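# A minimal sketch (hypothetical helper with hypothetical values, not called by
# the app) of how predict_sir can be used: simulate a population of 10,000 with
# d = 14 throughout, comparing a constant r = 3.0 against a scenario where r
# drops to 1.5 on day 50, running until the epidemic burns out.
def _example_predict_sir():
    baseline = predict_sir(10000, [[0, 14, 3.0]], np.inf)
    distanced = predict_sir(10000, [[0, 14, 3.0], [50, 14, 1.5]], np.inf)
    # each result is an array with one row per simulated day and columns [t, S, I, R]
    return baseline, distanced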
# find the most recent date in the initial heatmap
init_days = np.sort(df[df["MapScope"] == "US Counties"].Date.unique())
init_heatmap_date = numpy_dt64_to_dt(init_days[-1])
# Basic setup of Dash app
external_stylesheets = [dbc.themes.COSMO]
btn_color = "primary"
navbar_color = "primary"
navbar_is_dark = True
# dash instantiation
app = dash.Dash(__name__, external_stylesheets=external_stylesheets, assets_folder='assets')
server = app.server
# adding Google Analytics
app.index_string = """<!DOCTYPE html>
<html>
<head>
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-44205806-4"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-44205806-4');
</script>
{%metas%}
<title>{%title%}</title>
{%favicon%}
{%css%}
</head>
<body>
{%app_entry%}
<footer>
{%config%}
{%scripts%}
{%renderer%}
</footer>
</body>
</html>"""
#################################################################
# 4 Heatmap UI controls
heat_ctrls_row1 = \
dbc.Row([
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Map Scope", addon_type="prepend"),
dbc.Select(
id="map-scope",
options=[
{"label": "Australian States", "value": "Australia"},
{"label": "Canadian Provinces", "value": "Canada"},
{"label": "Chinese Provinces", "value": "China"},
{"label": "US Counties", "value": "UScounties"},
{"label": "US States", "value": "USstates"},
{"label": "Whole World", "value": "World"}
],
value="UScounties"
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Heat Variable", addon_type="prepend"),
dbc.Select(
id="map-var",
options=[
{"label": "Confirmed", "value": "Confirmed"},
{"label": "Active", "value": "Active"},
{"label": "Recovered", "value": "Recovered"},
{"label": "Deaths", "value": "Deaths"}
],
value="Confirmed"
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("New or Total Cases", addon_type="prepend"),
dbc.Select(
id="map-calc",
options=[
{"label": "Total Cases to Date", "value": "Total"},
{"label": "New Cases on Date", "value": "PerDate"}
],
value="Total"
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Heat & Bar Scales", addon_type="prepend"),
dbc.Select(
id="map-scale",
options=[
{"label": "Linear", "value": "Linear"},
{"label": "Logarithmic", "value": "Logarithmic"}
],
value="Logarithmic"
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Normalize Heat & Bar", addon_type="prepend"),
dbc.Select(
id="map-norm-type",
options=[
{"label": "None", "value": "None"},
{"label": "Per Capita", "value": "PerCapita"}
],
value="None"
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Normalize Per", addon_type="prepend"),
dbc.Select(
id="map-norm-val",
options=[
{"label": "1 Capita", "value": 1},
{"label": "10 Capita", "value": 10},
{"label": "100 Capita", "value": 100},
{"label": "1k Capita", "value": 1000},
{"label": "10k Capita", "value": 10000},
{"label": "100k Capita", "value": 100000},
{"label": "1M Capita", "value": 1000000}
],
value=100000
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
], style={'padding-left': 20, 'padding-right': 20, 'margin-top': 5})
heat_cntrls_accordion = html.Div([
dbc.Card([
dbc.CardHeader(
html.H1(
dbc.Button(
"Plot Controls",
color=btn_color,
id="heat-edit-toggle",
), style={"padding-bottom": 6}
), style={"padding-bottom": 0, "padding-top": 0}
),
dbc.Collapse([
heat_ctrls_row1],
id="collapse-heat-edit",
),
]),
], className="accordion")
#################################################################
# 5 Curves plot UI controls
curve_ctrls_row1 = \
dbc.Row([
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Country", addon_type="prepend"),
dbc.Select(
id="curve-country",
options=[{"label": country, "value": country} for country in \
np.sort(df["Country/Region"].unique())],
value="United States of America"
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("State", addon_type="prepend"),
dbc.Select(
id="curve-state",
options=[],
disabled=True,
value=""
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("County", addon_type="prepend"),
dbc.Select(
id="curve-county",
options=[],
disabled=True,
value=""
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
], style={'padding-left': 20, 'padding-right': 20, 'margin-top': 5})
curve_ctrls_row2 = \
dbc.Row([
dbc.Col(html.Div(""), md=3, xs=1, style={'textAlign': 'right', 'margin-top': 0}),
dbc.Col(html.Div([
dbc.Button("Add", id="curve-add", n_clicks=0, color=btn_color)
]), md=1, xs=2, style={"textAlign": "center", "margin-top": 0, "padding-left": 0}),
# Hidden div inside the app that tracks how many times the Add button has been clicked
# This enables a determination for whether Add button triggered the edit_plotted_curves callback
html.Div(0, id='curve-add-click-count', style={'display': 'none'}),
dbc.Col(html.Div([
dbc.Button("Clear All", id="curve-clear", n_clicks=0, color=btn_color)
]), md=2, xs=2, style={"textAlign": "center", "margin-top": 0, "padding-left": 0}),
# Hidden div inside the app that tracks how many times the Clear All button has been clicked
# This enables a determination for whether Clear All button triggered the edit_plotted_curves callback
html.Div(0, id='curve-clear-click-count', style={'display': 'none'}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Drop Row by ID", addon_type="prepend"),
dbc.Select(
id="curve-drop",
options=[{"label": val, "value": val} for val in curve_plot_df["Row ID"].values],
value=""
)
])
]), md=3, xs=6, style={"padding": "5px 10px"}),
dbc.Col(html.Div(""), md=3, xs=1, style={'textAlign': 'right', 'margin-top': 0})
], style={'padding-left': 20, 'padding-right': 20, 'margin-top': 5})
curve_ctrls_row3 = \
dbc.Row([
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("New or Total Case", addon_type="prepend"),
dbc.Select(
id="curve-calc",
options=[
{"label": "Total Cases to Date", "value": "Total"},
{"label": "New Cases on Date", "value": "PerDate"}
],
value="PerDate"
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Normalize", addon_type="prepend"),
dbc.Select(
id="curve-norm-type",
options=[
{"label": "None", "value": "None"},
{"label": "Per Capita", "value": "PerCapita"}
],
value="None"
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Normalize Per", addon_type="prepend"),
dbc.Select(
id="curve-norm-val",
options=[
{"label": "1 Capita", "value": 1},
{"label": "10 Capita", "value": 10},
{"label": "100 Capita", "value": 100},
{"label": "1k Capita", "value": 1000},
{"label": "10k Capita", "value": 10000},
{"label": "100k Capita", "value": 100000},
{"label": "1M Capita", "value": 1000000}
],
value=100000
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Zero Date", addon_type="prepend"),
dbc.Select(
id="curve-zero",
options=[
{"label": "None (just use dates)", "value": "None"},
{"label": "When 1 case is reported", "value": "Total"},
{"label": "When 1 case per 10k capita", "value": "PerCapita"},
],
value="None"
)
])
]), md=6, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Case Scale", addon_type="prepend"),
dbc.Select(
id="curve-scale",
options=[
{"label": "Linear", "value": "linear"},
{"label": "Logarithmic", "value": "log"},
],
value="log"
)
])
]), md=6, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Case Types", addon_type="prepend"),
dbc.Checklist(
id="curve-type",
options=[
{"label": "Confirmed", "value": "Confirmed"},
{"label": "Active", "value": "Active"},
{"label": "Recovered", "value": "Recovered"},
{"label": "Deaths", "value": "Deaths"}
],
value=["Confirmed", "Deaths"],
inline=True,
custom=True,
style={"display": "inline-block", "margin-left": 10, "margin-top": 8}
)
])
]), xl=6, lg=7, md=12, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon(
"Tune Curve Fit",
addon_type="prepend",
id="curve-avg-label",
style={"width": 140, "padding-right": 0}
),
html.Span([
html.Span([
dcc.Slider(
id="curve-avg-period",
marks={1: "1", 7: "7", 14: "14", 21: "21", 28: "28"},
min=1,
max=28,
step=1,
value=14,
included=False
),
], style={"width": "100%", "display": "inline-block"})
], style={"width": "60%", "text-align": "left", "padding": "10px 0 0 0"})
]),
dbc.Tooltip(
"Curve fitting is calculated with a moving average. This parameter " + \
"determines the max number of days to use in averaging each point.",
target="curve-avg-label",
)
]), xl=6, lg=5, md=8, xs=12, style={"padding": "5px 10px"}),
], style={'padding-left': 20, 'padding-right': 20, 'margin-top': 5, 'margin-bottom': 10})
data_tbl = \
dbc.Row([
dbc.Col(
html.Div(""), md=3, xs=1, style={'textAlign': 'right', 'margin-top': 0}
),
dbc.Col(html.Div([
dash_table.DataTable(
id="data-table",
data=curve_plot_df.to_dict('records'),
columns=[{"id": c, "name": c}
for c in curve_plot_df.columns],
editable=False,
style_cell={
'textAlign': 'center'
},
style_cell_conditional=[
{
'if': {'column_id': c},
'textAlign': 'center'
} for c in ['Date', 'Region']
],
style_data_conditional=[
{
'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(248, 248, 248)'
}
],
style_header={
'backgroundColor': 'rgb(230, 230, 230)',
'fontWeight': 'bold'
}
)]), md=6, xs=10, style={'textAlign': 'right', 'margin-top': 0}
),
dbc.Col(
html.Div(""), md=3, xs=1, style={'textAlign': 'right', 'margin-top': 0}
),
], style={'margin-bottom': 10, 'margin-top': 10})
curve_cntrls_accordion = html.Div([
dbc.Card([
dbc.CardHeader(
html.H1(
dbc.Button(
"Curve Picker",
color=btn_color,
id="curve-edit-toggle",
), style={"padding-bottom": 6}
), style={'padding-bottom': 0, 'padding-top': 0}
),
dbc.Collapse([
curve_ctrls_row1,
curve_ctrls_row2,
# visualize the curve_plot_df
data_tbl,
# Hidden div inside the app that allows the curve_plot_df to be shared among callbacks
html.Div([curve_plot_df.to_json(date_format='iso', orient='split')],
id='curve-plot-df', style={'display': 'none'})],
id="collapse-curve-edit",
),
]),
dbc.Card([
dbc.CardHeader(
html.H1(
dbc.Button(
"Plot Settings",
color=btn_color,
id="curve-setting-toggle",
), style={"padding-bottom": 6}
), style={'padding-bottom': 0, 'padding-top': 0}
),
dbc.Collapse([
curve_ctrls_row3],
id="collapse-curve-setting",
),
]),
], className="accordion")
#################################################################
# 6 Navbar definition
dropdown_menu_items = dbc.DropdownMenu(
children=[
dbc.DropdownMenuItem("Discussion of this App", href="https://buckeye17.github.io/COVID-Dashboard/"),
dbc.DropdownMenuItem("About the Author", href="https://buckeye17.github.io/about/"),
dbc.DropdownMenuItem("LinkedIn Profile", href="https://www.linkedin.com/in/chris-raper/"),
dbc.DropdownMenuItem("Github Repo", href="https://github.com/buckeye17/seecovid"),
dbc.DropdownMenuItem("Powered by plotly|Dash", href="https://plotly.com/dash/")
],
nav=True,
in_navbar=True,
label="Menu",
)
#################################################################
# 7 Blank figure to display during initial app loading
axopts = dict(showticklabels=False)
blank_fig = go.Figure()
blank_fig.update_layout(
paper_bgcolor=invis,
plot_bgcolor=invis,
xaxis=axopts,
yaxis=axopts,
annotations=[dict(x=2.5,
y=4,
xref="x1",
yref="y1",
text="Please wait while the heatmap is initialized",
showarrow=False,
font=dict(size=16)
)]
)
# define sandbox2 dynamic table
sandbox2_tbl = \
html.Div([
dash_table.DataTable(
id="sandbox2-data-table",
data=sandbox2_df.to_dict('records'),
columns=[{"id": c, "name": c} for c in sandbox2_df.columns],
editable=False,
style_cell={
'fontSize': '14px',
'textAlign': 'center',
'width': '100px',
'minWidth': '100px',
'maxWidth': '100px'
},
style_data_conditional=[
{
'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(248, 248, 248)'
}
],
style_header={
'backgroundColor': 'rgb(230, 230, 230)',
'fontWeight': 'bold'
}
)
], style={"margin": 10, "width": "40%", "padding-left": 15})
#################################################################
# 8 Overall app layout
app.layout = html.Div([
# Banner/header block
dbc.Navbar(
dbc.Container([
# left side of navbar: logo & app name
html.A(
# Use row and col to control vertical alignment of logo / brand-
dbc.Row(
[
dbc.Col(html.Img(
src='data:image/png;base64,{}'.format(cross_icon_image.decode()),
height="40px"
)),
dbc.Col(dbc.NavbarBrand([
"COVID-19 Dashboard",
html.Br(),
html.Div("Developed by <NAME>", style={"fontSize": "small"})
], className="ml-2")),
],
align="center", no_gutters=True, className="ml-2",
),
href="https://seecovid.herokuapp.com/",
),
# right side of navbar: nav links & menu
dbc.NavbarToggler(id="navbar-toggler"),
dbc.Collapse(
dbc.Nav([
dbc.NavItem(dbc.NavLink("My Portfolio", href="https://buckeye17.github.io/")),
dropdown_menu_items
], className="ml-auto", navbar=True, style={"margin-right": 100}),
id="navbar-collapse", navbar=True,
),
]),
color=navbar_color,
dark=navbar_is_dark
),
# define tabs which provide different perspectives on data
dbc.Tabs([
# heatmap tab
dbc.Tab([
heat_cntrls_accordion,
dbc.Row(dbc.Spinner(type="grow", color="primary", fullscreen=True), id="initial-spinner"),
# Date Picker
dbc.Row([
dbc.Col(html.Div([""]), xs=5),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Select Plotted Date:", addon_type="prepend"),
dcc.DatePickerSingle(
id="heat-date-picker",
date=init_heatmap_date,
display_format="MMM Do, YYYY"
)
])
]), xs=7),
], style={'padding-left': 20, 'padding-right': 20, 'margin-top': 15, 'margin-bottom': 0}),
dbc.Row(
dbc.Col(html.Div([dcc.Loading(dcc.Graph(id="heatmap", figure=blank_fig), type="cube")]))
),
], label="Heatmaps"),
# curves tab
dbc.Tab([
curve_cntrls_accordion,
dbc.Row(dbc.Col(html.Div([dcc.Loading(dcc.Graph(id="curves",
responsive=True,
style={"height": 400}),
type="cube")]))),
], label="The Curves"),
# epidemiology tab
dbc.Tab([
# this tab will consist of a single row, which contains a single column
# that is centered horizontally in the page
dbc.Row([
dbc.Col([
# section header for Sandbox #1
html.Div([dcc.Markdown('''
#### Sandbox #1: Varying Basic Parameters of Infectious Disease
''', style={"margin": 20, "textAlign": "center"}
)]),
# intro for Sandbox #1
html.Div([dcc.Markdown('''
The following sandbox allows you to simulate two scenarios of a generic epidemic,
assuming a population of 10,000 people and that 1 of them is infected
on day zero. The sandbox allows you to adjust the underlying parameters of
the epidemic. These parameters are:
* **d**: the average number of days someone is infectious
* **r**: AKA the basic reproduction number, the number of people an infectious
person would infect if everyone they contact is susceptible.
With these parameters, the sandbox will predict how the fixed population
will move through the 3 stages of infection: susceptible, infected,
removed. For further discussion of the underlying modeling method, it has
been provided further below.
''', style={"margin": 20, "textAlign": "justify"}
)]),
# Sandbox #1
html.Div([
# Sandbox #1 title
html.Div([dcc.Markdown('''
##### Epidemic Simulation Sandbox #1
''', style={"margin": 20, "textAlign": "center"}
)]),
# UI for Scenario #1 of Sandbox #1
dbc.Row([
dbc.Col(["Scenario #1"], md=2, sm=12, \
style={"text-align": "center", "margin": "10px 0"}),
dbc.Col([
html.B("d0"), ": ", html.Span("28", id="sandbox1-scenario1-d-text"),
], md=1, sm=2, style={"text-align": "right", "margin": "10px 0", \
"padding-right": 0}),
dbc.Col([
dcc.Slider(
id="sandbox1-scenario1-d",
marks={1: "1", 7: "7", 14: "14", 21: "21", 28: "28"},
min=1,
max=28,
step=1,
value=14,
included=False
)
], md=4, sm=10, style={"margin": "10px 0", "padding-left": 0}),
dbc.Col([
html.B("r0"), ": ", html.Span("8", id="sandbox1-scenario1-r-text"),
], md=1, sm=2, style={"text-align": "right", "margin": "10px 0", \
"padding-right": 0}),
dbc.Col([
dcc.Slider(
id="sandbox1-scenario1-r",
marks={0: "0", 1: "1", 2: "2", 4: "4", 6: "6", 8: "8"},
min=0,
max=8,
step=0.1,
value=1.5,
included=False
)
], md=4, sm=10, style={"margin": "10px 0", "padding-left": 0}),
]),
# UI for Scenario #2 of Sandbox #1
dbc.Row([
dbc.Col(["Scenario #2"], md=2, sm=12, \
style={"text-align": "center", "margin": "10px 0"}),
dbc.Col([
html.B("d0"), ": ", html.Span("28", id="sandbox1-scenario2-d-text"),
], md=1, sm=2, style={"text-align": "right", "margin": "10px 0", \
"padding-right": 0}),
dbc.Col([
dcc.Slider(
id="sandbox1-scenario2-d",
marks={1: "1", 7: "7", 14: "14", 21: "21", 28: "28"},
min=1,
max=28,
step=1,
value=14,
included=False
)
], md=4, sm=10, style={"margin": "10px 0", "padding-left": 0}),
dbc.Col([
html.B("r0"), ": ", html.Span("8", id="sandbox1-scenario2-r-text"),
], md=1, sm=2, style={"text-align": "right", "margin": "10px 0", \
"padding-right": 0}),
dbc.Col([
dcc.Slider(
id="sandbox1-scenario2-r",
marks={0: "0", 1: "1", 2: "2", 4: "4", 6: "6", 8: "8"},
min=0,
max=8,
step=0.1,
value=3,
included=False
)
], md=4, sm=10, style={"margin": "10px 0", "padding-left": 0}),
]),
# Area Plot for Sandbox #1
dcc.Loading(dcc.Graph(id="sandbox1-area-fig",
responsive=True,
style={"height": 400}), type="dot"),
# Lines Plot for Sandbox #1
dcc.Loading(dcc.Graph(id="sandbox1-line-fig",
responsive=True,
style={"height": 200}), type="dot"),
], style={"border-style": "solid", "border-color": "#aaaaaa", "padding": 10}),
# section header for Sandbox #2
html.Div([dcc.Markdown('''
#### Sandbox #2: Time-Dependence of Basic Parameters of Infectious Disease
''', style={"margin-top": 40, "margin-bottom": 20, "textAlign": "center"}
)]),
# intro for Sandbox #2
html.Div([dcc.Markdown('''
This second sandbox is similar to the first, but it allows you
to vary the parameters of the epidemic in time, whereas the first
sandbox simulated constant parameter values. With COVID-19,
social distancing was implemented to reduce the **r** parameter
of the disease (AKA "slowing the spread") and would reduce the
total number of infected if sustained.
In this sandbox you can chose the initial parameter values, then
add time points when the parameters will change values. You can add
as many time points as you want. The Baseline scenario will
consist of all the parameter value changes except for the final
change. The Alternate scenario will consist of all the parameter value
changes. The "In Base" and "In Alt" table columns should clarify
this point.
''', style={"margin": 20, "textAlign": "justify"}
)]),
# Sandbox #2
html.Div([
# Title for Sandbox #2
html.Div([dcc.Markdown('''
##### Epidemic Simulation Sandbox #2
''', style={"padding": "20px 0", "textAlign": "center", \
"border-bottom": "solid", "border-width": "thin"}
)]),
# UI for initial conditions of Sandbox #2
dbc.Row([
dbc.Col(["Initial (t0) Values"], md=2, sm=12, \
style={"text-align": "center", "margin": "10px 0"}),
dbc.Col([
html.B("d0"), ": ", html.Span("28", id="sandbox2-baseline-d-text"),
], md=1, sm=2, style={"text-align": "right", "margin": "10px 0", \
"padding-right": 0}),
dbc.Col([
dcc.Slider(
id="sandbox2-baseline-d",
marks={1: "1", 7: "7", 14: "14", 21: "21", 28: "28"},
min=1,
max=28,
step=1,
value=14,
included=False
)
], md=4, sm=10, style={"margin": "10px 0", "padding-left": 0}),
dbc.Col([
html.B("r0"), ": ", html.Span("8", id="sandbox2-baseline-r-text"),
], md=1, sm=2, style={"text-align": "right", "margin": "10px 0", \
"padding-right": 0}),
dbc.Col([
dcc.Slider(
id="sandbox2-baseline-r",
marks={0: "0", 1: "1", 2: "2", 4: "4", 6: "6", 8: "8"},
min=0,
max=8,
step=0.1,
value=3,
included=False
)
], md=4, sm=10, style={"margin": "10px 0", "padding-left": 0}),
]),
# UI for adding or editing dynamic values of Sandbox #2
# these UI elements have a light blue background to distinguish
# their function from the row above, which pertains to
# initial values, not dynamic values
html.Div([
# UI for defining new dynamic value of d & r in Sandbox #2
dbc.Row([
dbc.Col(["New Values at time t"], md=2, sm=12, \
style={"text-align": "center", "margin": "10px 0"}),
dbc.Col([
html.B("d"), html.Span(": "), html.Span("28", id="sandbox2-new-d-text"),
], md=1, sm=2, style={"text-align": "right", "margin": "10px 0", \
"padding-right": 0}),
dbc.Col([
dcc.Slider(
id="sandbox2-new-d",
marks={1: "1", 7: "7", 14: "14", 21: "21", 28: "28"},
min=1,
max=28,
step=1,
value=14,
included=False
)
], md=4, sm=10, style={"margin": "10px 0", "padding-left": 0}),
dbc.Col([
html.B("r"), html.Span(": "), html.Span("8", id="sandbox2-new-r-text"),
], md=1, sm=2, style={"text-align": "right", "margin": "10px 0", \
"padding-right": 0}),
dbc.Col([
dcc.Slider(
id="sandbox2-new-r",
marks={0: "0", 1: "1", 2: "2", 4: "4", 6: "6", 8: "8"},
min=0,
max=8,
step=0.1,
value=1.5,
included=False
)
], md=4, sm=10, style={"margin": "10px 0", "padding-left": 0}),
]),
# UI for defining the time point when the new dynamic values
# of d & r will take effect, as well to add, clear & edit
# these dynamic values for Sandbox #2
dbc.Row([
dbc.Col([dbc.InputGroup([
dbc.InputGroupAddon("t=", addon_type="prepend"),
dbc.Input(id="sandbox2-new-t", placeholder="", type="number", min=0),
dbc.Tooltip(dcc.Markdown('''
Enter the time (in days) when the **d** & **r** values
should change. The value must be positive.
'''
), target="sandbox2-new-t"
),
])], md=3, sm=4, style={"margin": "10px 0"}),
dbc.Col([
dbc.Button("Add", id="sandbox2-add", n_clicks=0, color=btn_color)
], md=2, sm=4, style={"margin": "10px 0"}),
# Hidden span inside the app that tracks how many times the Add button has been clicked
# This enables a determination for whether Add button triggered the edit_plotted_curves callback
html.Span(0, id='sandbox2-add-click-count', style={'display': 'none'}),
dbc.Col([
dbc.Button("Clear All", id="sandbox2-clear", n_clicks=0, color=btn_color)
], md=2, sm=4, style={"margin": "10px 0"}),
# Hidden span inside the app that tracks how many times the Clear All button has been clicked
# This enables a determination for whether Clear All button triggered the edit_plotted_curves callback
html.Span(0, id='sandbox2-clear-click-count', style={'display': 'none'}),
dbc.Col([
dbc.InputGroup([
dbc.InputGroupAddon("Drop Row @ t=", addon_type="prepend"),
dbc.Select(
id="sandbox2-drop",
options=[{"label": val, "value": val} for val in sandbox2_df.t.values],
value=""
)
]),
], md=5, sm=12, style={"margin": "10px 0"})
]),
], style={"background-color": "#e8f6fc", "padding": 10}),
# UI to display the current dynamic values in table form for
# both the baseline and alternate scenarios of Sandbox #2
dbc.Row([
dbc.Col([
html.Div("All Dynamic Values", \
style={"padding-top": 10, "text-align": "center"}),
# visualize the sandbox2_df
sandbox2_tbl,
# Hidden span inside the app that allows the sandbox2_df to be shared among callbacks
html.Span([
sandbox2_df.to_json(date_format='iso', orient='split')
], id='sandbox2-df', style={'display': 'none'})
], width=9),
], justify="center"),
dcc.Loading(dcc.Graph(id="sandbox2-area-fig",
responsive=True,
style={"height": 400}
), type="dot"),
dcc.Loading(dcc.Graph(id="sandbox2-lines-fig",
responsive=True,
style={"height": 200}
), type="dot"),
], style={"border-style": "solid", "border-color": "#aaaaaa"}),
# section header for discussing modeling methodology
html.Div([dcc.Markdown('''
#### Examining the Fundamentals of Epidemics
''', style={"margin": 20, "textAlign": "center"}
)]),
# body of section discussing modeling methodology
html.Div([dcc.Markdown('''
##### Introduction
The sandboxes above use a simple class of models for epidemics called
compartmental models. Specifically they use an
[SIR compartmental model](https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology),
which segments a population into 3 stages of infection. These stages are
named **Susceptible**, **Infected** and **Removed**. The meaning of these
segments should be self-explanatory, with the clarification that the Removed
segment includes those who have survived the infection (then becoming immune)
as well as those who have died from it.
SIR models have two parameters which govern how quickly infection spreads through
the population. The first is **d**, which represents the average time
someone is infectious. The second is **r0** (pronounced r naught), representing
the average number of people a single infected person would infect
if everyone they contact is susceptible. In some simulations, this value may
change with time, in which case **r0** is the initial value of **r**.
All that remains before simulating an epidemic is to make some assumptions
about the initial condition of the population. The sandboxes above assumed the
population has 10,000 people, with 1 infected person and the remainder are
susceptible.
##### Examining Simulation Results
An epidemic is technically defined as a disease which has **r0** > 1. If a
disease yields **r0** < 1, then each chain of infections will die out. But it
should be noted that **r0** is not solely dependent on the nature of the
disease. It also depends on the behavior of disease's host. So the occurrence
of an epidemic depends on the combination of a disease's efficiency to infect
the susceptible as well as the host's social behavior.
The following figure depicts two scenarios. Scenario 1 assumes **d** = 10 days and
**r0** = 1.5 people while Scenario 2 assumes **d** = 10 and **r0** = 3. Notice
both of these epidemic scenarios end without the disease infecting the whole
population. If **r0** > 1, this occurs because the number of infected people
will peak when the susceptible portion of the population falls to a fraction
1/**r0** of the total. After this point, the number of infected will decline until it
reaches zero. The combination of the declining susceptible sub-population along
with the recovery or death of infected people ensures that epidemic will die out
before it infects everyone. This phenomenon is called **herd immunity**.
''', style={"margin": 20, "textAlign": "justify"}
)]),
dbc.Row(dbc.Col(
html.Div(
html.Img(src="data:image/png;base64,{}".format(herd_immunity_image.decode()),
height=300,
style={"display": "inline-block"}
),
style={"text-align": "center"}
),
width=12,
)),
html.Div([dcc.Markdown('''
Also note that in the two scenarios above, herd immunity was reached with
different sizes of the population never being infected (i.e. those who are still
susceptible). Scenario #1 ends the epidemic with 4,175 never being infected,
while Scenario #2 ends with 595. This illustrates that herd immunity is not only
dependent on the parameters of the epidemic, but is also very sensitive to those
values. The difference was solely due to **r0** being 1.5 or 3.
##### Examining Weaknesses of SIR Models
One manner in which SIR models over-simplify reality is that they assume that
there is no person-to-person variation in the model parameters. Even if the parameters
are allowed to change with time, they still assume that at each time point the
infected will be sick for X days on average and will infect Y people. But in
reality, some people will recover quicker than others, some will shed more
virus and some will be more social.
This leads to the concept of so-called "super-spreaders". These
people account for a disproportionate number of infections. One example is a South Korean
woman referred to as patient 31. Through contact tracing the government had
identified 3,700 COVID-19 patients who could be traced back to her, representing
60% of all known cases by March 1 in South Korea. This was reported by the
[Wall Street Journal](https://www.wsj.com/articles/why-a-south-korean-church-was-the-perfect-petri-dish-for-coronavirus-11583082110).
Compartmental models do not account for any variation in parameters as exhibited
with super-spreaders.
Another shortcoming of this modeling method is that the parameter **d** is a little
misleading. If **d** = 14, then the precise calculation to determine how many
infected people have been removed (by recovery or death) is to divide the number
of infected by 14. This implies that when the number of infected is peaking,
the rate of removal will be greatest at this time. Conversely, when the number of
infected is small, the rate of recovery will be much slower. In reality, the
number of infected should not affect the rate of recovery. This is why **d**
is referred to as an "average" number of infectious days, because this
characteristic actually varies with time in the simulation, even when **d**
is constant.
If you'd like more information on this subject, I would recommend the following
YouTube video.
''', style={"margin": 20, "textAlign": "justify"}
)]),
html.Div(
html.Iframe(width="560", height="315", src="https://www.youtube.com/embed/gxAaO2rsdIs"),
style={"padding": 20})
], sm=12, md=10, xl=8),
], justify="center")
], label="Epidemiology Sandbox"),
# links tab
dbc.Tab([
# this tab will consist of a single row, which contains a single column
# that is centered horizontally in the page
dbc.Row([
dbc.Col([
html.Div([dcc.Markdown('''
##### Useful Dashboards & Visualizations
* [Johns Hopkins COVID-19 Dashboard](https://www.arcgis.com/apps/opsdashboard/index.html#/bda7594740fd40299423467b48e9ecf6)
* [CDC's COVID-NET](https://gis.cdc.gov/grasp/COVIDNet/COVID19_5.html): Provides US Demographics for COVID-19
* [University of Washington IHME COVID-19 Predictions for US](https://covid19.healthdata.org/united-states-of-america)
* [University of Minnesota Center for Infectious Disease Research and Policy](https://www.cidrap.umn.edu/): Provides latest research news on many infectious diseases, including COVID-19
* [Covidly.com](https://covidly.com/): Another COVID-19 dashboard
* Many US state health agencies have produced great COVID-19 dashboards for their state. Just search for them.
##### References
SIR Model Help:
* [Wikipedia](https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology)
* [Oregon State University presentation](http://sites.science.oregonstate.edu/~gibsonn/Teaching/MTH323-010S18/Supplements/Infectious.pdf)
* [A blog post](https://www.lewuathe.com/covid-19-dynamics-with-sir-model.html)
All of the data sources used for this dashboard:
* [Johns Hopkins COVID data CSV files](https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_daily_reports)
* [Australian State Populations](https://en.wikipedia.org/wiki/States_and_territories_of_Australia)
* [Australian State Geo JSON File](https://github.com/rowanhogan/australian-states/blob/master/states.geojson)
* [Canadian Province Populations](https://en.wikipedia.org/wiki/Population_of_Canada_by_province_and_territory)
* [Canadian Province Geo JSON File](https://download2.exploratory.io/maps/canada_provinces.zip)
* [Chinese Province Populations](https://en.wikipedia.org/wiki/Provinces_of_China#List_of_province-level_divisions)
* [Chinese Province Geo JSON File](https://github.com/secsilm/plotly-choropleth-mapbox-demo/blob/master/china_province.geojson)
* [US County Populations (2019 Census Estimate)](https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/co-est2019-alldata.csv)
* [US County Geo JSON File](https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json)
* [US State Geo JSON File](https://eric.clst.org/tech/usgeojson/)
* [All other Country Populations](https://en.wikipedia.org/wiki/List_of_countries_by_population_%28United_Nations%29)
* [All Countries Geo JSON File](https://github.com/datasets/geo-countries/blob/master/data/countries.geojson)
''', style={"margin": 20}
)])
], sm=12, md=10, xl=8, style={"border": "solid", "border-width": "thin", "margin-top": 40}),
], justify="center")
], label="Links & References")
])
])
#################################################################
# 9 Dynamic UI callbacks
# add callback for toggling the right nav menu collapse on small screens
@app.callback(
Output("navbar-collapse", "is_open"),
[Input("navbar-toggler", "n_clicks")],
[State("navbar-collapse", "is_open")],
)
def toggle_navbar_collapse(n, is_open):
if n:
return not is_open
return is_open
# callback for toggling the accordions that contain the heatmap and curve plot controls
@app.callback(
[Output("collapse-heat-edit", "is_open"),
Output("collapse-curve-edit", "is_open"),
Output("collapse-curve-setting", "is_open")],
[Input("heat-edit-toggle", "n_clicks"),
Input("curve-edit-toggle", "n_clicks"),
Input("curve-setting-toggle", "n_clicks")],
[State("collapse-heat-edit", "is_open"),
State("collapse-curve-edit", "is_open"),
State("collapse-curve-setting", "is_open")])
def toggle_accordion(n_heat, n_curve_edit, n_curve_set, is_open_heat, \
is_open_curve_edit, is_open_curve_set):
ctx = dash.callback_context
if ctx.triggered:
button_id = ctx.triggered[0]["prop_id"].split(".")[0]
else:
return False, False, False
if button_id == "heat-edit-toggle" and n_heat:
return not is_open_heat, is_open_curve_edit, is_open_curve_set
elif button_id == "curve-edit-toggle" and n_curve_edit:
return is_open_heat, not is_open_curve_edit, is_open_curve_set
elif button_id == "curve-setting-toggle" and n_curve_set:
return is_open_heat, is_open_curve_edit, not is_open_curve_set
# define curves tab control callbacks
# add callback for defining the contents of the State dropdown
@app.callback(
[Output("curve-state", "options"),
Output("curve-state", "value"),
Output("curve-state", "disabled")],
[Input("curve-country", "value")]
)
def country_selected(country):
default_val = "All of " + country
options = [{"label": default_val, "value": "nan"}]
states_ls = np.sort(df.loc[df["Country/Region"] == country, "Province/State"].unique().tolist())
states_ls = states_ls[states_ls != "nan"]
state_options = [{"label": state, "value": state} for state in states_ls]
if len(states_ls) > 0:
options.extend(state_options)
return options, default_val, False
# add callback for defining the contents of the County dropdown
@app.callback(
[Output("curve-county", "options"),
Output("curve-county", "value"),
Output("curve-county", "disabled")],
[Input("curve-state", "value")]
)
def state_selected(state):
if state == "":
# no state has been selected, so don't give county options
options = []
default_value = ""
county_disabled = True
elif state.startswith("All of ") | (state == "nan"):
# whole state has been selected, so don't give county options
options = []
default_value = ""
county_disabled = True
else:
# a state was selected, determine county options
county_disabled = False
default_value = "All of " + state
options = [{"label": default_value, "value": "nan"}]
county_ls = np.sort(df.loc[df["Province/State"] == state, "County"].unique().tolist())
county_ls = county_ls[county_ls != "nan"]
county_options = [{"label": county, "value": county} for county in county_ls]
if len(county_ls) > 0:
options.extend(county_options)
return options, default_value, county_disabled
#################################################################
# 10 Callback for Updating Heat Map Figure
@app.callback(
[Output("heatmap", "figure"),
Output("initial-spinner", "style"),
Output("heat-date-picker", "min_date_allowed"),
Output("heat-date-picker", "max_date_allowed")],
[Input("map-scope", "value"),
Input("map-var", "value"),
Input("map-calc", "value"),
Input("map-scale", "value"),
Input("map-norm-type", "value"),
Input("map-norm-val", "value"),
Input("heat-date-picker", "date")],
[State("initial-spinner", "style")]
)
def update_heatmap(map_scope, map_var, map_calc, map_scale, map_norm_type, map_norm_val, map_date,
init_spinner_style):
#tic = time.perf_counter()
#toc_a = tic
#toc_b = tic
# for an unknown reason, [map_norm_val] is provided as a string, so cast it back to an int
map_norm_val = int(map_norm_val)
# test if this is the initial execution of this callback
is_init = (init_spinner_style is None)
# only generate a new heatmap if the user initialized this callback
if is_init:
fig = init_heatmap
# determine valid date range for the date picker
plot_df = df[df["MapScope"] == "US Counties"]
days = np.sort(plot_df.Date.unique())
picker_min_date = numpy_dt64_to_dt(days[0])
picker_max_date = numpy_dt64_to_dt(days[-1])
else:
# set null values of map parameters
if map_calc == "Total":
map_calc = ""
if map_norm_type == "None":
map_norm_type = ""
plot_var = map_var + map_calc + map_norm_type
# set variables conditioned on the map scope
if map_scope == "UScounties":
geo_json = us_counties_json
plot_df = df[df["MapScope"] == "US Counties"]
plot_df["AreaLabel"] = plot_df.County.astype(str) + ", " + plot_df["Province/State"].astype(str)
location_var = "FIPS"
geo_json_name_field = None
map_center = {"lat": 37.0902, "lon": -95.7129}
title = "US counties"
init_zoom = 3
elif map_scope == "USstates":
geo_json = us_states_json
plot_df = df[df["MapScope"] == "US States"]
plot_df["AreaLabel"] = plot_df["Province/State"].astype(str)
location_var = "Province/State"
geo_json_name_field = "properties.NAME"
map_center = {"lat": 37.0902, "lon": -95.7129}
title = "US states"
init_zoom = 3
elif map_scope == "China":
geo_json = china_json
plot_df = df[df["MapScope"] == "China Provinces"]
plot_df["AreaLabel"] = plot_df["Province/State"].astype(str)
location_var = "Province/State"
geo_json_name_field = "properties.NL_NAME_1"
map_center = {"lat": 37.110573, "lon": 106.493924}
title = "Chinese provinces"
init_zoom = 2
elif map_scope == "Australia":
geo_json = australia_json
plot_df = df[df["MapScope"] == "Australia States"]
plot_df["AreaLabel"] = plot_df["Province/State"].astype(str)
location_var = "Province/State"
geo_json_name_field = None
map_center = {"lat": -26, "lon": 133 + 25/60}
title = "Australian states"
init_zoom = 3
elif map_scope == "Canada":
geo_json = canada_json
plot_df = df[df["MapScope"] == "Canada Provinces"]
plot_df["AreaLabel"] = plot_df["Province/State"].astype(str)
location_var = "Province/State"
geo_json_name_field = "properties.PRENAME"
map_center = {"lat": 58, "lon": -96 - 48/60}
title = "Canadian Provinces"
init_zoom = 2
elif map_scope == "World":
geo_json = world_json
plot_df = df[df["MapScope"] == "Countries"]
plot_df["AreaLabel"] = plot_df["Country/Region"].astype(str)
location_var = "Country/Region"
geo_json_name_field = "properties.ADMIN"
map_center = {"lat": 0, "lon": 0}
title = "Countries"
init_zoom = 0
# set axis variables conditioned on scale settings
def get_min_max(x_arr):
var_finite = x_arr[(x_arr != 0) & (x_arr != -np.inf) & (x_arr != np.inf)]
if len(var_finite) > 0:
var_min = min(var_finite)
var_max = max(var_finite)
else:
var_min = 0
var_max = 0
return var_min, var_max
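# e.g. (hypothetical): get_min_max(np.array([0, -np.inf, 2.0, 5.0])) returns
# (2.0, 5.0); zeros and infinities are ignored so the color range reflects
# only locations with finite, nonzero case counts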
# determine valid date range for the date picker
days = np.sort(plot_df.Date.unique())
picker_min_date = numpy_dt64_to_dt(days[0])
picker_max_date = numpy_dt64_to_dt(days[-1])
# setup scales
log_txt = ["1e-6", "1e-5", "1e-4", ".001", ".01", ".1", \
"1", "10", "100", "1K", "10K", "100K", "1M"]
map_log_hvr_txt = "Cases per " + log_txt[int(np.log10(map_norm_val)) + 6] + " Capita: "
if map_scale == "Logarithmic":
bar_scale_type = "log"
map_tick_mode = "array"
map_tick_vals = np.arange(-6, 7)
map_tick_txt = log_txt
if map_norm_type == "PerCapita":
plot_df["CaseVar"] = np.log10(plot_df[plot_var]*map_norm_val)
else:
plot_df["CaseVar"] = np.log10(plot_df[plot_var])
var_min, var_max = get_min_max(plot_df.CaseVar.values)
plot_range = np.array([var_min, var_max])
else:
bar_scale_type = "linear"
map_tick_mode = "auto"
map_tick_vals = []
map_tick_txt = []
if map_norm_type == "PerCapita":
plot_df["CaseVar"] = plot_df[plot_var]*map_norm_val
else:
plot_df["CaseVar"] = plot_df[plot_var]
var_min, var_max = get_min_max(plot_df.CaseVar.values)
plot_range = np.array([0, var_max])
if map_var == "Recovered":
heat_color_scale = "ylgn"
bar_color = "rgb(69, 161, 69)"
else:
heat_color_scale = "ylorrd"
bar_color = "rgb(236, 62, 19)"
# limit remaining calcs to data pertaining to picked date
plot_day_df = plot_df[plot_df.Date == map_date]
# define custom hover data
cust_data = np.dstack((plot_day_df.loc[:, map_var + map_calc].values, \
plot_day_df.loc[:, map_var + map_calc + "PerCapita"]. \
values*map_norm_val))[0]
location_series = plot_day_df[location_var]
if map_norm_type == "PerCapita":
bar_txt_format = "{:.2e}"
else:
bar_txt_format = "{:,.0f}"
# define the left bar plot
bar_df = plot_day_df.nlargest(10, "CaseVar", keep="all").reset_index()
bar_df = bar_df.head(10) # nlargest may return more than 10 rows if there are duplicate values
bar_df = bar_df[bar_df.CaseVar > -np.inf]
nrows = bar_df.shape[0]
bar_df = bar_df.iloc[np.arange(nrows - 1, -1, -1),:] # reverse order of top 10 rows
# plotly does not tolerate changing the number of bars in
# a bar graph during animation, so define a function to pad
# data arrays with blank elements so the bar graph always
# has 10 elements
def pad_10_arr(x, pad_val, unique_fill_bool):
xlen = len(x)
if xlen == 10:
result = x
else:
npad = 10 - xlen
fill_arr = np.array([pad_val for i in range(npad)])
# shorten each string fill element in array to make the elements unique
if unique_fill_bool:
fill_arr = [item[i:] for i, item in enumerate(fill_arr)]
result = np.append(fill_arr, x)
return result
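# e.g. (hypothetical): pad_10_arr(np.array([5, 3]), 0, False) returns
# array([0, 0, 0, 0, 0, 0, 0, 0, 5, 3]); with a string pad_val and
# unique_fill_bool=True, each blank fill label is shortened by a different
# amount so plotly treats the padded bar labels as distinct categories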
# only build the bar plot if there is data to plot
if plot_day_df[plot_var].max() > 0:
no_data = False
max_width_label = 25
if map_scope == "UScounties":
# some of the county, state labels are too long, taking up too much space
# in the figure. Long labels will have the county label trimmed with an ellipsis appended.
labels_to_trim = bar_df["AreaLabel"].astype(str).str.len() > max_width_label
county_len_arr = max_width_label - 5 - bar_df.loc[labels_to_trim, "Province/State"].astype(str).str.len().values
county_abbr = [bar_df.loc[labels_to_trim, "County"].astype(str).values[i][:county_len_arr[i]] \
for i in range(len(county_len_arr))]
state_abbr = bar_df.loc[labels_to_trim, "Province/State"].astype(str).values.tolist()
county_state_abbr = [county_abbr[i] + "..., " + state_abbr[i] for i in range(len(county_abbr))]
bar_df.loc[labels_to_trim, "AreaLabel"] = county_state_abbr
elif map_scope == "Australia":
# only one label needs to be trimmed
long_label = "Australian Capital Territory"
labels_to_trim = bar_df["AreaLabel"].astype(str) == long_label
bar_df.loc[labels_to_trim, "AreaLabel"] = long_label[:(max_width_label - 3)] + "..."
# bar labels must be padded so all labels have the same length;
# as some labels disappear and others are introduced,
# varied-length labels cause bad animation behavior
area_labels = [label.rjust(max_width_label) for label in bar_df.AreaLabel.values]
if map_scale == "Logarithmic":
bar_df["CaseVarPlot"] = np.power(np.ones(10)*10, bar_df.CaseVar.values)
else:
bar_df["CaseVarPlot"] = bar_df.CaseVar
bar_df["ValLabels"] = bar_df.CaseVarPlot.astype("float")
bar_fig_data = go.Bar(x=pad_10_arr(bar_df.CaseVarPlot.values, 0, False),
y=pad_10_arr(area_labels, " " * max_width_label, True),
text=pad_10_arr(bar_df.ValLabels.map(bar_txt_format.format).values, "", False),
textposition="auto",
hoverinfo="none",
orientation="h",
marker_color=bar_color,
name="")
else:
no_data = True
bar_fig_data = go.Bar(x=[],
y=[],
orientation="h",
name="")
# build the heatmap
heat_fig_data =go.Choroplethmapbox(geojson=geo_json,
locations=location_series,
featureidkey=geo_json_name_field,
z=plot_day_df.CaseVar,
zmin=plot_range[0], # min([plot_df.CaseVar.min(), 0]),
zmax=plot_range[1], # plot_df.CaseVar.max(),
customdata=cust_data,
name="",
text=plot_day_df.AreaLabel,
hovertemplate="<b>%{text}</b><br>" + \
"<b>Cases</b>: %{customdata[0]:,}<br>" + \
"<b>" + map_log_hvr_txt + "</b>: %{customdata[1]:.2e}",
colorbar=dict(outlinewidth=1,
outlinecolor="#333333",
len=0.9,
lenmode="fraction",
xpad=30,
xanchor="right",
bgcolor=None,
title=dict(text="Cases",
font=dict(size=14)),
tickmode=map_tick_mode,
tickvals=map_tick_vals,
ticktext=map_tick_txt,
tickcolor="#333333",
tickwidth=2,
tickfont=dict(color="#333333",
size=12)),
colorscale=heat_color_scale,
marker_opacity=0.7,
marker_line_width=0)
###########################################################
# The following code block was used in the original app to
# build an animation of the heatmap and bar charts. But
# this function has become too slow as months of data have
# accumulated.
#
## define animation controls
#frame_dur = 1000 # milliseconds, controls animation speed
#fig_ctrls = []
#sliders_dict = dict()
#
## only define the animation controls of there is data to plot
#if plot_df[plot_var].max() > 0:
# fig_ctrls = [dict(type="buttons",
# buttons=[dict(label="Play",
# method="animate",
# args=[None,
# dict(frame=dict(duration=frame_dur,
# redraw=True),
# fromcurrent=True)]),
# dict(label="Pause",
# method="animate",
# args=[[None],
# dict(frame=dict(duration=0,
# redraw=True),
# mode="immediate")])],
# direction="left",
# pad={"r": 10, "t": 35},
# showactive=False,
# x=0.1,
# xanchor="right",
# y=0,
# yanchor="top")]
#
# if (not is_init):
# sliders_dict = dict(active=init_date_ind,
# visible=True,
# yanchor="top",
# xanchor="left",
# currentvalue=dict(font=dict(size=14),
# prefix="Plotted Date: ",
# visible=True,
# xanchor="center"),
# pad=dict(b=10,
# t=10),
# len=0.875,
# x=0.125,
# y=0,
# steps=[])
#
##toc_a = time.perf_counter()
#
## define the animation frames
#fig_frames = []
#
#for day in days:
#
# # this code repeating what was done to build the initial bar plot above
# # .query() method provides faster filtering
# plot_day_df = plot_df.query("Date == @day") #plot_day_df = plot_df[plot_df.Date == day]
# bar_df = plot_day_df.nlargest(10, "CaseVar", keep="all").reset_index()
# bar_df = bar_df.head(10) # nlargest may return more than 10 rows if there are duplicate values
# INF = np.inf
# bar_df = bar_df.query("CaseVar > - @INF") #bar_df = bar_df[bar_df.CaseVar > -np.inf]
# nrows = bar_df.shape[0]
# bar_df = bar_df.iloc[np.arange(nrows - 1, -1, -1),:] # reverse order of top 10 rows
# if map_scope == "UScounties":
# labels_to_trim = bar_df["AreaLabel"].astype(str).str.len() > max_width_label
# county_len_arr = max_width_label - 5 - bar_df.loc[labels_to_trim, "Province/State"].astype(str).str.len().values
# county_abbr = [bar_df.loc[labels_to_trim, "County"].astype(str).values[i][:county_len_arr[i]] \
# for i in range(len(county_len_arr))]
# state_abbr = bar_df.loc[labels_to_trim, "Province/State"].astype(str).values.tolist()
# county_state_abbr = [county_abbr[i] + "..., " + state_abbr[i] for i in range(len(county_abbr))]
# bar_df.loc[labels_to_trim, "AreaLabel"] = county_state_abbr
# elif map_scope == "Australia":
# long_label = "Australian Capital Territory"
# labels_to_trim = bar_df["AreaLabel"].astype(str) == long_label
# bar_df.loc[labels_to_trim, "AreaLabel"] = long_label[:(max_width_label - 3)] + "..."
# area_labels = [label.rjust(max_width_label) for label in bar_df.AreaLabel.values]
# bar_df["ValLabels"] = bar_df.CaseVar.astype("float")
#
# # this code repeats what was done to build the initial heatmap above
# cust_data = np.dstack((plot_day_df.loc[:, map_var + map_calc].values, \
# plot_day_df.loc[:, map_var + map_calc + "PerCapita"]. \
# values*map_norm_val))[0]
# location_series = plot_day_df[location_var]
#
# # define the frame, repeating what was done for the initial plots above
# frame = go.Frame(data=[go.Bar(x=pad_10_arr(bar_df[plot_var].values, 0, False),
# y=pad_10_arr(area_labels, " " * max_width_label, True),
# text=pad_10_arr(bar_df.ValLabels.map(bar_txt_format.format). \
# values, "", False),
# textposition="auto",
# hoverinfo="none",
# name=""),
# go.Choroplethmapbox(locations=location_series,
# featureidkey=geo_json_name_field,
# z=plot_day_df.CaseVar,
# customdata=cust_data,
# name="",
# text=plot_day_df.AreaLabel,
# hovertemplate="<b>%{text}</b><br>" + \
# "<b>Cases</b>: %{customdata[0]:,}<br>" + \
# "<b>" + map_log_hvr_txt + "</b>: %{customdata[1]:.2e}")],
# name=numpy_dt64_to_str(day))
# fig_frames.append(frame)
#
# # define the slider step
# slider_step = dict(args=[[numpy_dt64_to_str(day)],
# dict(mode="immediate",
# frame=dict(duration=300,
# redraw=True))],
# method="animate",
# label=numpy_dt64_to_str(day))
# sliders_dict["steps"].append(slider_step)
#
##toc_b = time.perf_counter()
#
# End of code block for building an animation
###############################################################
# Assemble the entire figure based on the components defined above
fig = subplots.make_subplots(rows=1, cols=2, column_widths=[0.2, 0.8],
subplot_titles=("Top 10 " + title, ""),
horizontal_spacing=0.05,
specs=[[{"type": "bar"},
{"type": "choroplethmapbox"}]])
fig.add_trace(bar_fig_data, row=1, col=1)
fig.add_trace(heat_fig_data, row=1, col=2)
fig.update_layout(mapbox_style="light",
mapbox_zoom=init_zoom,
mapbox_accesstoken=token,
mapbox_center=map_center,
margin={"r": 10, "t": 20, "l": 10, "b": 10},
plot_bgcolor="white")
#sliders=[sliders_dict],
#updatemenus=fig_ctrls)
#fig["frames"] = fig_frames
# update the bar plot axes
if no_data:
fig.update_xaxes(showticklabels=False)
fig.update_yaxes(showticklabels=False)
else:
fig.update_xaxes(type=bar_scale_type,
ticks="outside",
range=plot_range,
showgrid=True,
gridwidth=0.5,
gridcolor="#CCCCCC")
fig.update_yaxes(tickfont=dict(family="Courier New, monospace",
size=13))
if no_data:
# add an annotation explaining that there is no data to plot
fig["layout"]["annotations"] = [dict(x=0,
y=0,
xref="x1",
yref="y1",
text="All<br>" + title + "<br>have reported<br>zero " + \
map_var + "<br>cases to date",
showarrow=False,
font=dict(size=16))]
else:
# modify the bar plot title font properties
fig["layout"]["annotations"][0]["font"] = dict(size=16)
#toc = time.perf_counter()
#print(toc - tic)
#print(toc_a - tic)
#print(toc_b - toc_a)
#print(toc - toc_b)
# return the figure and hide the dbc.Spinner which is shown during initial app loading
return fig, {"display": "none"}, picker_min_date, picker_max_date
#################################################################
# 11 Callback for Adding Rows to curve_plot_df (dataframe define curves to plot)
@app.callback(
[Output("data-table", "data"),
Output("curve-plot-df", "children"),
Output("curve-drop", "options"),
Output("curve-add-click-count", "children"),
Output("curve-clear-click-count", "children")],
[Input("curve-add", "n_clicks"),
Input("curve-clear", "n_clicks"),
Input("curve-drop", "value")],
[State("curve-country", "value"),
State("curve-state", "value"),
State("curve-county", "value"),
State("curve-plot-df", "children"),
State("curve-add-click-count", "children"),
State("curve-clear-click-count", "children")],
)
def edit_plotted_curves(add_click, clear_click, drop_row_id, country, state, \
county, df_as_json, add_click_last, clear_click_last):
# read the df from the hidden div json data
curve_plot_df = pd.read_json(df_as_json[0], orient='split')
# determine whether this callback was triggered by the Add button, the Clear All button
# or Drop Row dropdown
if add_click > add_click_last:
if state.startswith("All of "):
state = "nan"
county = "nan"
elif county.startswith("All of "):
county = "nan"
nrows = curve_plot_df.shape[0]
curve_plot_df.loc[nrows] = [nrows, country, state, county]
elif clear_click > clear_click_last:
curve_plot_df = curve_plot_df.loc[curve_plot_df["Row ID"] == -999]
elif drop_row_id != "":
curve_plot_df = curve_plot_df.loc[curve_plot_df["Row ID"] != int(drop_row_id)]
curve_plot_df = curve_plot_df.reset_index(drop=True)
curve_plot_df["Row ID"] = curve_plot_df.index
# write the new df to the ui data table and to the hidden div
return curve_plot_df.replace("nan", "").to_dict("records"), \
[curve_plot_df.to_json(date_format='iso', orient='split')], \
[{"label": val, "value": val} for val in [""] + curve_plot_df["Row ID"].tolist()], \
add_click, clear_click
#################################################################
# 12 Callback for Updating Curves Plot Figure
@app.callback(
Output("curves", "figure"),
[Input("curve-plot-df", "children"),
Input("curve-calc", "value"),
Input("curve-norm-type", "value"),
Input("curve-norm-val", "value"),
Input("curve-zero", "value"),
Input("curve-type", "value"),
Input("curve-scale", "value"),
Input("curve-avg-period", "value")]
)
def update_curves_plot(curve_plot_df_as_json, calc, norm_type, norm_val, zero_opt, \
types_ls, y_axis_type, avg_period):
# for an unknown reason, [norm_val] is provided as a string, so cast it back to an int
norm_val = int(norm_val)
# define function which gives a string label for order of magnitude (log10)
def logtxt(val):
log_txt_opts = ["1e-6", "1e-5", "1e-4", ".001", ".01", ".1", \
"1", "10", "100", "1K", "10K", "100K", "1M"]
log_txt = log_txt_opts[int(np.log10(val)) + 6]
return log_txt
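# e.g. (hypothetical): logtxt(100000) returns "100K", logtxt(1) returns "1"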
# define a function which plots scatter points and a fitted curve line for a given
# place and variable type
def add_cust_traces(fig, var, place_name, df, color):
# determine the basic variable type to be plotted
if var[:1] == "A":
var_type = "Active"
elif var[:1] == "C":
var_type = "Confirmed"
elif var[:1] == "R":
var_type = "Recovered"
elif var[:1] == "D":
var_type = "Deaths"
# assign marker and line styles depending on how many basic types
        # of variables are to be plotted
var_id = np.where(np.array(types_ls) == var_type)[0][0]
dash_ls = ["solid", "dash", "dot", "longdash"]
symbol_ls = ["circle", "square", "diamond", "triangle-up"]
if zero_opt == "None":
x_axis_var = "Date"
else:
x_axis_var = "Zero_Day"
# define hover text for scatter points
per_cap = " Cases per " + logtxt(norm_val) + " Capita"
base_hover_txt = "<b>" + place_name + "</b><br>" + \
"<b>Date</b>: %{text}" + \
"<br><b>Days Elapsed</b>: %{customdata[0]}"
if calc == "":
hover_txt = base_hover_txt + \
"<br><b>Total " + var_type + " To Date</b>: %{customdata[1]:,.0f}" + \
"<br><b>Total " + var_type + " To Date</b>:<br>" + \
"%{customdata[2]:.2e}" + per_cap
elif calc == "PerDate":
hover_txt = base_hover_txt + \
"<br><b>New " + var_type + " On Date</b>: %{customdata[1]:,.0f}" + \
"<br><b>New " + var_type + " On Date</b>:<br>" + \
"%{customdata[2]:.2e}" + per_cap
# plot scatter data points
fig.add_trace(go.Scatter(x=df[x_axis_var],
y=df[var],
mode='markers',
name="",
marker=dict(symbol=symbol_ls[var_id],
size=8,
color=color,
opacity=0.4),
customdata=np.dstack((df.loc[:, "Zero_Day"].values, \
df.loc[:, var_type + calc].values, \
df.loc[:, var_type + calc + "PerCapita"].values))[0],
text=df.Date.dt.strftime('%B %d, %Y'),
hovertemplate=hover_txt,
showlegend=False))
# define the hover text for the fit curve line
fit_hover_txt = "<b>Curve Fit for " + place_name + "</b><br>" + \
"<b>Date</b>: %{text}" + \
"<br><b>Days Elapsed</b>: %{customdata[0]}"
if calc == "":
fit_hover_txt = fit_hover_txt + \
"<br><b>Fit Total " + var_type + " To Date</b>: %{customdata[1]:,.0f}" + \
"<br><b>Fit Total " + var_type + " To Date</b>:<br>" + \
"%{customdata[2]:.2e}" + per_cap
elif calc == "PerDate":
fit_hover_txt = fit_hover_txt + \
"<br><b>Fit New " + var_type + " On Date</b>: %{customdata[1]:,.0f}" + \
"<br><b>Fit New " + var_type + " On Date</b>:<br>" + \
"%{customdata[2]:.2e}" + per_cap
# plot the fit curve line
fig.add_trace(go.Scatter(x=df[x_axis_var],
y=df[var + "Avg"],
mode='lines',
name="",
line=dict(width=3, dash=dash_ls[var_id], color=color),
customdata=np.dstack((df.loc[:, "Zero_Day"].values, \
df.loc[:, var_type + calc + "Avg"].values, \
df.loc[:, var_type + calc + "PerCapita" + "Avg"].values))[0],
text=df.Date.dt.strftime('%B %d, %Y'),
hovertemplate=fit_hover_txt,
showlegend=False))
return fig
# set null values of plot parameters
if calc == "Total":
calc = ""
if norm_type == "None":
norm_type = ""
# make a list of all curves to be plotted
plot_vars_ls = [plot_type + calc + norm_type for plot_type in types_ls]
# read the df from the hidden div json data
# this df defines the country/state/county areas which are to be plotted
curve_plot_df = pd.read_json(curve_plot_df_as_json[0], orient='split')
# setup matplotlib colors for distinguishing curves
nplaces = curve_plot_df.shape[0]
ncolors = max(nplaces, len(types_ls))
cmap = cm.get_cmap("tab10", ncolors) # PiYG
colors = ["" for i in range(ncolors)]
for i in range(cmap.N):
rgb = cmap(i)[:3] # will return rgba, we take only first 3 so we get rgb
colors[i] = matplotlib.colors.rgb2hex(rgb)
# set options for deriving the Zero_Day column & X-axis label
max_zero_day = 0
y_min, y_max = 0, 1
if zero_opt == "None":
zero_thresh = 1
thresh_var = "Confirmed"
elif zero_opt == "Total":
zero_thresh = 1
thresh_var = "Confirmed"
elif zero_opt == "PerCapita":
zero_thresh = 1/10000
thresh_var = "ConfirmedPerCapita"
# define a blank figure as the default
fig = go.Figure()
# fill the figure with data if places have been identified by the user
if nplaces > 0:
# pandas doesn't like df == np.nan, so tests are needed to determine proper syntax
# define a function which generically filters for country, state & county
def filter_mask_csc(df, country, state, county):
if isinstance(county, str):
mask = (df["Country/Region"] == country) & \
(df["Province/State"] == state) & \
(df["County"] == county)
elif isinstance(state, str):
mask = (df["Country/Region"] == country) & \
(df["Province/State"] == state) & \
(df["County"] == "nan")
else:
mask = (df["Country/Region"] == country) & \
(df["Province/State"] == "nan") & \
(df["County"] == "nan")
return mask
# generate a local df containing only the data that will be plotted
# this will make subsequent df manipulation faster
mask_bool_ls = [filter_mask_csc(df, curve_plot_df["Country/Region"][i],
curve_plot_df["Province/State"][i],
curve_plot_df.County[i]
)
for i in range(nplaces)
]
        # the list of masks needs to be consolidated via OR into a single mask
mask_bool = np.array([False for i in range(df.shape[0])])
for mask_bool_item in mask_bool_ls:
mask_bool = mask_bool | mask_bool_item
plot_df = df[mask_bool]
# ensure line plots will move left to right
plot_df = plot_df.sort_values(["Date"]).reset_index()
        # initialize values to be amended in subsequent for loops
item_counter = 0
min_date = plot_df.Date.max()
max_date = plot_df.Date.min()
# build the figure piecewise, adding traces within for loops
for place_i in range(nplaces):
# isolate data for place_i
curve_row = curve_plot_df.iloc[place_i, :]
var_mask_bool = filter_mask_csc(plot_df, \
curve_row["Country/Region"], \
curve_row["Province/State"], \
curve_row["County"])
plot_var_df = plot_df[var_mask_bool]
# calculate zero day column for place_i
plot_var_df["Zero_Day"] = 0
started_df = plot_var_df[plot_var_df[thresh_var] >= zero_thresh]
start_date_series = started_df.Date[:1] - | pd.Timedelta(days=1) | pandas.Timedelta |
## Prep, join and create metrics to model
# Libraries
import os
import pandas as pd
import numpy as np
import seaborn as sns
from datetime import datetime
import matplotlib.pyplot as plt
# Set working directory
os.chdir("---Your working directory path")
print(os.getcwd())
# Set theme for sns plots
sns.set_theme(style="darkgrid")
# Import the categorized CCS data
CCdata = pd.read_csv("--path to categorized credit card transactions data",header=0, index_col=False)
# remove duplicate rows if any
CCdata.drop_duplicates(subset=None, keep='first', inplace=True)
print(CCdata.shape)
# Import weather data
## Importing from: https://climate.weather.gc.ca/climate_data/daily_data_e.html
weather2019 = | pd.read_csv("en_climate_daily__2019_P1D.csv",header=0) | pandas.read_csv |
"""
This module contains a collection of functions which make plots (saved as png files) using matplotlib, generated from
some model fits and cross-validation evaluation within a MAST-ML run.
This module also contains a method to create python notebooks containing plotted data and the relevant source code from
this module, to enable the user to make their own modifications to the created plots in a straightforward way (useful for
tweaking plots for a presentation or publication).
"""
import math
import statistics
import os
import copy
import pandas as pd
import itertools
import warnings
import logging
from collections.abc import Iterable
from os.path import join
from collections import OrderedDict
from math import log, floor, ceil
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.ensemble._forest import _generate_sample_indices, _get_n_samples_bootstrap
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Ignore the harmless warning about the gelsd driver on mac.
warnings.filterwarnings(action="ignore", module="scipy",
message="^internal gelsd")
# Ignore matplotlib deprecation warning (set as all warnings for now)
warnings.filterwarnings(action="ignore")
import numpy as np
from sklearn.metrics import confusion_matrix, roc_curve, auc, precision_recall_curve
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure, figaspect
from matplotlib.animation import FuncAnimation
from matplotlib.font_manager import FontProperties
from scipy.stats import gaussian_kde, norm
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
# Needed imports for ipynb_maker
#from mastml.utils import nice_range
#from mastml.metrics import nice_names
import inspect
import textwrap
from pandas import DataFrame, Series
import nbformat
from functools import wraps
import forestci as fci
from forestci.calibration import calibrateEB
import copy
matplotlib.rc('font', size=18, family='sans-serif') # set all font to bigger
matplotlib.rc('figure', autolayout=True) # turn on autolayout
# adding dpi as a constant global so it can be changed later
DPI = 250
#logger = logging.getLogger() # only used inside ipynb_maker I guess
# HEADERENDER don't delete this line, it's used by ipynb maker
logger = logging.getLogger('mastml') # the real logger
def ipynb_maker(plot_func):
"""
This method creates Jupyter Notebooks so user can modify and regenerate the plots produced by MAST-ML.
Args:
plot_func: (plot_helper method), a plotting method contained in plot_helper.py which contains the
@ipynb_maker decorator
Returns:
(plot_helper method), the same plot_func as used as input, but after having written the Jupyter notebook with source code to create plot
"""
from mastml import plot_helper # Strange self-import but it works, as had cyclic import issues with ipynb_maker as its own module
@wraps(plot_func)
def wrapper(*args, **kwargs):
# convert everything to kwargs for easier display
# from geniuses at https://stackoverflow.com/a/831164
#kwargs.update(dict(zip(plot_func.func_code.co_varnames, args)))
sig = inspect.signature(plot_func)
binding = sig.bind(*args, **kwargs)
all_args = binding.arguments
# if this is an outdir style function, fill in savepath and delete outdir
if 'savepath' in all_args:
ipynb_savepath = all_args['savepath']
knows_savepath = True
basename = os.path.basename(ipynb_savepath) # fix absolute path problem
elif 'outdir' in all_args:
knows_savepath = False
basename = plot_func.__name__
ipynb_savepath = os.path.join(all_args['outdir'], basename)
else:
raise Exception('you must have an "outdir" or "savepath" argument to use ipynb_maker')
readme = textwrap.dedent(f"""\
This notebook was automatically generated from your MAST-ML run so you can recreate the
plots. Some things are a bit different from the usual way of creating plots - we are
using the [object oriented
interface](https://matplotlib.org/tutorials/introductory/lifecycle.html) instead of
pyplot to create the `fig` and `ax` instances.
""")
# get source of the top of plot_helper.py
header = ""
with open(plot_helper.__file__) as f:
for line in f.readlines():
if 'HEADERENDER' in line:
break
header += line
core_funcs = [plot_helper.stat_to_string, plot_helper.plot_stats, plot_helper.make_fig_ax,
plot_helper.get_histogram_bins, plot_helper.nice_names, plot_helper.nice_range,
plot_helper.nice_mean, plot_helper.nice_std, plot_helper.rounder, plot_helper._set_tick_labels,
plot_helper._set_tick_labels_different, plot_helper._nice_range_helper, plot_helper._nearest_pow_ten,
plot_helper._three_sigfigs, plot_helper._n_sigfigs, plot_helper._int_if_int, plot_helper._round_up,
plot_helper.prediction_intervals]
func_strings = '\n\n'.join(inspect.getsource(func) for func in core_funcs)
plot_func_string = inspect.getsource(plot_func)
# remove first line that has this decorator on it (!!!)
plot_func_string = '\n'.join(plot_func_string.split('\n')[1:])
# put the arguments and their values in the code
arg_assignments = []
arg_names = []
for key, var in all_args.items():
if isinstance(var, DataFrame):
# this is amazing
arg_assignments.append(f"{key} = pd.read_csv(StringIO('''\n{var.to_csv(index=False)}'''))")
elif isinstance(var, Series):
arg_assignments.append(f"{key} = pd.Series(pd.read_csv(StringIO('''\n{var.to_csv(index=False)}''')).iloc[:,0])")
else:
arg_assignments.append(f'{key} = {repr(var)}')
arg_names.append(key)
args_block = ("from numpy import array\n" +
"from collections import OrderedDict\n" +
"from io import StringIO\n" +
"from sklearn.gaussian_process import GaussianProcessRegressor # Need for error plots\n" +
"from sklearn.gaussian_process.kernels import * # Need for error plots\n" +
"from sklearn.ensemble import RandomForestRegressor # Need for error plots\n" +
'\n'.join(arg_assignments))
arg_names = ', '.join(arg_names)
if knows_savepath:
if '.png' not in basename:
basename += '.png'
main = textwrap.dedent(f"""\
import pandas as pd
from IPython.display import Image, display
{plot_func.__name__}({arg_names})
display(Image(filename='{basename}'))
""")
else:
main = textwrap.dedent(f"""\
import pandas as pd
from IPython.display import Image, display
plot_paths = plot_predicted_vs_true(train_quad, test_quad, outdir, label)
for plot_path in plot_paths:
display(Image(filename=plot_path))
""")
nb = nbformat.v4.new_notebook()
readme_cell = nbformat.v4.new_markdown_cell(readme)
text_cells = [header, func_strings, plot_func_string, args_block, main]
cells = [readme_cell] + [nbformat.v4.new_code_cell(cell_text) for cell_text in text_cells]
nb['cells'] = cells
nbformat.write(nb, ipynb_savepath + '.ipynb')
return plot_func(*args, **kwargs)
return wrapper
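# Hedged usage sketch for ipynb_maker: any plotting helper that accepts a 'savepath'
# or 'outdir' argument can be decorated; 'my_parity_plot' below is hypothetical and
# only illustrates the expected call pattern.
#
#   @ipynb_maker
#   def my_parity_plot(y_true, y_pred, savepath):
#       fig, ax = make_fig_ax()
#       ax.scatter(y_true, y_pred)
#       fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
#
# Calling my_parity_plot(...) then writes both the .png and a companion .ipynb that
# reproduces the figure.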
def make_train_test_plots(run, path, is_classification, label, model, train_X, test_X, groups=None):
"""
General plotting method used to execute sequence of specific plots of train-test data analysis
Args:
run: (dict), a particular split_result from masml_driver
path: (str), path to save the generated plots and analysis of split_result designated in 'run'
is_classification: (bool), whether or not the analysis is a classification task
label: (str), name of the y data variable being fit
model: (scikit-learn model object), a scikit-learn model/estimator
train_X: (numpy array), array of X features used in training
test_X: (numpy array), array of X features used in testing
groups: (numpy array), array of group names
Returns:
None
"""
y_train_true, y_train_pred, y_test_true = \
run['y_train_true'], run['y_train_pred'], run['y_test_true']
y_test_pred, train_metrics, test_metrics = \
run['y_test_pred'], run['train_metrics'], run['test_metrics']
train_groups, test_groups = run['train_groups'], run['test_groups']
if is_classification:
# Need these class prediction probabilities for ROC curve analysis
y_train_pred_proba = run['y_train_pred_proba']
y_test_pred_proba = run['y_test_pred_proba']
title = 'train_confusion_matrix'
plot_confusion_matrix(y_train_true, y_train_pred,
join(path, title+'.png'), train_metrics,
title=title)
title = 'test_confusion_matrix'
plot_confusion_matrix(y_test_true, y_test_pred,
join(path, title+'.png'), test_metrics,
title=title)
title = 'train_roc_curve'
        plot_roc_curve(y_train_true, y_train_pred_proba, join(path, title+'.png'))
        title = 'test_roc_curve'
        plot_roc_curve(y_test_true, y_test_pred_proba, join(path, title+'.png'))
        title = 'train_precision_recall_curve'
        plot_precision_recall_curve(y_train_true, y_train_pred_proba, join(path, title+'.png'))
        title = 'test_precision_recall_curve'
        plot_precision_recall_curve(y_test_true, y_test_pred_proba, join(path, title + '.png'))
else: # is_regression
plot_predicted_vs_true((y_train_true, y_train_pred, train_metrics, train_groups),
(y_test_true, y_test_pred, test_metrics, test_groups),
path, label=label)
title = 'train_residuals_histogram'
plot_residuals_histogram(y_train_true, y_train_pred,
join(path, title+'.png'), train_metrics,
title=title, label=label)
title = 'test_residuals_histogram'
plot_residuals_histogram(y_test_true, y_test_pred,
join(path, title+'.png'), test_metrics,
title=title, label=label)
def make_error_plots(run, path, is_classification, label, model, train_X, test_X, rf_error_method, rf_error_percentile,
is_validation, validation_column_name, validation_X, groups=None):
y_train_true, y_train_pred, y_test_true = \
run['y_train_true'], run['y_train_pred'], run['y_test_true']
y_test_pred, train_metrics, test_metrics = \
run['y_test_pred'], run['train_metrics'], run['test_metrics']
train_groups, test_groups = run['train_groups'], run['test_groups']
if is_validation:
y_validation_pred, y_validation_true, prediction_metrics = \
run['y_validation_pred'+'_'+str(validation_column_name)], \
run['y_validation_true'+'_'+str(validation_column_name)], \
run['prediction_metrics']
if is_classification:
logger.debug('There is no error distribution plotting for classification problems, just passing through...')
else: # is_regression
#title = 'train_normalized_error'
#plot_normalized_error(y_train_true, y_train_pred, join(path, title+'.png'), model, error_method, percentile,
# X=train_X, Xtrain=train_X, Xtest=test_X)
title = 'test_normalized_error'
plot_normalized_error(y_test_true, y_test_pred, join(path, title+'.png'), model, rf_error_method,
rf_error_percentile, X=test_X, Xtrain=train_X, Xtest=test_X)
#title = 'train_cumulative_normalized_error'
#plot_cumulative_normalized_error(y_train_true, y_train_pred, join(path, title+'.png'), model, error_method,
# percentile, X=train_X, Xtrain=train_X, Xtest=test_X)
title = 'test_cumulative_normalized_error'
plot_cumulative_normalized_error(y_test_true, y_test_pred, join(path, title+'.png'), model, rf_error_method,
rf_error_percentile, X=test_X, Xtrain=train_X, Xtest=test_X)
# HERE, add your RMS residual vs. error plot function
if model.__class__.__name__ in ['RandomForestRegressor', 'ExtraTreesRegressor', 'GaussianProcessRegressor',
'GradientBoostingRegressor', 'EnsembleRegressor']:
y_all_data = np.concatenate([y_test_true, y_train_true])
plot_real_vs_predicted_error(y_all_data, path, model, data_test_type='test')
if is_validation:
title = 'validation_cumulative_normalized_error'
plot_cumulative_normalized_error(y_validation_true, y_validation_pred, join(path, title+'.png'), model, rf_error_method,
rf_error_percentile, X=validation_X, Xtrain=train_X, Xtest=test_X)
title = 'validation_normalized_error'
plot_normalized_error(y_validation_true, y_validation_pred, join(path, title + '.png'), model, rf_error_method,
rf_error_percentile, X=validation_X, Xtrain=train_X, Xtest=test_X)
if model.__class__.__name__ in ['RandomForestRegressor', 'ExtraTreesRegressor', 'GaussianProcessRegressor',
'GradientBoostingRegressor', 'EnsembleRegressor']:
y_all_data = np.concatenate([y_test_true, y_train_true])
plot_real_vs_predicted_error(y_all_data, path, model, data_test_type='validation')
@ipynb_maker
def plot_confusion_matrix(y_true, y_pred, savepath, stats, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
"""
Method used to generate a confusion matrix for a classification run. Additional information can be found
at: http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
Args:
y_true: (numpy array), array containing the true y data values
y_pred: (numpy array), array containing the predicted y data values
savepath: (str), path to save the plotted confusion matrix
stats: (dict), dict of training or testing statistics for a particular run
normalize: (bool), whether or not to normalize data output as truncated float vs. double
title: (str), title of the confusion matrix plot
cmap: (matplotlib colormap), the color map to use for confusion matrix plotting
Returns:
None
"""
# calculate confusion matrix and lables in correct order
cm = confusion_matrix(y_true, y_pred)
#classes = sorted(list(set(y_true).intersection(set(y_pred))))
classes = sorted(list(set(y_true).union(set(y_pred))))
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
#ax.set_title(title)
# create the colorbar, not really needed but everyones got 'em
mappable = ax.imshow(cm, interpolation='nearest', cmap=cmap)
#fig.colorbar(mappable)
# set x and y ticks to labels
tick_marks = range(len(classes))
ax.set_xticks(tick_marks)
ax.set_xticklabels(classes, rotation='horizontal', fontsize=18)
ax.set_yticks(tick_marks)
ax.set_yticklabels(classes, rotation='horizontal', fontsize=18)
# draw number in the boxes
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
ax.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
# plots the stats
plot_stats(fig, stats, x_align=0.60, y_align=0.90)
ax.set_ylabel('True label')
ax.set_xlabel('Predicted label')
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
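# Hedged example call (hypothetical labels; 'stats' normally comes from the computed
# train or test metrics dict of a split_result):
#   plot_confusion_matrix(y_true=[0, 1, 1, 0], y_pred=[0, 1, 0, 0],
#                         savepath='test_confusion_matrix.png',
#                         stats={'accuracy': 0.75})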
@ipynb_maker
def plot_roc_curve(y_true, y_pred, savepath):
"""
Method to calculate and plot the receiver-operator characteristic curve for classification model results
Args:
y_true: (numpy array), array of true y data values
y_pred: (numpy array), array of predicted y data values
savepath: (str), path to save the plotted ROC curve
Returns:
None
"""
#TODO: have work when probability=False in model params. Suggest user set probability=True!!
#classes = sorted(list(set(y_true).union(set(y_pred))))
#n_classes = y_pred.shape[1]
classes = list(np.unique(y_true))
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(len(classes)):
fpr[i], tpr[i], _ = roc_curve(y_true, y_pred[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
x_align = 0.95
fig, ax = make_fig_ax(aspect_ratio=0.66, x_align=x_align, left=0.15)
colors = ['blue', 'red']
#for i in range(len(classes)):
# ax.plot(fpr[i], tpr[i], color=colors[i], lw=2, label='ROC curve class '+str(i)+' (area = %0.2f)' % roc_auc[i])
ax.plot(fpr[1], tpr[1], color=colors[0], lw=2, label='ROC curve' + ' (area = %0.2f)' % roc_auc[1])
ax.plot([0, 1], [0, 1], color='black', label='Random guess', lw=2, linestyle='--')
ax.set_xticks(np.linspace(0, 1, 5))
ax.set_yticks(np.linspace(0, 1, 5))
_set_tick_labels(ax, maxx=1, minn=0)
ax.set_xlabel('False Positive Rate', fontsize='16')
ax.set_ylabel('True Positive Rate', fontsize='16')
ax.legend(loc="lower right", fontsize=12)
#plot_stats(fig, stats, x_align=0.60, y_align=0.90)
    fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
@ipynb_maker
def plot_precision_recall_curve(y_true, y_pred, savepath):
"""
Method to calculate and plot the precision-recall curve for classification model results
Args:
y_true: (numpy array), array of true y data values
y_pred: (numpy array), array of predicted y data values
savepath: (str), path to save the plotted precision-recall curve
Returns:
None
"""
# Note this only works with probability predictions of the classifier labels.
classes = list(np.unique(y_true))
precision = dict()
recall = dict()
#roc_auc = dict()
for i in range(len(classes)):
precision[i], recall[i], _ = precision_recall_curve(y_true, y_pred[:, i])
x_align = 0.95
fig, ax = make_fig_ax(aspect_ratio=0.66, x_align=x_align, left=0.15)
colors = ['blue', 'red']
#for i in range(len(classes)):
# ax.plot(fpr[i], tpr[i], color=colors[i], lw=2, label='ROC curve class '+str(i)+' (area = %0.2f)' % roc_auc[i])
ax.step(recall[1], precision[1], color=colors[0], lw=2, label='Precision-recall curve')
#ax.fill_between(recall[1], precision[1], alpha=0.4, color=colors[0])
ax.set_xticks(np.linspace(0, 1, 5))
ax.set_yticks(np.linspace(0, 1, 5))
_set_tick_labels(ax, maxx=1, minn=0)
ax.set_xlabel('Recall', fontsize='16')
ax.set_ylabel('Precision', fontsize='16')
ax.legend(loc="upper right", fontsize=12)
#plot_stats(fig, stats, x_align=0.60, y_align=0.90)
    fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
return
@ipynb_maker
def plot_residuals_histogram(y_true, y_pred, savepath,
stats, title='residuals histogram', label='residuals'):
"""
Method to calculate and plot the histogram of residuals from regression model
Args:
y_true: (numpy array), array of true y data values
y_pred: (numpy array), array of predicted y data values
        savepath: (str), path to save the plotted residuals histogram
stats: (dict), dict of training or testing statistics for a particular run
title: (str), title of residuals histogram
label: (str), label used for axis labeling
Returns:
None
"""
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
#ax.set_title(title)
# do the actual plotting
residuals = y_true - y_pred
#Output residuals data and stats to spreadsheet
path = os.path.dirname(savepath)
pd.DataFrame(residuals).describe().to_csv(os.path.join(path,'residual_statistics.csv'))
    pd.DataFrame(residuals).to_csv(os.path.join(path, 'residuals.csv'))
#Get num_bins using smarter method
num_bins = get_histogram_bins(y_df=residuals)
ax.hist(residuals, bins=num_bins, color='b', edgecolor='k')
# normal text stuff
ax.set_xlabel('Value of '+label, fontsize=16)
    ax.set_ylabel('Number of occurrences', fontsize=16)
# make y axis ints, because it is discrete
#ax.yaxis.set_major_locator(MaxNLocator(integer=True))
plot_stats(fig, stats, x_align=x_align, y_align=0.90)
plot_stats(fig, pd.DataFrame(residuals).describe().to_dict()[0], x_align=x_align, y_align=0.60)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
@ipynb_maker
def plot_target_histogram(y_df, savepath, title='target histogram', label='target values'):
"""
Method to plot the histogram of true y values
Args:
y_df: (pandas dataframe), dataframe of true y data values
        savepath: (str), path to save the plotted target histogram
title: (str), title of residuals histogram
label: (str), label used for axis labeling
Returns:
None
"""
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.70
fig, ax = make_fig_ax(aspect_ratio=0.5, x_align=x_align)
#ax.set_title(title)
#Get num_bins using smarter method
num_bins = get_histogram_bins(y_df=y_df)
# do the actual plotting
try:
ax.hist(y_df, bins=num_bins, color='b', edgecolor='k')#, histtype='stepfilled')
except:
print('Could not plot target histgram')
return
# normal text stuff
ax.set_xlabel('Value of '+label, fontsize=16)
    ax.set_ylabel('Number of occurrences', fontsize=16)
# make y axis ints, because it is discrete
#ax.yaxis.set_major_locator(MaxNLocator(integer=True))
plot_stats(fig, dict(y_df.describe()), x_align=x_align, y_align=0.90, fontsize=14)
# Save input data stats to csv
savepath_parse = savepath.split('target_histogram.png')[0]
    y_df.describe().to_csv(os.path.join(savepath_parse, 'input_data_statistics.csv'))
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
@ipynb_maker
def plot_predicted_vs_true(train_quad, test_quad, outdir, label):
"""
Method to create a parity plot (predicted vs. true values)
Args:
train_quad: (tuple), tuple containing 4 numpy arrays: true training y data, predicted training y data,
training metric data, and groups used in training
test_quad: (tuple), tuple containing 4 numpy arrays: true test y data, predicted test y data,
testing metric data, and groups used in testing
outdir: (str), path to save plots to
label: (str), label used for axis labeling
Returns:
None
"""
filenames = list()
y_train_true, y_train_pred, train_metrics, train_groups = train_quad
y_test_true, y_test_pred, test_metrics, test_groups = test_quad
# make diagonal line from absolute min to absolute max of any data point
# using round because Ryan did - but won't that ruin small numbers??? TODO this
#max1 = max(y_train_true.max(), y_train_pred.max(),
# y_test_true.max(), y_test_pred.max())
max1 = max(y_train_true.max(), y_test_true.max())
#min1 = min(y_train_true.min(), y_train_pred.min(),
# y_test_true.min(), y_test_pred.min())
min1 = min(y_train_true.min(), y_test_true.min())
max1 = round(float(max1), rounder(max1-min1))
min1 = round(float(min1), rounder(max1-min1))
for y_true, y_pred, stats, groups, title_addon in \
(train_quad+('train',), test_quad+('test',)):
# make fig and ax, use x_align when placing text so things don't overlap
x_align=0.64
fig, ax = make_fig_ax(x_align=x_align)
# set tick labels
# notice that we use the same max and min for all three. Don't
        # calculate those inside the loop, because they all should be on the same scale and axis
_set_tick_labels(ax, max1, min1)
# plot diagonal line
ax.plot([min1, max1], [min1, max1], 'k--', lw=2, zorder=1)
# do the actual plotting
if groups is None:
ax.scatter(y_true, y_pred, color='blue', edgecolors='black', s=100, zorder=2, alpha=0.7)
else:
handles = dict()
unique_groups = np.unique(np.concatenate((train_groups, test_groups), axis=0))
unique_groups_train = np.unique(train_groups)
unique_groups_test = np.unique(test_groups)
#logger.debug(' '*12 + 'unique groups: ' +str(list(unique_groups)))
colors = ['blue', 'red', 'green', 'purple', 'orange', 'black']
markers = ['o', 'v', '^', 's', 'p', 'h', 'D', '*', 'X', '<', '>', 'P']
colorcount = markercount = 0
for groupcount, group in enumerate(unique_groups):
mask = groups == group
#logger.debug(' '*12 + f'{group} group_percent = {np.count_nonzero(mask) / len(groups)}')
handles[group] = ax.scatter(y_true[mask], y_pred[mask], label=group, color=colors[colorcount],
marker=markers[markercount], s=100, alpha=0.7)
colorcount += 1
if colorcount % len(colors) == 0:
markercount += 1
colorcount = 0
if title_addon == 'train':
to_delete = [k for k in handles.keys() if k not in unique_groups_train]
for k in to_delete:
del handles[k]
elif title_addon == 'test':
to_delete = [k for k in handles.keys() if k not in unique_groups_test]
for k in to_delete:
del handles[k]
ax.legend(handles.values(), handles.keys(), loc='lower right', fontsize=12)
# set axis labels
ax.set_xlabel('True '+label, fontsize=16)
ax.set_ylabel('Predicted '+label, fontsize=16)
plot_stats(fig, stats, x_align=x_align, y_align=0.90)
filename = 'predicted_vs_true_'+ title_addon + '.png'
filenames.append(filename)
fig.savefig(join(outdir, filename), dpi=DPI, bbox_inches='tight')
df = pd.DataFrame({'y_pred': y_pred, 'y_true': y_true})
df.to_csv(join(outdir, 'predicted_vs_true_' + title_addon + '.csv'))
return filenames
def plot_scatter(x, y, savepath, groups=None, xlabel='x', label='target data'):
"""
Method to create a general scatter plot
Args:
x: (numpy array), array of x data
y: (numpy array), array of y data
savepath: (str), path to save plots to
groups: (list), list of group labels
xlabel: (str), label used for x-axis labeling
label: (str), label used for y-axis labeling
Returns:
None
"""
# Set image aspect ratio:
fig, ax = make_fig_ax()
# set tick labels
max_tick_x = max(x)
min_tick_x = min(x)
max_tick_y = max(y)
min_tick_y = min(y)
max_tick_x = round(float(max_tick_x), rounder(max_tick_x-min_tick_x))
min_tick_x = round(float(min_tick_x), rounder(max_tick_x-min_tick_x))
max_tick_y = round(float(max_tick_y), rounder(max_tick_y-min_tick_y))
min_tick_y = round(float(min_tick_y), rounder(max_tick_y-min_tick_y))
#divisor_y = get_divisor(max(y), min(y))
#max_tick_y = round_up(max(y), divisor_y)
#min_tick_y = round_down(min(y), divisor_y)
_set_tick_labels_different(ax, max_tick_x, min_tick_x, max_tick_y, min_tick_y)
if groups is None:
ax.scatter(x, y, c='b', edgecolor='darkblue', zorder=2, s=100, alpha=0.7)
else:
colors = ['blue', 'red', 'green', 'purple', 'orange', 'black']
markers = ['o', 'v', '^', 's', 'p', 'h', 'D', '*', 'X', '<', '>', 'P']
colorcount = markercount = 0
for groupcount, group in enumerate(np.unique(groups)):
mask = groups == group
ax.scatter(x[mask], y[mask], label=group, color=colors[colorcount], marker=markers[markercount], s=100, alpha=0.7)
ax.legend(loc='lower right', fontsize=12)
colorcount += 1
if colorcount % len(colors) == 0:
markercount += 1
colorcount = 0
ax.set_xlabel(xlabel, fontsize=16)
ax.set_ylabel('Value of '+label, fontsize=16)
#ax.set_xticklabels(rotation=45)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
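# Hedged example call (hypothetical data arrays):
#   plot_scatter(np.arange(20), np.random.rand(20), 'scatter.png',
#                xlabel='feature value', label='target data')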
def plot_keras_history(model_history, savepath, plot_type):
# Set image aspect ratio:
fig, ax = make_fig_ax()
keys = model_history.history.keys()
for k in keys:
if 'loss' not in k and 'val' not in k:
metric = k
accuracy = model_history.history[str(metric)]
loss = model_history.history['loss']
if plot_type == 'accuracy':
ax.plot(accuracy, label='training '+str(metric))
ax.set_ylabel(str(metric)+' (Accuracy)', fontsize=16)
try:
validation_accuracy = model_history.history['val_'+str(metric)]
ax.plot(validation_accuracy, label='validation '+str(metric))
except:
pass
if plot_type == 'loss':
ax.plot(loss, label='training loss')
ax.set_ylabel(str(metric)+' (Loss)', fontsize=16)
try:
validation_loss = model_history.history['val_loss']
ax.plot(validation_loss, label='validation loss')
except:
pass
ax.legend(loc='upper right', fontsize=12)
#_set_tick_labels_different(ax, max_tick_x, min_tick_x, max_tick_y, min_tick_y)
ax.set_xlabel('Epochs', fontsize=16)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
return
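# Hedged example call (assumes 'history' is the object returned by keras Model.fit):
#   plot_keras_history(history, 'training_accuracy.png', plot_type='accuracy')
#   plot_keras_history(history, 'training_loss.png', plot_type='loss')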
@ipynb_maker
def plot_best_worst_split(y_true, best_run, worst_run, savepath,
title='Best Worst Overlay', label='target_value'):
"""
Method to create a parity plot (predicted vs. true values) of just the best scoring and worst scoring CV splits
Args:
y_true: (numpy array), array of true y data
best_run: (dict), the best scoring split_result from mastml_driver
worst_run: (dict), the worst scoring split_result from mastml_driver
savepath: (str), path to save plots to
title: (str), title of the best_worst_split plot
label: (str), label used for axis labeling
Returns:
None
"""
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
maxx = max(y_true) # TODO is round the right thing here?
minn = min(y_true)
maxx = round(float(maxx), rounder(maxx-minn))
minn = round(float(minn), rounder(maxx-minn))
ax.plot([minn, maxx], [minn, maxx], 'k--', lw=2, zorder=1)
# set tick labels
_set_tick_labels(ax, maxx, minn)
# do the actual plotting
ax.scatter(best_run['y_test_true'], best_run['y_test_pred'], c='red',
alpha=0.7, label='best test', edgecolor='darkred', zorder=2, s=100)
ax.scatter(worst_run['y_test_true'], worst_run['y_test_pred'], c='blue',
alpha=0.7, label='worst test', edgecolor='darkblue', zorder=3, s=60)
ax.legend(loc='lower right', fontsize=12)
# set axis labels
ax.set_xlabel('True '+label, fontsize=16)
ax.set_ylabel('Predicted '+label, fontsize=16)
#font_dict = {'size' : 10, 'family' : 'sans-serif'}
# Duplicate the stats dicts with an additional label
best_stats = OrderedDict([('Best Run', None)])
best_stats.update(best_run['test_metrics'])
    worst_stats = OrderedDict([('Worst Run', None)])
worst_stats.update(worst_run['test_metrics'])
plot_stats(fig, best_stats, x_align=x_align, y_align=0.90)
plot_stats(fig, worst_stats, x_align=x_align, y_align=0.60)
fig.savefig(savepath + '.png', dpi=DPI, bbox_inches='tight')
df_best = | pd.DataFrame({'best run pred': best_run['y_test_pred'], 'best run true': best_run['y_test_true']}) | pandas.DataFrame |
import datetime
import glob
import os
from scipy import stats
import numpy as np
from dashboard.models import Location, Report
from dashboard.libraries import constants
import pandas as pd
# Update the daily results report
def update_report(row_report_date: datetime.date):
    # Get the column names as a dictionary
column_names = get_column_names(row_report_date)
column_name_province_state = column_names[constants.COLUMN_KEYS[0]]
column_name_country_region = column_names[constants.COLUMN_KEYS[1]]
column_name_latitude = column_names[constants.COLUMN_KEYS[2]]
column_name_longitude = column_names[constants.COLUMN_KEYS[3]]
column_name_confirmed = column_names[constants.COLUMN_KEYS[4]]
column_name_deaths = column_names[constants.COLUMN_KEYS[5]]
column_name_recovered = column_names[constants.COLUMN_KEYS[6]]
if constants.COLUMN_KEYS[7] in column_names.keys():
column_name_active = column_names[constants.COLUMN_KEYS[7]]
else:
column_name_active = None
    # Get the report date as a string
str_report_date = row_report_date.strftime(constants.DATE_FORMAT_REPORT_CSV)
    # Read the CSV file for the given date with pandas
csv_file_name = constants.DIRECTORY_PATH_REPORT_CSV + str_report_date + '.csv'
df_today_report = pd.read_csv(csv_file_name, usecols=column_names.values())
    # ------ Imputation ------
    # Fill blank latitude/longitude rows with 0
    # If the loaded CSV has no column for active cases, derive it
if column_name_active is None:
df_today_report[constants.COLUMNS_ACTIVE_CASES_04] = df_today_report[column_name_confirmed] - df_today_report[column_name_deaths] - df_today_report[column_name_recovered]
column_name_active = constants.COLUMNS_ACTIVE_CASES_04
df_today_report[column_name_latitude] = df_today_report[column_name_latitude].fillna(0)
df_today_report[column_name_longitude] = df_today_report[column_name_longitude].fillna(0)
    # Where province/state is blank, insert the 'Country_Region' value
df_today_report[column_name_province_state] = df_today_report[column_name_province_state].fillna(
df_today_report[column_name_country_region])
    # ------ Imputation complete ------
    # ------ DataFrame preprocessing ------
    # Prepare a DataFrame for computing totals per province/state and country
df_sum = df_today_report[[
column_name_province_state,
column_name_country_region,
column_name_confirmed,
column_name_deaths,
column_name_recovered,
column_name_active
]]
    # Compute totals per province/state and country
df_sum = df_sum.groupby([column_name_province_state, column_name_country_region]).sum()
    # Prepare a DataFrame for computing means per province/state and country
df_average = df_today_report[[
column_name_province_state,
column_name_country_region,
column_name_latitude,
column_name_longitude,
]]
df_mean = df_average.groupby([column_name_province_state, column_name_country_region]).mean()
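    # Illustrative note (hypothetical values): both groupby results are indexed by
    # (Province/State, Country/Region), e.g. ("Hubei", "China") -> summed case counts
    # in df_sum and averaged coordinates in df_mean, so the inner merge below on those
    # two keys lines the aggregates back up per region.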
    # Merge the DataFrames
df = | pd.merge(df_sum, df_mean, on=[column_name_province_state, column_name_country_region], how='inner') | pandas.merge |
"""
Classes for pipeline processing
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/like2/process.py,v 1.31 2018/01/27 15:37:17 burnett Exp $
"""
import os, sys, time, glob
import cPickle as pickle
import numpy as np
import pandas as pd
from scipy import optimize
from skymaps import SkyDir, Band
from uw.utilities import keyword_options
from uw.like2 import (main, tools, sedfuns, maps, sources, localization, roimodel, seeds, fit_diffuse,)
class Process(main.MultiROI):
defaults=(
('outdir', '.', 'output folder'),
('load_kw', {'rings':2}, 'a dict specific for the loading'),
('config_kw', {}, 'additional configuration keywords'),
('localize_flag', False, 'perform localiation'),
('localize_kw', {}, 'keywords for localization'),
('repivot_flag', False, 'repivot the sources'),
('curvature_flag', False, 'check betas, refit if needed'),
('ts_beta_zero', None, 'set to threshold for converting LP<->PL'),
('ts_min', 5, 'Minimum TS for saving after an iteration'),
('dampen', 1.0, 'damping factor: set <1 to dampen, 0 to not fit'),
('selected_pars', None, 'Apply to fit, to select subset of parameters for global fit'),
('counts_dir', None, 'folder for the counts plots'),
('norms_only', False, 'fit to norms only'),
('fix_spectra_flag',False, 'set variable sources to fit norms only, for this and subsequent iterations'),
('countsplot_tsmin', 100, 'minimum for souces in counts plot'),
('source_name', None, 'for localization?'),
('fit_kw', dict(ignore_exception=True), 'extra parameters for fit'),
('associate_flag',False, 'run association'),
('tsmap_dir', None, 'folder for TS maps'),
('sedfig_dir', None, 'folder for sed figs'),
('quiet', False, 'Set false for summary output'),
('finish', False, 'set True to turn on all "finish" output flags'),
('residual_flag', False, 'set True for special residual run; all else ignored'),
('diffuse_key', None, 'set to "gal" or "iso" to evaluate diffuse spectral corrections'),
('profile_flag', False, 'create profile entries for all free sources'),
('tables_flag', False, 'set True for tables run; all else ignored'),
#('xtables_flag', False, 'set True for special tables run; all else ignored'),
('tables_nside', 512, 'nside to use for table generation'),
('table_keys', None, 'list of keys for table generation: if None, all else ignored'),
('seed_key', None, 'set to name of key for seed check run'),
('update_positions_flag',False, 'set True to update positions before fitting'),
('add_seeds_flag', False, 'Add seeds found within the ROI, from the table, "plots/seedcheck/good_seeds.csv"'),
('special_flag', False, 'set for special processing: invoke member func "special"'),
('psc_flag', False, 'Run comparisons with a corresponding psc file'),
('model_counts', None, 'set to run model counts'),
)
@keyword_options.decorate(defaults)
def __init__(self, config_dir, roi_list=None, **kwargs):
""" process the roi object after being set up
"""
keyword_options.process(self, kwargs)
if self.finish:
self.__dict__.update(dampen=0,
localize_flag=True, associate_flag=True,
sedfig_dir='sedfig',
counts_dir='countfig',
tsmap_dir='tsmap_fail',
profile_flag=True,
)
super(Process,self).__init__(config_dir,quiet=self.quiet,)
self.stream = os.environ.get('PIPELINE_STREAMPATH', 'interactive')
#if self.xtables_flag:
# # suppress loading all point sources for from-scratch source finding
# #self.load_kw={'rings':-1} # not now
# pass
if roi_list is not None:
for index in roi_list:
self.process_roi(index)
def process_roi(self, index):
""" special for batch: manage the log file, make sure an exception closes it
"""
print ('Setting up ROI #%04d ...' % index,)
sys.stdout.flush()
self.setup_roi(index)
if self.outdir is not None:
logpath = os.path.join(self.outdir, 'log')
if not os.path.exists(logpath): os.mkdir(logpath)
outtee = tools.OutputTee(os.path.join(logpath, self.name+'.txt'))
else: outtee=None
print ('Processing...')
sys.stdout.flush()
try:
self.process()
finally:
if outtee is not None: outtee.close()
def process(self):
roi=self
dampen=self.dampen
outdir = self.outdir
print ('='*80)
print ('%4d-%02d-%02d %02d:%02d:%02d - %s - %s' %(time.localtime()[:6]+ (roi.name,)+(self.stream,)))
# special processing flags
if self.diffuse_key is not None and self.diffuse_key!='post_gal':
if self.diffuse_key=='iso':
fit_isotropic(self)
return
elif self.diffuse_key=='gal':
if not fit_galactic(self):return
elif self.diffuse_key=='both':
fit_diffuse.fitter(self)
return
elif self.diffuse_key=='both_update':
fit_diffuse.fitter(self, update=True)
# do not return: perform a fit, then update
else:
raise Exception('Unexpected key: {}'.format(self.diffuse_key))
if self.residual_flag:
self.residuals()
return
if self.tables_flag:
self.tables()
return
if self.table_keys is not None:
self.tables(mapkeys=self.table_keys)
return
#if self.xtables_flag:
# self.tables(special=True)
# return
if self.seed_key is not None:
key = self.seed_key
if key=='lost':
if not seeds.add_old_sources(self):
return
else:
if not seeds.add_seeds(self, key, config=self.config) :
# nothing added, so nothing to do with the model for this ROI
write_pickle(self) # make sure to update anyway
return
if self.model_counts is not None:
maps.ModelCountMaps(self, bandlist=self.model_counts, subdir='model_counts' )
return
if self.psc_flag:
psc_check(self)
return
if self.special_flag:
""" special processing, converting something."""
#self.fit_second_order()
zz = sedfuns.add_flat_sed(self,'ALL')
for name, ts in zz:
print ('{:15} {:4.1f}'.format(name,ts))
write_pickle(self)
return
if self.add_seeds_flag:
self.add_sources()
if self.fix_spectra_flag:
# special for monthly or smaller processing
fix_spectra(self)
if self.counts_dir is not None and not os.path.exists(self.counts_dir) :
try: os.makedirs(self.counts_dir) # in case some other process makes it
except: pass
sys.stdout.flush()
init_log_like = roi.log_like()
if self.update_positions_flag:
self.update_positions()
if self.curvature_flag:
print ('====================== Fixing curvature first =======================')
self.fit_curvature('ALL')
roi.print_summary(title='before fit, logL=%0.f'% init_log_like)
fit_sources = [s for s in roi.free_sources if not s.isglobal]
if len(roi.sources.parameters[:])==0 or dampen==0:
print ('===================== not fitting ========================')
else:
fit_kw = self.fit_kw
try:
if self.norms_only:
print ('Fitting parameter names ending in "Norm"')
roi.fit('_Norm', **fit_kw)
roi.fit(select=self.selected_pars, update_by=dampen, **fit_kw)
if self.fix_spectra_flag:
# Check for bad errors,
diag = np.asarray(self.hessian().diagonal())[0]
if np.any(diag<0):
print ('Retrying bad fits, reset ')
for i,v in enumerate(diag):
if v>0: continue
self.fit([i], setpars={i: -13}, **fit_kw)
self.fit(**fit_kw)
change =roi.log_like() - init_log_like
if abs(change)>1.0 :
roi.print_summary(title='after global fit, logL=%0.f, change=%.1f'% (roi.log_like(), change))
if self.repivot_flag:
# repivot, iterating a few times
n = 3
while n>0:
if not self.repivot( fit_sources, select=self.selected_pars): break
n-=1
if self.diffuse_key=='post_gal':
fit_galactic(self)
except Exception as msg:
print ('============== fit failed, no update!! %s'%msg)
raise
def getdir(x ):
if x is None or outdir is None: return None
t = os.path.join(outdir, x)
if not os.path.exists(t): os.mkdir(t)
return t
sedfig_dir = getdir(self.sedfig_dir)
if sedfig_dir is not None:
print ('------------ creating seds, figures ---------------')
sys.stdout.flush()
skymodel_name = os.path.split(os.getcwd())[-1]
roi.plot_sed('all', sedfig_dir=sedfig_dir, suffix='_sed_%s'%skymodel_name, )
if self.profile_flag:
print ('------creating profile entries for all free sources')
sys.stdout.flush()
self.profile('all')
if self.localize_flag:
print ('------localizing all local sources------')
sys.stdout.flush()
tsmap_dir = getdir(self.tsmap_dir)
skymodel = os.getcwd().split('/')[-1]
if skymodel.startswith('month') or skymodel.startswith('year'):
print ('Not running tsmap analysis since data subset')
tsmap_dir=None
roi.localize('all', tsmap_dir=tsmap_dir)
if self.associate_flag:
print ('-------- running associations --------')
sys.stdout.flush()
self.find_associations('all')
print ('-------- analyzing counts histogram, ',)
sys.stdout.flush()
cts=roi.get_count_dict() # always do counts
print ('chisquared: %.1f ----'% cts['chisq'])
counts_dir = getdir(self.counts_dir)
if counts_dir is not None:
print ('------- saving counts plot ------')
sys.stdout.flush()
try:
fig=roi.plot_counts( tsmin=self.countsplot_tsmin, relto='isotropic')
fout = os.path.join(counts_dir, ('%s_counts.jpg'%roi.name) )
print ('----> %s' % fout)
sys.stdout.flush()
fig.savefig(fout, dpi=60, bbox_inches='tight')
except Exception as e:
print ('***Failed to analyze counts for roi %s: %s' %(roi.name,e))
chisq = -1
if outdir is not None:
write_pickle(self)
def repivot(self, fit_sources=None, min_ts = 10, max_beta=1.0, emin=200., emax=100000.,
dampen=1.0, tolerance=0.10, test=False, select=None):
""" invoked if repivot flag set;
returns True if had to refit, allowing iteration
"""
roi = self
if fit_sources is None:
fit_sources = [s for s in roi.sources if s.skydir is not None and np.any(s.spectral_model.free)]
if len(fit_sources)>1:
            print ('\ncheck need to repivot %d sources with TS>%.0f, beta<%.1f: \n'\
'source TS e0 pivot' % (len(fit_sources), min_ts, max_beta))
need_refit =False
for source in fit_sources:
model = source.spectral_model
try:
ts, e0, pivot = roi.TS(source.name),model.e0, model.pivot_energy()
except Exception as e:
print ('source %s:exception %s' %(source.name, e))
continue
if pivot is None:
print ('pivot is none')
continue
if model.name=='LogParabola': e0 = model[3]
elif model.name=='ExpCutoff' or model.name=='PLSuperExpCutoff':
e0 = model.e0
else:
print ('Model %s is not repivoted' % model.name)
continue
print ('%-20s %8.0f %9.0f %9.0f ' % (source.name, ts, e0, pivot),)
if ts < min_ts:
print ('TS too small')
continue
if model.name=='LogParabola':
if model[2]>max_beta:
print ('beta= %.2f too large' %(model[2]))
continue #very
if pivot < emin or pivot > emax:
print ('pivot energy, not in range (%.0f, %.0f): setting to limit' % (emin, emax))
pivot = min(emax, max(pivot,emin))
model.set_e0(pivot)
limited = True
else: limited=False
if abs(pivot/e0-1.)<tolerance: #0.05:
print ('converged'); continue
print ('will refit')
need_refit=True
if not test and not limited: model.set_e0(pivot*dampen+ e0*(1-dampen))
if need_refit and not test:
roi.fit(select=select, tolerance=0, ignore_exception=True)
return need_refit
def betafit(self, ignore_exception=True,):
""" evalute ts_beta for all sources, add to source info
ts_beta_zero: float or None
"""
for source in self.free_sources:
if source.isglobal: continue #skip global
print ('----------------- %s (%.1f)-------------' % (source.name, source.ts))
t = self.ts_beta(source.name, ignore_exception=ignore_exception)
if t is None: continue
print ('deltaTS {:.1f} beta {:.2f}'.format(t[0],t[1]))
self.fit(ignore_exception=ignore_exception)
return True
def residuals(self, tol=0.3):
print ('Creating tables of residuals')
if not os.path.exists('residuals'):
os.mkdir('residuals')
resids = sedfuns.residual_tables(self, tol)
filename = 'residuals/%s_resids.pickle' %self.name
with open(filename, 'w') as out:
pickle.dump(resids, out)
print ('wrote file %s' %filename)
def tables(self, special=False, mapkeys=['ts', 'kde']):
"""create a set of tables"""
if mapkeys==['rmap']:
# residual maps
maps.residual_maps(self)
return
maps.nside = self.tables_nside
tinfo = [maps.table_info[key] for key in mapkeys]
skyfuns = [(entry[0], key, entry[1]) for key,entry in zip(mapkeys, tinfo)]
rt = maps.ROItables(self.outdir, nside=self.tables_nside, skyfuns=skyfuns )
rt(self)
def update_positions(self, tsmin=10, qualmax=8):
""" use the localization information associated with each source to update position
require ts>tsmin, qual<qualmax
"""
print ('---Updating positions---')
sources = [s for s in self.sources if s.skydir is not None and np.any(s.spectral_model.free)]
#print ('sources:', [s.name for s in sources])
print ('%-15s%6s%8s %8s' % ('name','TS','qual', 'delta_ts'))
for source in sources:
has_ts= hasattr(source, 'ts')
print ('%-15s %6.0f' % (source.name, source.ts if has_ts else -1.0) , )
if not hasattr(source, 'ellipse') or source.ellipse is None:
print (' no localization info')
continue
if not has_ts:
print (' no TS')
continue
if source.ts<tsmin:
print (' TS<%.0f' % (tsmin))
continue
newdir = SkyDir(*source.ellipse[:2]); qual, delta_ts = source.ellipse[-2:]
print ('%6.1f%6.1f' % (qual, delta_ts) ,)
if qual>qualmax:
print (' qual>%.1f' % qualmax)
continue
print (' %s -> %s, moved %.2f' % (source.skydir,newdir, np.degrees(newdir.difference(source.skydir))))
source.skydir = newdir
def fit_second_order(self, summarize=False, beta_bounds=(-0.1, 1.0)):
"""
Fit the second order parameter (beta or Cutoff) for all variable sources
Leave them frozen.
"""
def fit2(source_name, parname='beta', fmt='{:6.2f}'):
s=self.get_source(source_name)
sources.set_default_bounds(s.model, True)# force change of bounds
self.thaw(parname)
parval = s.model[parname]
if parname=='beta':
s.model.bounds[2]=beta_bounds
try:
self.fit(s.name, summarize=summarize, estimate_errors=True, ignore_exception=False)
except Exception as msg:
print ('Failed to fit {} for {}: {}'.format(parname,source_name, msg))
s.model[parname]=parval
self.freeze(parname)
print (('{:17}{:8.0f}'+fmt+fmt).format(source_name, s.ts, s.model[parname], s.model.error(parname)))
LP_sources = [s.name for s in self.free_sources
if not s.isglobal and s.model.name=='LogParabola']
PLEX_sources = [s.name for s in self.free_sources
if not s.isglobal and s.model.name=='PLSuperExpCutoff']
print ('{:17}{:>8} {:6} {}'.format('LP source', 'TS', 'beta', 'error'))
map( lambda s: fit2(s,'beta'), LP_sources)
print ('{:17}{:>8} {:6} {:10}'.format('PLEX source', 'TS','Cutoff', 'error'))
map( lambda s: fit2(s,'Cutoff', '{:6.0f}'), PLEX_sources)
return True
def model_count_maps(self):
maps.ModelCountMaps(self, nbands=12, subdir='model_counts' )
return False
def full_process(self, source_name=None, sedfig_dir='sedfig', outdir='.' ):
""" Correponds to the 'finish' option, but for a single source
"""
if source_name=='all':
print ('TODO!')
return
source = self.get_source(source_name)
sname = source.name
print ('Full processing for source {} ================='.format(sname))
self.profile(sname)
try:
self.localize(sname, )
except Exception as msg:
print ('Fail to localize, {}'.format(msg))
self.find_associations(sname)
skymodel_name = os.path.split(os.getcwd())[-1]
sedfuns.makesed_all(self, source_name=sname, sedfig_dir=sedfig_dir,suffix='_sed_%s'%skymodel_name, )
if outdir is not None:
write_pickle(self)
def fix_spectra(roi):
for src in roi.free_sources:
m=src.model
if src.name=='isotrop':
print ('Freezing isotrop')
roi.freeze('Scale', src.name, 1.0)
continue
for i,parname in enumerate(m.param_names[1:]):
if m.free[i+1]:
roi.freeze(parname, src.name)
src.fixed_spectrum=True
class BatchJob(Process):
"""special interface to be called from uwpipeline
Expect current dir to be output dir.
"""
def __init__(self, **kwargs):
config_dir= kwargs.pop('config_dir', '.')
roi_list = kwargs.pop('roi_list', range(1,3))
kwargs['outdir']= os.getcwd()
super(BatchJob, self).__init__(config_dir, **kwargs)
def __call__(self, roi_index):
self.process_roi(roi_index)
class FitIsotropic(object):
def __init__(self, roi, nbands, folder):
""" fit the front and back"""
from uw.like import Models
iso_source = roi.get_source('isotrop')
old_model=iso_source.model.copy()
roi.sources.set_model(Models.FrontBackConstant(), 'isotrop')
iso_model=iso_source.model
roi.reinitialize()
cx = []; dx=[]
for eband in range(nbands):
roi.select(eband)
print ('*** Energy Band {}: iso counts {}'.format( eband,
[t[1].counts.round() for t in roi.selected]))
iso_model[0]=1; iso_model[1]=1
n = len(roi.selected)
roi.fit(range(n))
u = iso_model.get_all_parameters();
du = np.array([iso_model.error(i) for i in range(2)])
cx.append(u);
dx.append(du)
roi.select()
roi.sources.set_model(old_model)
if folder is not None:
if not os.path.exists(folder): os.mkdir(folder)
filename= '{}/{}.pickle'.format(folder, roi.name)
pickle.dump(dict(val=cx,err=dx), open(filename, 'w'))
print ('wrote file {}'.format(filename))
self.val = cx
self.err = dx
def __call__(self):
return (self.val, self.err)
def fit_isotropic(roi, nbands=8, folder='isotropic_fit'):
return FitIsotropic(roi, nbands, folder)()
class FitGalactic(object):
"""Manage the galactic correction fits
"""
def __init__(self, roi, nbands=8, folder=None, upper_limit=5.0):
""" fit only the galactic normalization"""
if folder is not None and not os.path.exists(folder):
os.mkdir(folder)
self.roi=roi
cx = self.fit( nbands)
self.fitpars= cx[:,:2]
x,s,cf,cb = cx.T
self.chisq= sum(((x-1)/s)**2)
if folder is not None:
filename= '{}/{}.pickle'.format(folder, roi.name)
pickle.dump(cx, open(filename, 'w'))
print ('wrote file {}'.format(filename))
def fit(self, nbands=8):
roi = self.roi
gal_model = roi.get_source('ring').model
roi.thaw('Norm')
gal_norm = gal_model[0]
roi.reinitialize()
cx = []
for eband in range(nbands):
counts = [t[0].counts.round() for t in roi[2*eband:2*eband+2]]
print ('*** Energy Band {}: gal counts {}'.format( eband, counts))
roi.select(eband)
gal_model[0]=gal_norm
roi.fit([0], ignore_exception=True);
cx.append((gal_model[0], gal_model.error(0), counts[0], counts[1]))
cx = np.array(cx) # convert to array, shape (nbands, 4)
# re-select all bands, freeze galactic again
roi.select()
roi.freeze('Norm')
return np.array(cx)
def update(self):
from uw.like2 import (response,diffuse)
r = self.roi
if r.sources.diffuse_normalization is None:
print ('FitGalactic: Setting up diffuse normalization')
roi_index= int(r.name[-4:])
dn = self.create_corr_dict(r.config['diffuse'], roi_index)
r.sources.diffuse_normalization = diffuse.normalization = dn
a = self.roi.sources.diffuse_normalization
b = self.fitpars[:,0]
before = a['gal']
a['gal'] = before * self.fitpars[:,0]
print (before, '\n',a['gal'])
# update the Galactic Response objects
for gr in self.roi[:16]:
gr[0].initialize(force=True)
def create_corr_dict(self, diffuse_dict, roi_index, event_type_names=('front','back')):
import response
corr_dict = {}
galf = diffuse_dict['ring']['correction']
corr_dict['gal'] = response.DiffuseCorrection(galf).roi_norm(roi_index)
isof = diffuse_dict['isotrop']['correction']
corr_dict['iso']= dict()
for x in event_type_names:
isoc = response.DiffuseCorrection(isof.replace('*',x));
corr_dict['iso'][x]= isoc.roi_norm(roi_index)
return corr_dict
def fit_galactic(roi, nbands=8, folder=None, upper_limit=5.0):
t = FitGalactic(roi, nbands, folder, upper_limit)
print ('Chisq: {:.1f}'.format(t.chisq))
if folder is None:
t.update()
return False
return True
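# Hedged usage sketch ('roi' is an already set-up Process/MultiROI instance):
#   fit_galactic(roi)                         # folder=None: apply corrections in place
#   fit_galactic(roi, folder='galactic_fit')  # write <folder>/<roi_name>.pickle only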
# def fit_diffuse(roi, nbands=8, select=[0,1], folder='diffuse_fit', corr_min=-0.95, update=False):
# """
# Perform independent fits to the gal, iso for each of the first nbands bands.
# If such a fit fails, or the correlation coeficient less than corr_min, fit only the isotropic.
# select: None or list of variables
# update: if True, modify the correction coefficients
# """
# from uw.like2 import diffuse
# # thaw gal and iso
# roi.thaw('Norm', 'ring')
# roi.thaw('Scale', 'isotrop')
# roi.get_model('isotrop').bounds[0]=[np.log10(0.5), np.log10(10.0)] # set limits
# roi.reinitialize()
# # do the fitting
# dpars=[]
# energies = []
# covs=[]
# quals = []
# def corr(cov): # correlation coeficiant
# return cov[0,1]/np.sqrt(cov[0,0]*cov[1,1])
# for ie in range(nbands):
# roi.select(ie);
# energy =int(roi.energies[0])
# print ('----- E={} -----'.format(energy))
# roi.fit(select, setpars={0:0, 1:0}, ignore_exception=True)
# cov = roi.fit_info['covariance']
# if cov is None or cov[0,0]<0 or corr(cov)<corr_min:
# #fail, probably since too correlated, or large correlation. So fit only iso
# roi.fit([1], setpars={0:0, 1:0}, ignore_exception=True)
# cov=np.array([ [0, roi.fit_info['covariance'][0]] , [0,0] ])
# energies.append(energy)
# dpars.append( roi.sources.parameters.get_parameters()[:2])
# covs.append(cov)
# quals.append(roi.fit_info['qual'])
# roi.freeze('Norm', 'ring', 1.0)
# roi.freeze('Scale', 'isotrop', 1.0)
# roi.select() # restore
# # set to external pars
# df = pd.DataFrame(np.power(10,np.array(dpars)), columns='gal iso'.split())
# df['cov'] = covs
# df['qual'] = quals
# df.index=energies
# if folder is not None:
# # simply save results
# if not os.path.exists(folder):
# os.mkdir(folder)
# filename= '{}/{}.pickle'.format(folder, roi.name)
# pickle.dump(df, open(filename, 'w'))
# print ('wrote file {}'.format(filename))
# if update:
# # update correction factors, reload all response objects
# dn=diffuse.normalization
# dn['gal'] *= df.gal.values
# dn['iso']['front'] *= df.iso.values
# dn['iso']['back'] *= df.iso.values
# # now reload
# for band in roi:
# for res in band[:2]:
# assert res.source.isglobal
# res.setup=False
# res.initialize()
# print ('Updated coefficients')
# # for interactive: convert covariance matrix to sigmas, correlation
# gsig=[]; isig=[]; corr=[]
# for i in range(len(df)):
# c = df.iloc[i]['cov']
# diag = np.sqrt(c.diagonal())
# gsig.append(diag[0])
# isig.append(diag[1])
# corr.append(c[0,1]/(diag[0]*diag[1]))
# df['gsig']=gsig
# df['isig']=isig
# df['corr']=corr
# del df['cov']
# return df
def write_pickle(roi):
pickle_dir = os.path.join(roi.outdir, 'pickle')
if not os.path.exists(pickle_dir): os.makedirs(pickle_dir)
roi.to_healpix( pickle_dir, dampen=1.0,
counts=roi.get_count_dict(),
stream=os.environ.get('PIPELINE_STREAMPATH', 'interactive'),
ts_min = roi.ts_min,
)
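# e.g. (illustrative): write_pickle(roi) serializes the ROI summary to
# <roi.outdir>/pickle/ via roi.to_healpix(); the stream id is taken from the
# PIPELINE_STREAMPATH environment variable when it is set.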
def psc_check(roi, psc_name=None , outdir='psc_check', debug=False):
"""Compare the spectra of sources from a "gll" file with the corresponding
pointlike original fits.
"""
from uw.like2.analyze import fermi_catalog
from uw.like2.plotting import sed
#load the catalog: either a catalog, or filename
fgl = roi.config.get('fgl', None)
if fgl is None:
catpat = roi.config['gllcat']
pat = os.path.expandvars(os.path.join('$FERMI','catalog', catpat))
gllcats = sorted(glob.glob(pat))
assert len(gllcats)>0, 'No gtlike catalogs found using {}'.format(pat)
filename = gllcats[-1]
fgl = fermi_catalog.GLL_PSC2(filename)
roi.config['fgl']= fgl
def chisq(source):
try:
            sdf = pd.DataFrame(source.sedrec)
# CODING-STYLE CHECKS:
# pycodestyle test_decorators.py
import os
import sys
import pytest
import importlib
import numpy as np
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
import taxcalc
from taxcalc.decorators import *
def test_create_apply_function_string():
ans = create_apply_function_string(['a', 'b', 'c'], ['d', 'e'], [])
exp = ("def ap_func(x_0,x_1,x_2,x_3,x_4):\n"
" for i in range(len(x_0)):\n"
" x_0[i],x_1[i],x_2[i] = jitted_f(x_3[i],x_4[i])\n"
" return x_0,x_1,x_2\n")
assert ans == exp
def test_create_apply_function_string_with_params():
ans = create_apply_function_string(['a', 'b', 'c'], ['d', 'e'], ['d'])
exp = ("def ap_func(x_0,x_1,x_2,x_3,x_4):\n"
" for i in range(len(x_0)):\n"
" x_0[i],x_1[i],x_2[i] = jitted_f(x_3,x_4[i])\n"
" return x_0,x_1,x_2\n")
assert ans == exp
def test_create_toplevel_function_string_mult_outputs():
ans = create_toplevel_function_string(['a', 'b'], ['d', 'e'],
['pm', 'pm', 'pf', 'pm'])
exp = ''
exp = ("def hl_func(pm, pf):\n"
" from pandas import DataFrame\n"
" import numpy as np\n"
" import pandas as pd\n"
" def get_values(x):\n"
" if isinstance(x, pd.Series):\n"
" return x.values\n"
" else:\n"
" return x\n"
" outputs = \\\n"
" (pm.a, pm.b) = \\\n"
" applied_f(get_values(pm.a), get_values(pm.b), "
"get_values(pf.d), get_values(pm.e), )\n"
" header = ['a', 'b']\n"
" return DataFrame(data=np.column_stack(outputs),"
"columns=header)")
assert ans == exp
def test_create_toplevel_function_string():
ans = create_toplevel_function_string(['a'], ['d', 'e'],
['pm', 'pf', 'pm'])
exp = ''
exp = ("def hl_func(pm, pf):\n"
" from pandas import DataFrame\n"
" import numpy as np\n"
" import pandas as pd\n"
" def get_values(x):\n"
" if isinstance(x, pd.Series):\n"
" return x.values\n"
" else:\n"
" return x\n"
" outputs = \\\n"
" (pm.a) = \\\n"
" applied_f(get_values(pm.a), get_values(pf.d), "
"get_values(pm.e), )\n"
" header = ['a']\n"
" return DataFrame(data=outputs,"
"columns=header)")
assert ans == exp
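# Rough sketch of how a generated source string like `exp` above could be turned
# into a callable (this mirrors what the decorators are expected to do internally;
# the exact mechanism inside taxcalc.decorators may differ, and `some_applied_f`
# is a placeholder name):
#
#   src = create_toplevel_function_string(['a'], ['d', 'e'], ['pm', 'pf', 'pm'])
#   namespace = {'applied_f': some_applied_f}
#   exec(src, namespace)
#   hl_func = namespace['hl_func']   # hl_func(pm, pf) -> DataFrame with column 'a'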
def some_calc(x, y, z):
a = x + y
b = x + y + z
return (a, b)
def test_make_apply_function():
ans_do_jit = make_apply_function(some_calc, ['a', 'b'], ['x', 'y', 'z'],
[], do_jit=True, no_python=True)
assert ans_do_jit
ans_no_jit = make_apply_function(some_calc, ['a', 'b'], ['x', 'y', 'z'],
[], do_jit=False, no_python=True)
assert ans_no_jit
@apply_jit(["a", "b"], ["x", "y", "z"], nopython=True)
def Magic_calc(x, y, z):
a = x + y
b = x + y + z
return (a, b)
def Magic(pm, pf):
# Adjustments
outputs = pf.a, pf.b = Magic_calc(pm, pf)
header = ['a', 'b']
return DataFrame(data=np.column_stack(outputs), columns=header)
@iterate_jit(nopython=True)
def Magic_calc2(x, y, z):
a = x + y
b = x + y + z
return (a, b)
class Foo(object):
pass
@iterate_jit(nopython=True)
def faux_function(MARS):
if MARS == 1:
var = 2
else:
var = 1
return var
@iterate_jit(nopython=True)
def ret_everything(a, b, c, d, e, f):
c = a + b
d = a + b
e = a + b
f = a + b
return (c, d, e,
f)
def test_magic_apply_jit():
pm = Foo()
pf = Foo()
pm.a = np.ones((5,))
pm.b = np.ones((5,))
pf.x = np.ones((5,))
pf.y = np.ones((5,))
pf.z = np.ones((5,))
xx = Magic(pm, pf)
exp = DataFrame(data=[[2.0, 3.0]] * 5, columns=["a", "b"])
assert_frame_equal(xx, exp)
def test_magic_apply_jit_swap():
pm = Foo()
pf = Foo()
pm.a = np.ones((5,))
pm.b = np.ones((5,))
pf.x = np.ones((5,))
pf.y = np.ones((5,))
pf.z = np.ones((5,))
xx = Magic(pf, pm)
exp = DataFrame(data=[[2.0, 3.0]] * 5, columns=["a", "b"])
assert_frame_equal(xx, exp)
def test_magic_iterate_jit():
pm = Foo()
pf = Foo()
pm.a = np.ones((5,))
pm.b = np.ones((5,))
pf.x = np.ones((5,))
pf.y = np.ones((5,))
pf.z = np.ones((5,))
xx = Magic_calc2(pm, pf)
exp = DataFrame(data=[[2.0, 3.0]] * 5, columns=["a", "b"])
assert_frame_equal(xx, exp)
def test_faux_function_iterate_jit():
pm = Foo()
pf = Foo()
pf.MARS = np.ones((5,))
pf.var = np.ones((5,))
ans = faux_function(pm, pf)
    exp = DataFrame(data=[2.0] * 5, columns=['var'])
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas
import numpy as np
from .dataframe import DataFrame
from .utils import _reindex_helper
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False,
copy=True):
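    # Minimal usage sketch (illustrative only; the frames below are made up, and
    # `modin.pandas` is the import path suggested by the error message further
    # down in this function):
    #
    #   import modin.pandas as mpd
    #   df1 = mpd.DataFrame({'a': [1, 2]})
    #   df2 = mpd.DataFrame({'a': [3, 4]})
    #   mpd.concat([df1, df2], axis=0, ignore_index=True)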
if keys is not None:
objs = [objs[k] for k in keys]
else:
objs = list(objs)
if len(objs) == 0:
raise ValueError("No objects to concatenate")
objs = [obj for obj in objs if obj is not None]
if len(objs) == 0:
raise ValueError("All objects passed were None")
try:
type_check = next(obj for obj in objs
if not isinstance(obj, (pandas.Series,
pandas.DataFrame,
DataFrame)))
except StopIteration:
type_check = None
if type_check is not None:
raise ValueError("cannot concatenate object of type \"{0}\"; only "
"pandas.Series, pandas.DataFrame, "
"and modin.pandas.DataFrame objs are "
"valid", type(type_check))
all_series = all(isinstance(obj, pandas.Series)
for obj in objs)
if all_series:
return DataFrame(pandas.concat(objs, axis, join, join_axes,
ignore_index, keys, levels, names,
verify_integrity, copy))
if isinstance(objs, dict):
raise NotImplementedError(
"Obj as dicts not implemented. To contribute to "
"Pandas on Ray, please visit github.com/ray-project/ray.")
axis = pandas.DataFrame()._get_axis_number(axis)
if join not in ['inner', 'outer']:
raise ValueError("Only can inner (intersect) or outer (union) join the"
" other axis")
# We need this in a list because we use it later.
all_index, all_columns = list(zip(*[(obj.index, obj.columns)
for obj in objs]))
def series_to_df(series, columns):
df = pandas.DataFrame(series)
df.columns = columns
return DataFrame(df)
# Pandas puts all of the Series in a single column named 0. This is
# true regardless of the existence of another column named 0 in the
# concat.
if axis == 0:
objs = [series_to_df(obj, [0])
if isinstance(obj, pandas.Series) else obj for obj in objs]
else:
# Pandas starts the count at 0 so this will increment the names as
# long as there's a new nameless Series being added.
def name_incrementer(i):
val = i[0]
i[0] += 1
return val
i = [0]
objs = [series_to_df(obj, obj.name if obj.name is not None
else name_incrementer(i))
if isinstance(obj, pandas.Series) else obj for obj in objs]
# Using concat on the columns and index is fast because they're empty,
# and it forces the error checking. It also puts the columns in the
# correct order for us.
final_index = \
        pandas.concat([pandas.DataFrame(index=idx)
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
        # mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: 2-D matrix with shape (2, 3) used as input; `empty` creates sized
        # objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence-like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
            def __len__(self):
                return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
        df = DataFrame([arr, s1])
import os
import re
import sys
import warnings
from datetime import timedelta
from runpy import run_path
from time import sleep
import click
import pandas as pd
from six import string_types
import catalyst
from catalyst.data.bundles import load
from catalyst.data.data_portal import DataPortal
from catalyst.exchange.exchange_pricing_loader import ExchangePricingLoader, \
TradingPairPricing
from catalyst.exchange.utils.factory import get_exchange
from logbook import Logger
try:
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter
PYGMENTS = True
except ImportError:
PYGMENTS = False
from toolz import valfilter, concatv
from functools import partial
from catalyst.finance.trading import TradingEnvironment
from catalyst.utils.calendars import get_calendar
from catalyst.utils.factory import create_simulation_parameters
from catalyst.data.loader import load_crypto_market_data
import catalyst.utils.paths as pth
from catalyst.exchange.exchange_algorithm import (
ExchangeTradingAlgorithmLive,
ExchangeTradingAlgorithmBacktest,
)
from catalyst.exchange.exchange_data_portal import DataPortalExchangeLive, \
DataPortalExchangeBacktest
from catalyst.exchange.exchange_asset_finder import ExchangeAssetFinder
from catalyst.constants import LOG_LEVEL
log = Logger('run_algo', level=LOG_LEVEL)
class _RunAlgoError(click.ClickException, ValueError):
"""Signal an error that should have a different message if invoked from
the cli.
Parameters
----------
pyfunc_msg : str
The message that will be shown when called as a python function.
cmdline_msg : str
The message that will be shown on the command line.
"""
exit_code = 1
def __init__(self, pyfunc_msg, cmdline_msg):
super(_RunAlgoError, self).__init__(cmdline_msg)
self.pyfunc_msg = pyfunc_msg
def __str__(self):
return self.pyfunc_msg
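# Small illustrative sketch: the same error renders differently depending on
# whether it bubbles up through the Python API or the click command line (the
# messages below are the ones used further down in `_run`).
#
#   err = _RunAlgoError('cannot pass define without `algotext`',
#                       "cannot pass '-D' / '--define' without '-t' / '--algotext'")
#   str(err)   # -> 'cannot pass define without `algotext`'  (Python API message)
#   # click prints the second message and exits with code 1 on the command line.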
def _run(handle_data,
initialize,
before_trading_start,
analyze,
algofile,
algotext,
defines,
data_frequency,
capital_base,
data,
bundle,
bundle_timestamp,
start,
end,
output,
print_algo,
local_namespace,
environ,
live,
exchange,
algo_namespace,
quote_currency,
live_graph,
analyze_live,
simulate_orders,
auth_aliases,
stats_output):
"""Run a backtest for the given algorithm.
This is shared between the cli and :func:`catalyst.run_algo`.
"""
# TODO: refactor for more granularity
if algotext is not None:
if local_namespace:
ip = get_ipython() # noqa
namespace = ip.user_ns
else:
namespace = {}
for assign in defines:
try:
name, value = assign.split('=', 2)
except ValueError:
raise ValueError(
'invalid define %r, should be of the form name=value' %
assign,
)
try:
# evaluate in the same namespace so names may refer to
# eachother
namespace[name] = eval(value, namespace)
except Exception as e:
raise ValueError(
'failed to execute definition for name %r: %s' % (name, e),
)
elif defines:
raise _RunAlgoError(
'cannot pass define without `algotext`',
"cannot pass '-D' / '--define' without '-t' / '--algotext'",
)
else:
namespace = {}
if algofile is not None:
algotext = algofile.read()
if print_algo:
if PYGMENTS:
highlight(
algotext,
PythonLexer(),
TerminalFormatter(),
outfile=sys.stdout,
)
else:
click.echo(algotext)
log.warn(
'Catalyst is currently in ALPHA. It is going through rapid '
'development and it is subject to errors. Please use carefully. '
'We encourage you to report any issue on GitHub: '
'https://github.com/enigmampc/catalyst/issues'
)
log.info('Catalyst version {}'.format(catalyst.__version__))
sleep(3)
if live:
if simulate_orders:
mode = 'paper-trading'
else:
mode = 'live-trading'
else:
mode = 'backtest'
log.info('running algo in {mode} mode'.format(mode=mode))
exchange_name = exchange
if exchange_name is None:
raise ValueError('Please specify at least one exchange.')
if isinstance(auth_aliases, string_types):
aliases = auth_aliases.split(',')
if len(aliases) < 2 or len(aliases) % 2 != 0:
raise ValueError(
'the `auth_aliases` parameter must contain an even list '
'of comma-delimited values. For example, '
'"binance,auth2" or "binance,auth2,bittrex,auth2".'
)
auth_aliases = dict(zip(aliases[::2], aliases[1::2]))
exchange_list = [x.strip().lower() for x in exchange.split(',')]
exchanges = dict()
for name in exchange_list:
if auth_aliases is not None and name in auth_aliases:
auth_alias = auth_aliases[name]
else:
auth_alias = None
exchanges[name] = get_exchange(
exchange_name=name,
quote_currency=quote_currency,
must_authenticate=(live and not simulate_orders),
skip_init=True,
auth_alias=auth_alias,
)
open_calendar = get_calendar('OPEN')
env = TradingEnvironment(
load=partial(
load_crypto_market_data,
environ=environ,
start_dt=start,
end_dt=end
),
environ=environ,
exchange_tz='UTC',
asset_db_path=None # We don't need an asset db, we have exchanges
)
env.asset_finder = ExchangeAssetFinder(exchanges=exchanges)
def choose_loader(column):
bound_cols = TradingPairPricing.columns
if column in bound_cols:
return ExchangePricingLoader(data_frequency)
raise ValueError(
"No PipelineLoader registered for column %s." % column
)
if live:
# TODO: fix the start data.
# is_start checks if a start date was specified by user
# needed for live clock
is_start = True
if start is None:
start = pd.Timestamp.utcnow()
is_start = False
elif start:
assert pd.Timestamp.utcnow() <= start, \
"specified start date is in the past."
        if start and end:
assert start < end, "start date is later than end date."
# TODO: fix the end data.
# is_end checks if an end date was specified by user
# needed for live clock
is_end = True
if end is None:
end = start + timedelta(hours=8760)
is_end = False
data = DataPortalExchangeLive(
exchanges=exchanges,
asset_finder=env.asset_finder,
trading_calendar=open_calendar,
            first_trading_day=pd.to_datetime('today', utc=True)
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Real-time modelling | COVID-19 in Brazil
----------------------------------------
Ideas and models developed by the trio:
. <NAME>
. <NAME>
. <NAME>
This model has the following characteristics:
a) We do NOT follow parametric models => during the epidemic there are no
sufficient or reliable data to feed epidemiological models such as the excellent
calculator at http://gabgoh.github.io/COVID/index.html (it is useful for generating
scenarios and for modelling the epidemic AFTER it has passed). Moreover, the
exponential nature of the curves makes them extremely sensitive to the parameters
that define them, which makes the predictive reliability of such models illusory.
b) The epidemic in Brazil started later than in other countries. Our model relies
on that fact. With the data available, we try to determine, at the present moment,
whom we are following, i.e. which countries looked most like us after the same
period of spread. From what happened in those countries we project what may
happen here.
c) This calculation is redone day by day. Depending on how well we manage (or fail)
to contain the spread of Covid-19, we will move closer to the countries that
handled the epidemic best or worst, and the projection will reflect that
similarity.
d) Modelling decisions are flagged in the code with the little eyes: # ◔◔ {...}
They are starting points for discussing the model and proposing alternatives.
"""
import datetime
import requests
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import seaborn as sns
sns.set()
# in ipython, run this command before executing => %matplotlib osx
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
__author__ = "<NAME>" # codigo
__copyright__ = "Copyright 2020"
__license__ = "New BSD License"
__version__ = "1.5.2"
__email__ = "<EMAIL>"
__status__ = "Experimental"
def preparar_dados(p1, uf="SP", cidade=u"São Paulo"):
u"""Busca dados e organiza tabela "data" com os dados de referência para a
modelagem.
Fontes:
. Mundo: https://covid.ourworldindata.org
. Brasil: https://brasil.io
Retorna:
raw <DataFrame> | Série completa do número de mortes/dia por país, sem trans-
posição temporal
inicio <Series> | Referência dos indexes em raw para justapor o início das
curvas dos diferentes países
data <DataFrame> | Série de número de mortes/dia por país trazendo para o
zero (index 0) o primeiro dia em que ocorrem pelo menos p1 mortes
(ver macro parâmetros). Isto reduz a quantidade de países para o grupo
que está à frente ou pareado ao Brazil. A partir do index 0 é possível
comparar a evolução dos casos entre os países.
nbr <int> | Número de dias da série de dados para o Brasil
"""
    # ◔◔ {we use daily deaths because they seem to be the most reliable figure}
raw = pd.read_csv("https://covid.ourworldindata.org/data/ecdc/new_deaths.csv").fillna(0.0)
    # ◔◔ {the link below loads cumulative deaths; we don't use it because the running sum smooths the series}
# raw_soma = pd.read_csv("https://covid.ourworldindata.org/data/ecdc/total_deaths.csv").fillna(0.0)
    # tempo = raw['date'] # ◔◔ {we don't use the dates}
raw = raw.drop(columns='date')
raw = raw.drop(columns='World')
    # to also keep the "official" figures
para_oficial = raw['Brazil']
    # under-reporting correction for Brazil:
sub, hip = estimar_subnotificacao('Brasil')
p4br = ((sub + raw['Brazil'].sum()) / raw['Brazil'].sum())
raw['Brasil'] = raw['Brazil'] * p4br
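    # Worked example of the correction above (numbers are illustrative only): if
    # the official series sums to 1,000 deaths and estimar_subnotificacao()
    # returns sub = 2,000 unreported ones, then p4br = (2000 + 1000) / 1000 = 3.0
    # and every daily value in raw['Brasil'] is scaled by 3.0.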
    # the subs dict uses the same refs as keys => used for reporting in the plots
subs = {"Brasil": str(round(p4br, 1)) + " (" + hip + ")"}
    # build the base for the "data" table
    inicio = raw.ge(p1).idxmax()  # ◔◔ {finds the index at which each country reaches p1}
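    # e.g. (illustrative): with p1 = 3 and a country whose daily deaths are
    # [0, 1, 3, 5], raw.ge(3) gives [False, False, True, True] and idxmax()
    # returns 2, the first day with at least p1 deaths.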
data = pd.DataFrame({'Brasil':raw['Brasil'][inicio['Brasil']:]}).reset_index().drop(columns='index')
nbr = data.shape[0]
oficial = pd.DataFrame({'Brasil':para_oficial[inicio['Brasil']:]}).reset_index().drop(columns='index')
    # Brazil data
estados = [
'AC', 'AL', 'AP', 'AM', 'BA', 'CE', 'DF', 'ES', 'GO', 'MA', 'MT', 'MS',
'MG', 'PA', 'PB', 'PR', 'PE', 'PI', 'RJ', 'RN', 'RS', 'RO', 'RR', 'SC',
'SP', 'SE', 'TO',
]
if uf not in estados or type(uf) is not str:
uf = "SP"
        print(uf, u": invalid UF, using 'SP'")
    # ◔◔ {we already download filtered by uf, but other states can be used}
uf_data = pd.read_csv("https://brasil.io/dataset/covid19/caso?state="+uf+"&format=csv")
    # add the uf data
uf_select = uf_data.loc[lambda df: df['place_type'] == "state", :]
uf_mortes = list(uf_select['deaths'].head(nbr + 1).fillna(0.0))
uf_mortes = [uf_mortes[i] - uf_mortes[i+1] for i in range(len(uf_mortes)-1)]
    uf_mortes += [0 for _ in range(nbr-len(uf_mortes))]  # fix the length
uf_mortes.reverse()
oficial[uf] = pd.Series(uf_mortes).values
sub_uf, hip_uf = estimar_subnotificacao(uf)
    p4uf = ((sub_uf + pd.Series(uf_mortes).values.sum())/ pd.Series(uf_mortes)
import pytest
import numpy as np
import pandas as pd
from pandas import Categorical, Series, CategoricalIndex
from pandas.core.dtypes.concat import union_categoricals
from pandas.util import testing as tm
class TestUnionCategoricals(object):
def test_union_categorical(self):
# GH 13361
data = [
(list('abc'), list('abd'), list('abcabd')),
([0, 1, 2], [2, 3, 4], [0, 1, 2, 2, 3, 4]),
([0, 1.2, 2], [2, 3.4, 4], [0, 1.2, 2, 2, 3.4, 4]),
(['b', 'b', np.nan, 'a'], ['a', np.nan, 'c'],
['b', 'b', np.nan, 'a', 'a', np.nan, 'c']),
(pd.date_range('2014-01-01', '2014-01-05'),
pd.date_range('2014-01-06', '2014-01-07'),
pd.date_range('2014-01-01', '2014-01-07')),
(pd.date_range('2014-01-01', '2014-01-05', tz='US/Central'),
pd.date_range('2014-01-06', '2014-01-07', tz='US/Central'),
pd.date_range('2014-01-01', '2014-01-07', tz='US/Central')),
(pd.period_range('2014-01-01', '2014-01-05'),
pd.period_range('2014-01-06', '2014-01-07'),
pd.period_range('2014-01-01', '2014-01-07')),
]
for a, b, combined in data:
for box in [Categorical, CategoricalIndex, Series]:
result = union_categoricals([box(Categorical(a)),
box(Categorical(b))])
expected = Categorical(combined)
tm.assert_categorical_equal(result, expected,
check_category_order=True)
# new categories ordered by appearance
s = Categorical(['x', 'y', 'z'])
s2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([s, s2])
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['x', 'y', 'z', 'a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
s = Categorical([0, 1.2, 2], ordered=True)
s2 = Categorical([0, 1.2, 2], ordered=True)
result = union_categoricals([s, s2])
expected = Categorical([0, 1.2, 2, 0, 1.2, 2], ordered=True)
tm.assert_categorical_equal(result, expected)
# must exactly match types
s = Categorical([0, 1.2, 2])
s2 = Categorical([2, 3, 4])
msg = 'dtype of categories must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([s, s2])
msg = 'No Categoricals to union'
with tm.assert_raises_regex(ValueError, msg):
union_categoricals([])
def test_union_categoricals_nan(self):
# GH 13759
res = union_categoricals([pd.Categorical([1, 2, np.nan]),
pd.Categorical([3, 2, np.nan])])
exp = Categorical([1, 2, np.nan, 3, 2, np.nan])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical(['A', 'B']),
pd.Categorical(['B', 'B', np.nan])])
exp = Categorical(['A', 'B', 'B', 'B', np.nan])
tm.assert_categorical_equal(res, exp)
val1 = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-03-01'),
pd.NaT]
val2 = [pd.NaT, pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-02-01')]
res = union_categoricals([pd.Categorical(val1), pd.Categorical(val2)])
exp = Categorical(val1 + val2,
categories=[pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-03-01'),
pd.Timestamp('2011-02-01')])
tm.assert_categorical_equal(res, exp)
# all NaN
res = union_categoricals([pd.Categorical([np.nan, np.nan]),
pd.Categorical(['X'])])
exp = Categorical([np.nan, np.nan, 'X'])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical([np.nan, np.nan]),
pd.Categorical([np.nan, np.nan])])
exp = Categorical([np.nan, np.nan, np.nan, np.nan])
tm.assert_categorical_equal(res, exp)
def test_union_categoricals_empty(self):
# GH 13759
res = union_categoricals([pd.Categorical([]),
pd.Categorical([])])
exp = Categorical([])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical([]),
pd.Categorical([1.0])])
exp = Categorical([1.0])
tm.assert_categorical_equal(res, exp)
# to make dtype equal
nanc = pd.Categorical(np.array([np.nan], dtype=np.float64))
res = union_categoricals([nanc,
pd.Categorical([])])
tm.assert_categorical_equal(res, nanc)
def test_union_categorical_same_category(self):
# check fastpath
c1 = Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])
c2 = Categorical([3, 2, 1, np.nan], categories=[1, 2, 3, 4])
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, 4, 3, 2, 1, np.nan],
categories=[1, 2, 3, 4])
tm.assert_categorical_equal(res, exp)
c1 = Categorical(['z', 'z', 'z'], categories=['x', 'y', 'z'])
c2 = Categorical(['x', 'x', 'x'], categories=['x', 'y', 'z'])
res = union_categoricals([c1, c2])
exp = Categorical(['z', 'z', 'z', 'x', 'x', 'x'],
categories=['x', 'y', 'z'])
tm.assert_categorical_equal(res, exp)
def test_union_categoricals_ordered(self):
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
msg = 'Categorical.ordered must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
res = union_categoricals([c1, c1])
exp = Categorical([1, 2, 3, 1, 2, 3], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3, np.nan], ordered=True)
c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, np.nan, 3, 2], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
msg = "to union ordered Categoricals, all categories must be the same"
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
def test_union_categoricals_ignore_order(self):
# GH 15219
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
msg = 'Categorical.ordered must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2], ignore_order=False)
res = union_categoricals([c1, c1], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([c1, c1], ignore_order=False)
exp = Categorical([1, 2, 3, 1, 2, 3],
categories=[1, 2, 3], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3, np.nan], ordered=True)
c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, np.nan, 3, 2])
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([c2, c1], ignore_order=True,
sort_categories=True)
exp = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([4, 5, 6], ordered=True)
result = union_categoricals([c1, c2], ignore_order=True)
expected = Categorical([1, 2, 3, 4, 5, 6])
tm.assert_categorical_equal(result, expected)
msg = "to union ordered Categoricals, all categories must be the same"
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2], ignore_order=False)
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
def test_union_categoricals_sort(self):
# GH 13846
c1 = Categorical(['x', 'y', 'z'])
c2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['a', 'b', 'c', 'x', 'y', 'z'])
tm.assert_categorical_equal(result, expected)
# fastpath
c1 = Categorical(['a', 'b'], categories=['b', 'a', 'c'])
c2 = Categorical(['b', 'c'], categories=['b', 'a', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['a', 'b'], categories=['c', 'a', 'b'])
c2 = Categorical(['b', 'c'], categories=['c', 'a', 'b'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath - skip resort
c1 = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
c2 = Categorical(['b', 'c'], categories=['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['x', np.nan])
c2 = Categorical([np.nan, 'b'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['x', np.nan, np.nan, 'b'],
categories=['b', 'x'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([np.nan])
c2 = Categorical([np.nan])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical([np.nan, np.nan], categories=[])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([])
c2 = Categorical([])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical([])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['b', 'a'], categories=['b', 'a', 'c'], ordered=True)
c2 = Categorical(['a', 'c'], categories=['b', 'a', 'c'], ordered=True)
with pytest.raises(TypeError):
union_categoricals([c1, c2], sort_categories=True)
def test_union_categoricals_sort_false(self):
# GH 13846
c1 = Categorical(['x', 'y', 'z'])
c2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['x', 'y', 'z', 'a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath
c1 = Categorical(['a', 'b'], categories=['b', 'a', 'c'])
c2 = Categorical(['b', 'c'], categories=['b', 'a', 'c'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['b', 'a', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath - skip resort
c1 = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
c2 = Categorical(['b', 'c'], categories=['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['x', np.nan])
c2 = | Categorical([np.nan, 'b']) | pandas.Categorical |
import gzip
import pandas as pd
import os
import shutil
from prepare_vcf_files_helpers import update_dict_with_file, change_format, change_info
pd.options.mode.chained_assignment = None
def make_unique_files(input_folder, output_folder):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
if not os.path.exists(output_folder + '/temp'):
os.makedirs(output_folder + '/temp')
files = [[x[0] + '/' + y for y in x[2]] for x in os.walk(input_folder)]
flat_files = [file for sublist in files for file in sublist]
gz_files = [file for file in flat_files if file.endswith('vep.vcf.gz')]
dict_with_files = {}
for gz_file in gz_files:
dict_with_files = update_dict_with_file(gz_file, dict_with_files)
df_dict_with_files = pd.DataFrame.from_dict(dict_with_files, orient='index')
df_dict_with_files.index.name = 'filename'
df_dict_with_files.to_csv(output_folder + '/files_summary_before_merging.csv', sep=',')
df_dict_with_files_grouped = df_dict_with_files.reset_index().groupby(['indiv_name',
'type_of_file']).agg('nunique')
df_dict_with_files_grouped.to_csv(output_folder + '/files_summary_count_per_patient_before_merging.csv', sep=',')
df_not_unique_patients = df_dict_with_files_grouped.loc[df_dict_with_files_grouped['filename'] != 1, :]
df_not_unique_patients.to_csv(output_folder + '/not_unique_patients.csv', sep=',')
with open(output_folder+'/do_not_use.txt', 'w+') as do_not_use_file:
for patient in list(df_not_unique_patients.unstack().index.unique()):
this_patient = df_not_unique_patients.xs(patient, level=0)
for file_type in list(this_patient.index.unique()):
first = True
with open(output_folder + '/temp/' + patient+'_'+file_type+'.vcf', 'wb') as combined:
temp_df = df_dict_with_files.loc[(df_dict_with_files['indiv_name'] == patient) &
(df_dict_with_files['type_of_file'] == file_type), :]
lines_df = pd.DataFrame()
columns = []
for filename in list(temp_df.index.unique()):
print(filename)
do_not_use_file.write(filename+'\n')
with gzip.open(filename) as f:
for line in f.readlines():
dline = line.decode('ascii')
if dline.startswith('##') and first:
combined.write(line)
elif dline.startswith('##'):
pass
elif dline.startswith('#') and first:
combined.write(line)
columns = dline.replace('#', '').strip().split('\t')
elif dline.startswith('#'):
columns = dline.replace('#', '').strip().split('\t')
else:
new_record = \
pd.DataFrame([dline.replace('\n',
'').replace(';',
':').replace('"',
'').split('\t')],
columns=columns)
new_columns_normal = new_record['NORMAL'].str.split(":", expand=True)
normal_columns = list(map(lambda x: x + '_normal',
new_record['FORMAT'].str.strip().str.split(":").
values[0]))
try:
new_columns_normal.columns = normal_columns
except ValueError:
normal_columns.remove('SS_normal')
new_columns_normal.columns = normal_columns
new_record = | pd.concat([new_record, new_columns_normal], axis=1) | pandas.concat |
import pandas as pd
import tensorflow as tf
from pathlib import Path
from datetime import datetime
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.models import load_model
#environment settings
path = Path(__file__).parent.absolute()/'Deep Training'
name_data = 'none_'#''
metric = 'binary_accuracy'
minimise = False
#parameter settings
model_keys = ['optimizer','layers','activations','dropouts']
blueprint_keys = ['predictors','identifier']+model_keys
#log settings
log_keys = ['timestamp']+blueprint_keys+['dimensions','length','nodes','loss',metric,'time','epochs']
sort_fields = [metric, 'loss', 'epochs', 'nodes', 'time']
sort_conditions = [minimise, True, True, True, True]
predictor_log_path = path/'Logs'/(name_data+'predictor_evaluation_log.csv')
parameter_log_path = path/'Logs'/(name_data+'parameter_evaluation_log.csv')
re_parameter_log_path = path/'Logs'/(name_data+'re_parameter_evaluation_log.csv')
#model settings
models_path = path/'Models'
#data settings
data_path = path/'Data'
targets_name = 'None_Targets.csv'
predictors_name = 'None_Predictors.csv'
targets_columns = ['Home: Win','Visiting: Win']
predictors_columns = None
targets_index = False
predictors_index = False
#data environment
targets = pd.read_csv(data_path/targets_name, usecols=targets_columns, index_col=targets_index)
predictors = pd.read_csv(data_path/predictors_name, usecols=predictors_columns, index_col=predictors_index)
data_date = datetime.fromtimestamp((data_path/'Validation'/(name_data+'validation_targets.csv')).stat().st_mtime)
validation_targets = pd.read_csv(data_path/'Validation'/(name_data+'validation_targets.csv'), index_col=targets_index)
validation_predictors = pd.read_csv(data_path/'Validation'/(name_data+'validation_predictors.csv'), index_col=predictors_index)
training_targets = | pd.read_csv(data_path/'Training'/(name_data+'training_targets.csv'), index_col=targets_index) | pandas.read_csv |
# Packages
import os
import pandas as pd
import spacy
import matplotlib.pyplot as plt
from spacytextblob.spacytextblob import SpacyTextBlob
# Achieving polarity
def polarity(df):
polarity_scores = []
for doc in nlp.pipe(df["headline_text"]):
polarity_scores.append(doc._.sentiment.polarity)
return polarity_scores
# Plot function 1
def plot_polarity(df, roll_val1 = 7, roll_val2 = 30, save = False):
plt.figure(figsize=(10, 5))
plt.plot(df.groupby("publish_date").mean("polarity").rolling(roll_val1).mean(), label = 'Weekly')
plt.plot(df.groupby("publish_date").mean("polarity").rolling(roll_val2).mean(), label = 'Monthly')
plt.title('Polarity scores for headlines')
plt.xlabel('Date')
plt.ylabel('Polarity score')
plt.legend()
if save == True:
plt.savefig("polarity_plot.png")
plt.show()
# Plot function 2
def plot_polarity(df, roll_val1 = 7, roll_val2 = 30, save = False):
fig = plt.figure(figsize = (10.0, 3.0))
axes_1 = fig.add_subplot(1,2,1) # 1 row , 3 columns, 1st column position
axes_2 = fig.add_subplot(1,2,2) # 1 row , 3 columns, 2nd column position
# axes_1.set_ylabel("polarity")
axes_1.set_ylabel(f"Polarity (rolling mean of {roll_val1} days)")
smoothed_sent_week = df.groupby("publish_date").mean("polarity").rolling(roll_val1).mean()
axes_1.plot(smoothed_sent_week) # on the "canvas" we made above, plot the mean_val on the axes_1
axes_1.legend("Week average",loc="upper left")
## axes_2.set_ylabel("polarity")
axes_2.set_ylabel(f"Polarity average (rolling mean of {roll_val2} days)")
smoothed_sent_month = df.groupby("publish_date").mean("polarity").rolling(roll_val2).mean()
axes_2.plot(smoothed_sent_month) # on the "canvas" we made above, plot the mean_val on the axes_1
axes_2.legend("Month average",loc="upper left")
fig.tight_layout()
if save == True:
plt.savefig("polarity_plot.png")
plt.show()
def main():
# initialising spaCy
nlp = spacy.load("en_core_web_sm")
# defining path and csv
path_to_csv = os.path.join("abc_data", "abcnews-date-text.csv")
abc_news = pd.read_csv(path_to_csv)
spacy_text_blob = SpacyTextBlob()
nlp.add_pipe(spacy_text_blob)
# choosing 10000 rows as sample
sample = abc_news[1:10000]
# changing dates
sample["publish_date"]= | pd.to_datetime(sample.publish_date, format="%Y%m%d") | pandas.to_datetime |
from __future__ import division
import logging
from time import time
from os import getpid
from timeit import default_timer as timer
import pandas
import numpy as np
import scipy
import statsmodels.api as sm
import traceback
from settings import (
ALGORITHMS,
CONSENSUS,
FULL_DURATION,
MAX_TOLERABLE_BOREDOM,
MIN_TOLERABLE_LENGTH,
BOREDOM_SET_SIZE,
PANDAS_VERSION,
RUN_OPTIMIZED_WORKFLOW,
SKYLINE_TMP_DIR,
ENABLE_ALGORITHM_RUN_METRICS,
ENABLE_ALL_ALGORITHMS_RUN_METRICS,
# @added 20200607 - Feature #3566: custom_algorithms
FULL_NAMESPACE,
)
from algorithm_exceptions import TooShort, Stale, Boring
# @added 20200607 - Feature #3566: custom_algorithms
try:
from settings import CUSTOM_ALGORITHMS
except:
CUSTOM_ALGORITHMS = None
try:
from settings import DEBUG_CUSTOM_ALGORITHMS
except:
DEBUG_CUSTOM_ALGORITHMS = False
if CUSTOM_ALGORITHMS:
try:
from custom_algorithms_to_run import get_custom_algorithms_to_run
except:
get_custom_algorithms_to_run = None
try:
from custom_algorithms import run_custom_algorithm_on_timeseries
except:
run_custom_algorithm_on_timeseries = None
# @added 20200817 - Feature #3684: ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS
# Feature #3650: ROOMBA_DO_NOT_PROCESS_BATCH_METRICS
# Feature #3480: batch_processing
# Allow for custom durations on namespaces
ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS = None
try:
from settings import ROOMBA_DO_NOT_PROCESS_BATCH_METRICS
except:
ROOMBA_DO_NOT_PROCESS_BATCH_METRICS = False
if ROOMBA_DO_NOT_PROCESS_BATCH_METRICS:
try:
from settings import ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS
except:
ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS = None
skyline_app = 'analyzer_batch'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
try:
send_algorithm_run_metrics = ENABLE_ALGORITHM_RUN_METRICS
except:
send_algorithm_run_metrics = False
"""
This is no man's land. Do anything you want in here,
as long as you return a boolean that determines whether the input timeseries is
anomalous or not.
The key here is to return a True or False boolean.
You should use the pythonic except mechanism to ensure any exceptions do not
cause things to halt, and the record_algorithm_error utility can be used to
sample any algorithm errors to the log.
To add an algorithm, define it here, and add its name to settings.ALGORITHMS.
"""
# @modified 20200817 - Feature #3684: ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS
# Added use_full_duration to all algorithms
def tail_avg(timeseries, use_full_duration):
"""
This is a utility function used to calculate the average of the last three
datapoints in the series as a measure, instead of just the last datapoint.
It reduces noise, but it also reduces sensitivity and increases the delay
to detection.
"""
try:
t = (timeseries[-1][1] + timeseries[-2][1] + timeseries[-3][1]) / 3
return t
except IndexError:
return timeseries[-1][1]
def median_absolute_deviation(timeseries, use_full_duration):
"""
A timeseries is anomalous if the deviation of its latest datapoint with
respect to the median is X times larger than the median of deviations.
"""
# logger.info('Running ' + str(get_function_name()))
try:
series = pandas.Series([x[1] for x in timeseries])
median = series.median()
demedianed = np.abs(series - median)
median_deviation = demedianed.median()
except:
traceback_format_exc_string = traceback.format_exc()
algorithm_name = str(get_function_name())
record_algorithm_error(algorithm_name, traceback_format_exc_string)
return None
# The test statistic is infinite when the median is zero,
# so it becomes super sensitive. We play it safe and skip when this happens.
if median_deviation == 0:
return False
if PANDAS_VERSION < '0.17.0':
try:
test_statistic = demedianed.iget(-1) / median_deviation
except:
traceback_format_exc_string = traceback.format_exc()
algorithm_name = str(get_function_name())
record_algorithm_error(algorithm_name, traceback_format_exc_string)
return None
else:
try:
test_statistic = demedianed.iat[-1] / median_deviation
except:
traceback_format_exc_string = traceback.format_exc()
algorithm_name = str(get_function_name())
record_algorithm_error(algorithm_name, traceback_format_exc_string)
return None
# Completely arbitrary... triggers if the median deviation is
# 6 times bigger than the median
if test_statistic > 6:
return True
# As per https://github.com/etsy/skyline/pull/104 by @rugger74
# Although never seen, this should return False if not > arbitrary_value
# 20160523 @earthgecko
return False
def grubbs(timeseries, use_full_duration):
"""
A timeseries is anomalous if the Z score is greater than the Grubbs score.
"""
try:
# @modified 20191011 - Update least_squares & grubbs algorithms by using sample standard deviation PR #124
# Task #3256: Review and test PR 124
# Change from using scipy/numpy std which calculates the population
# standard deviation to using pandas.std which calculates the sample
# standard deviation which is more appropriate for time series data
# series = scipy.array([x[1] for x in timeseries])
# stdDev = scipy.std(series)
series = pandas.Series(x[1] for x in timeseries)
stdDev = series.std()
# Issue #27 - Handle z_score agent.py RuntimeWarning - https://github.com/earthgecko/skyline/issues/27
# This change avoids spewing warnings on agent.py tests:
# RuntimeWarning: invalid value encountered in double_scalars
# If stdDev is 0 division returns nan which is not > grubbs_score so
# return False here
if stdDev == 0:
return False
mean = np.mean(series)
# @modified 20200904 - Feature #3684: ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS
# Added use_full_duration
tail_average = tail_avg(timeseries, use_full_duration)
z_score = (tail_average - mean) / stdDev
len_series = len(series)
threshold = scipy.stats.t.isf(.05 / (2 * len_series), len_series - 2)
threshold_squared = threshold * threshold
grubbs_score = ((len_series - 1) / np.sqrt(len_series)) * np.sqrt(threshold_squared / (len_series - 2 + threshold_squared))
return z_score > grubbs_score
except:
traceback_format_exc_string = traceback.format_exc()
algorithm_name = str(get_function_name())
record_algorithm_error(algorithm_name, traceback_format_exc_string)
return None
def first_hour_average(timeseries, use_full_duration):
"""
Calculate the simple average over one hour, use_full_duration seconds ago.
A timeseries is anomalous if the average of the last three datapoints
is outside of three standard deviations of this value.
"""
try:
# @modified 20200817 - Feature #3684: ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS
# Use use_full_duration
# last_hour_threshold = time() - (FULL_DURATION - 3600)
last_hour_threshold = time() - (use_full_duration - 3600)
series = pandas.Series([x[1] for x in timeseries if x[0] < last_hour_threshold])
mean = (series).mean()
stdDev = (series).std()
# @modified 20200904 - Feature #3684: ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS
# Added use_full_duration
t = tail_avg(timeseries, use_full_duration)
return abs(t - mean) > 3 * stdDev
except:
traceback_format_exc_string = traceback.format_exc()
algorithm_name = str(get_function_name())
record_algorithm_error(algorithm_name, traceback_format_exc_string)
return None
def stddev_from_average(timeseries, use_full_duration):
"""
A timeseries is anomalous if the absolute value of the average of the latest
three datapoints minus the moving average is greater than three standard
deviations of the average. This does not exponentially weight the MA and so
is better for detecting anomalies with respect to the entire series.
"""
try:
series = | pandas.Series([x[1] for x in timeseries]) | pandas.Series |
import os
import numpy as np
import pandas as pd
import shap
import json
from ngboost import NGBRegressor
from ngboost.distns import Normal
from ngboost.learners import default_tree_learner
from ngboost.scores import MLE, LogScore
from classes.inputs_gatherer import InputsGatherer
class FeaturesAnalyzer:
"""
Given a dataset composed of features on the columns and days on the rows of a pandas df, this class computes the
best features and their importance
"""
def __init__(self, inputs_gatherer, forecast_type, cfg, logger):
"""
Constructor
:param inputs_gatherer: Inputs Gatherer
:type inputs_gatherer: InputsGatherer
:param forecast_type: Forecast type (MOR | EVE)
:type forecast_type: str
:param cfg: Configuration dictionary
:type cfg: dict
:param logger: Logger
:type logger: Logger object
"""
# set the variables
self.inputs_gatherer = inputs_gatherer
self.forecast_type = forecast_type
self.cfg = cfg
self.logger = logger
self.dataFrames = None
self.output_folder_name = None
self.current_name = None
self.nan_features = None
def dataset_creator(self):
"""
Build the datasets according to the instructions in the config file in the datasetSettings section
"""
self.inputs_gatherer.dataframe_builder_regions()
def update_datasets(self, name, output_dfs, target_columns):
"""
Initialize folders and add metadata to container of datasets
"""
folder_path = self.inputs_gatherer.output_folder_creator(name)
file_path_df = folder_path + folder_path.split(os.sep)[1] + '_dataset.csv'
if not os.path.isfile(file_path_df):
self.logger.error('File %s does not exist' % file_path_df)
tmp_df = pd.read_csv(file_path_df)
# Filtering on data -> only observations related to output values higher than the limit will be considered
mask = tmp_df[target_columns[0]] >= self.cfg['regions'][name]['dataToConsiderMinLimit']
output_dfs[name] = {'dataset': tmp_df[mask], 'targetColumns': target_columns}
# Select only configured input signals
input_signals = self.inputs_gatherer.generate_input_signals_codes(name)
candidate_signals = list(output_dfs[name]['dataset'].columns)
# Remove date and output from candidates list
candidate_signals.remove('date')
for target_column in self.cfg['regions'][name]['targetColumns']:
candidate_signals.remove(target_column)
for candidate_signal in candidate_signals:
if candidate_signal not in input_signals:
# This signal has not to be used in the grid search
output_dfs[name]['dataset'] = output_dfs[name]['dataset'].drop(candidate_signal, axis=1)
return output_dfs
def dataset_reader(self, region, target_column):
"""
Read a previously created or provided csv file. If the dataset is created from a custom JSON or
from regionals signals, this method has to be preceded by a call of dataset_creator
"""
output_dfs = {}
output_dfs = self.update_datasets(region, output_dfs, target_column)
self.dataFrames = output_dfs
def dataset_splitter(self, region, data, target_column):
"""
Split a dataFrame in design matrix X and response vector Y
:param name: code name of the region/json/csv
:type name: str
:param data: full dataset
:type data: pandas.DataFrame
:return: split datasets in multiple formats
:rtype: numpy.array, numpy.array, list, pandas.DataFrame, pandas.DataFrame
"""
# todo CHECK THIS PART (probably useless!)
# self.current_name = name
# df = data['dataset']
# y_data = pd.DataFrame()
# x_data = pd.DataFrame()
# df_years = list(dict.fromkeys(df['date'].str[:4]))
# # If we're at MOR the value of the max ozone of day ahead is our target. If we're at EVE, it is the max
# # value of 2 days ahead
# days_ahead = 1 if self.forecast_type == 'MOR' else 2
#
# for year in df_years:
# lcl_df = df.loc[df['date'].str[:4] == year, :].reset_index(drop=True)
# lcl_y_data = lcl_df.loc[days_ahead:, ['date', target_column]]
# lcl_x_data = lcl_df.iloc[:-days_ahead, :]
# y_data = pd.concat([y_data, lcl_y_data], axis=0).reset_index(drop=True)
# x_data = pd.concat([x_data, lcl_x_data], axis=0).reset_index(drop=True)
# # Remove the target column
# x_data = x_data.drop(target_column, axis=1)
# Create the inputs dataset (x_data)
x_data = data['dataset']
# Drop from the input dataset all the output variables defined for this region in the dataset
for target in self.cfg['regions'][region]['targetColumns']:
x_data = x_data.drop(target, axis=1)
# Create the outputs dataset (x_data)
y_data = | pd.DataFrame({'date': data['dataset']['date'], target_column: data['dataset'][target_column]}) | pandas.DataFrame |
from dataclasses import replace
import datetime as dt
from functools import partial
import inspect
from pathlib import Path
import re
import types
import uuid
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
from solarforecastarbiter import datamodel
from solarforecastarbiter.io import api, nwp, utils
from solarforecastarbiter.reference_forecasts import main, models
from solarforecastarbiter.conftest import default_forecast, default_observation
BASE_PATH = Path(nwp.__file__).resolve().parents[0] / 'tests/data'
@pytest.mark.parametrize('model', [
models.gfs_quarter_deg_hourly_to_hourly_mean,
models.gfs_quarter_deg_to_hourly_mean,
models.hrrr_subhourly_to_hourly_mean,
models.hrrr_subhourly_to_subhourly_instantaneous,
models.nam_12km_cloud_cover_to_hourly_mean,
models.nam_12km_hourly_to_hourly_instantaneous,
models.rap_cloud_cover_to_hourly_mean,
models.gefs_half_deg_to_hourly_mean
])
def test_run_nwp(model, site_powerplant_site_type, mocker):
""" to later patch the return value of load forecast, do something like
def load(*args, **kwargs):
return load_forecast_return_value
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(load),))
"""
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(nwp.load_forecast, base_path=BASE_PATH),))
mocker.patch(
'solarforecastarbiter.reference_forecasts.utils.get_init_time',
return_value=pd.Timestamp('20190515T0000Z'))
site, site_type = site_powerplant_site_type
fx = datamodel.Forecast('Test', dt.time(5), pd.Timedelta('1h'),
pd.Timedelta('1h'), pd.Timedelta('6h'),
'beginning', 'interval_mean', 'ghi', site)
run_time = pd.Timestamp('20190515T1100Z')
issue_time = pd.Timestamp('20190515T1100Z')
out = main.run_nwp(fx, model, run_time, issue_time)
for var in ('ghi', 'dni', 'dhi', 'air_temperature', 'wind_speed',
'ac_power'):
if site_type == 'site' and var == 'ac_power':
assert out.ac_power is None
else:
ser = getattr(out, var)
assert len(ser) >= 6
assert isinstance(ser, (pd.Series, pd.DataFrame))
assert ser.index[0] == pd.Timestamp('20190515T1200Z')
assert ser.index[-1] < pd.Timestamp('20190515T1800Z')
@pytest.fixture
def obs_5min_begin(site_metadata):
observation = default_observation(
site_metadata,
interval_length=pd.Timedelta('5min'), interval_label='beginning')
return observation
@pytest.fixture
def observation_values_text():
"""JSON text representation of test data"""
tz = 'UTC'
data_index = pd.date_range(
start='20190101', end='20190112', freq='5min', tz=tz, closed='left')
# each element of data is equal to the hour value of its label
data = pd.DataFrame({'value': data_index.hour, 'quality_flag': 0},
index=data_index)
text = utils.observation_df_to_json_payload(data)
return text.encode()
@pytest.fixture
def session(requests_mock, observation_values_text):
session = api.APISession('')
matcher = re.compile(f'{session.base_url}/observations/.*/values')
requests_mock.register_uri('GET', matcher, content=observation_values_text)
return session
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
# intraday, index=False
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label=interval_label)
issue_time = pd.Timestamp('20190101T2300Z')
mocker.spy(main.persistence, 'persistence_scalar')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 1
assert main.persistence.persistence_scalar.call_count == 1
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar_index(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label=interval_label)
issue_time = pd.Timestamp('20190101T2300Z')
# intraday, index=True
mocker.spy(main.persistence, 'persistence_scalar_index')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time, index=True)
assert isinstance(out, pd.Series)
assert len(out) == 1
assert main.persistence.persistence_scalar_index.call_count == 1
def test_run_persistence_interval(session, site_metadata, obs_5min_begin,
mocker):
run_time = pd.Timestamp('20190102T1945Z')
# day ahead, index = False
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('24h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190102T2300Z')
mocker.spy(main.persistence, 'persistence_interval')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 24
assert main.persistence.persistence_interval.call_count == 1
def test_run_persistence_weekahead(session, site_metadata, mocker):
variable = 'net_load'
observation = default_observation(
site_metadata, variable=variable,
interval_length=pd.Timedelta('5min'), interval_label='beginning')
run_time = pd.Timestamp('20190110T1945Z')
forecast = default_forecast(
site_metadata, variable=variable,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1d'),
interval_label='beginning')
issue_time = pd.Timestamp('20190111T2300Z')
mocker.spy(main.persistence, 'persistence_interval')
out = main.run_persistence(session, observation, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 24
assert main.persistence.persistence_interval.call_count == 1
def test_run_persistence_interval_index(session, site_metadata,
obs_5min_begin):
# index=True not supported for day ahead
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('24h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190423T2300Z')
run_time = | pd.Timestamp('20190422T1945Z') | pandas.Timestamp |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import codecs
import lightgbm as lgb
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
# Read data
image_file_path = './simulated_dpc_data.csv'
with codecs.open(image_file_path, "r", "Shift-JIS", "ignore") as file:
dpc = pd.read_table(file, delimiter=",")
# dpc_r, g_dpc_r_1, g_r: restricted data from dpc
dpc_r=dpc.loc[:, ['ID','code']]
# g_dpc_r_1: kept to check the details (i.e. the human-readable name of each code, 'name')
g_dpc_r_1=dpc.loc[:, ['ID','code','name']]
# Dummy encoding of the 'code' column
g_r = pd.get_dummies(dpc_r['code'])
# Reconstruct simulated data for AI learning
df_concat_dpc_get_dummies = pd.concat([dpc_r, g_r], axis=1)
# Remove features that may be the cause of the data leak
dpc_Remove_data_leak = df_concat_dpc_get_dummies.drop(["code",160094710,160094810,160094910,150285010,2113008,8842965,8843014,622224401,810000000,160060010], axis=1)
# Sum up the number of occurrences of each feature for each patient.
total_patient_features= dpc_Remove_data_leak.groupby("ID").sum()
total_patient_features.reset_index()
# Load a new file with ID and treatment availability
# Prepare training data
image_file_path_ID_and_polyp_pn = './simulated_patient_data.csv'
with codecs.open(image_file_path_ID_and_polyp_pn, "r", "Shift-JIS", "ignore") as file:
ID_and_polyp_pn = | pd.read_table(file, delimiter=",") | pandas.read_table |