prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars)
import json
import os
import pandas as pd
from ..version import __version__
from .description import describe_label_times
from .plots import LabelPlots
SCHEMA_VERSION = "0.1.0"
class LabelTimes(pd.DataFrame):
"""The data frame that contains labels and cutoff times for the target entity."""
def __init__(
self,
data=None,
target_entity=None,
target_types=None,
target_columns=None,
search_settings=None,
transforms=None,
*args,
**kwargs,
):
super().__init__(data=data, *args, **kwargs)
self.target_entity = target_entity
self.target_columns = target_columns or []
self.target_types = target_types or {}
self.search_settings = search_settings or {}
self.transforms = transforms or []
self.plot = LabelPlots(self)
if not self.empty:
self._check_label_times()
def _assert_single_target(self):
"""Asserts that the label times object contains a single target."""
info = "must first select an individual target"
assert self._is_single_target, info
def _check_target_columns(self):
"""Validates the target columns."""
if not self.target_columns:
self.target_columns = self._infer_target_columns()
else:
for target in self.target_columns:
info = 'target "%s" not found in data frame'
assert target in self.columns, info % target
def _check_target_types(self):
"""Validates the target types."""
if isinstance(self.target_types, dict):
self.target_types = pd.Series(self.target_types)
if self.target_types.empty:
self.target_types = self._infer_target_types()
else:
target_names = self.target_types.index.tolist()
match = target_names == self.target_columns
assert match, "target names in types must match target columns"
def _check_label_times(self):
"""Validates the lables times object."""
self._check_target_columns()
self._check_target_types()
def _infer_target_columns(self):
"""Infers the names of the targets in the data frame.
Returns:
value (list): A list of the target names.
"""
not_targets = [self.target_entity, "time"]
target_columns = self.columns.difference(not_targets)
assert not target_columns.empty, "target columns not found"
value = target_columns.tolist()
return value
@property
def _is_single_target(self):
return len(self.target_columns) == 1
def _get_target_type(self, dtype):
is_discrete = pd.api.types.is_bool_dtype(dtype)
is_discrete |= pd.api.types.is_categorical_dtype(dtype)
is_discrete |= pd.api.types.is_object_dtype(dtype)
value = "discrete" if is_discrete else "continuous"
return value
def _infer_target_types(self):
"""Infers the target type from the data type.
Returns:
types (Series): Inferred label type. Either "continuous" or "discrete".
"""
dtypes = self.dtypes[self.target_columns]
types = dtypes.apply(self._get_target_type)
return types
def select(self, target):
"""Selects one of the target variables.
Args:
target (str): The name of the target column.
Returns:
lt (LabelTimes): A label times object that contains a single target.
Examples:
Create a label times object that contains multiple target variables.
>>> entity = [0, 0, 1, 1]
>>> labels = [True, False, True, False]
>>> time = ['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04']
>>> data = {'entity': entity, 'time': time, 'A': labels, 'B': labels}
>>> lt = LabelTimes(data=data, target_entity='entity', target_columns=['A', 'B'])
>>> lt
entity time A B
0 0 2020-01-01 True True
1 0 2020-01-02 False False
2 1 2020-01-03 True True
3 1 2020-01-04 False False
Select a single target from the label times.
>>> lt.select('B')
entity time B
0 0 2020-01-01 True
1 0 2020-01-02 False
2 1 2020-01-03 True
3 1 2020-01-04 False
"""
assert not self._is_single_target, "only one target exists"
if not isinstance(target, str):
raise TypeError("target name must be string")
assert target in self.target_columns, 'target "%s" not found' % target
lt = self.copy()
lt.target_columns = [target]
lt.target_types = lt.target_types[[target]]
lt = lt[[self.target_entity, "time", target]]
return lt
@property
def settings(self):
"""Returns metadata about the label times."""
return {
"compose_version": __version__,
"schema_version": SCHEMA_VERSION,
"label_times": {
"target_entity": self.target_entity,
"target_columns": self.target_columns,
"target_types": self.target_types.to_dict(),
"search_settings": self.search_settings,
"transforms": self.transforms,
},
}
@property
def is_discrete(self):
"""Whether labels are discrete."""
return self.target_types.eq("discrete")
@property
def distribution(self):
"""Returns label distribution if labels are discrete."""
self._assert_single_target()
target_column = self.target_columns[0]
if self.is_discrete[target_column]:
labels = self.assign(count=1)
labels = labels.groupby(target_column)
distribution = labels["count"].count()
return distribution
else:
return self[target_column].describe()
@property
def count(self):
"""Returns label count per instance."""
self._assert_single_target()
count = self.groupby(self.target_entity)
count = count[self.target_columns[0]].count()
count = count.to_frame("count")
return count
@property
def count_by_time(self):
"""Returns label count across cutoff times."""
self._assert_single_target()
target_column = self.target_columns[0]
if self.is_discrete[target_column]:
keys = ["time", target_column]
value = self.groupby(keys).time.count()
value = value.unstack(target_column).fillna(0)
else:
value = self.groupby("time")
value = value[target_column].count()
value = (
value.cumsum()
) # In Python 3.5, these values automatically convert to float.
value = value.astype("int")
return value
def describe(self):
"""Prints out the settings used to make the label times."""
if not self.empty:
self._assert_single_target()
describe_label_times(self)
def copy(self, deep=True):
"""Make a copy of this object's indices and data.
Args:
deep (bool): Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices nor the data are copied. Default is True.
Returns:
lt (LabelTimes): A copy of the label times object.
"""
lt = super().copy(deep=deep)
lt.target_entity = self.target_entity
lt.target_columns = self.target_columns
lt.target_types = self.target_types.copy()
lt.search_settings = self.search_settings.copy()
lt.transforms = self.transforms.copy()
return lt
def threshold(self, value, inplace=False):
"""Creates binary labels by testing if labels are above threshold.
Args:
value (float) : Value of threshold.
inplace (bool) : Modify labels in place.
Returns:
labels (LabelTimes) : Instance of labels.
"""
self._assert_single_target()
target_column = self.target_columns[0]
labels = self if inplace else self.copy()
labels[target_column] = labels[target_column].gt(value)
labels.target_types[target_column] = "discrete"
transform = {"transform": "threshold", "value": value}
labels.transforms.append(transform)
if not inplace:
return labels
def apply_lead(self, value, inplace=False):
"""Shifts the label times earlier for predicting in advance.
Args:
value (str) : Time to shift earlier.
inplace (bool) : Modify labels in place.
Returns:
labels (LabelTimes) : Instance of labels.
"""
labels = self if inplace else self.copy()
labels["time"] = labels["time"].sub(pd.Timedelta(value))
transform = {"transform": "apply_lead", "value": value}
labels.transforms.append(transform)
if not inplace:
return labels
def bin(self, bins, quantiles=False, labels=None, right=True, precision=3):
"""Bin labels into discrete intervals.
Args:
bins (int or array): The criteria to bin by.
As an integer, the value can be the number of equal-width or quantile-based bins.
If :code:`quantiles` is False, the value is defined as the number of equal-width bins.
The range is extended by .1% on each side to include the minimum and maximum values.
If :code:`quantiles` is True, the value is defined as the number of quantiles (e.g. 10 for deciles, 4 for quartiles, etc.)
As an array, the value can be custom or quantile-based edges.
If :code:`quantiles` is False, the value is defined as bin edges allowing for non-uniform width. No extension is done.
                If :code:`quantiles` is True, the value is defined as bin edges using an array of quantiles (e.g. [0, .25, .5, .75, 1.] for quartiles)
quantiles (bool): Determines whether to use a quantile-based discretization function.
labels (array): Specifies the labels for the returned bins. Must be the same length as the resulting bins.
            right (bool) : Indicates whether the bins include the rightmost edge. Does not apply to quantile-based bins.
            precision (int): The precision at which to store and display the bin labels. Default value is 3.
Returns:
LabelTimes : Instance of labels.
Examples:
These are the target values for the examples.
>>> data = [226.93, 47.95, 283.46, 31.54]
>>> lt = LabelTimes({'target': data})
>>> lt
target
0 226.93
1 47.95
2 283.46
3 31.54
Bin values using equal-widths.
>>> lt.bin(2)
target
0 (157.5, 283.46]
1 (31.288, 157.5]
2 (157.5, 283.46]
3 (31.288, 157.5]
Bin values using custom-widths.
>>> lt.bin([0, 200, 400])
target
0 (200, 400]
1 (0, 200]
2 (200, 400]
3 (0, 200]
Bin values using infinite edges.
>>> lt.bin(['-inf', 100, 'inf'])
target
0 (100.0, inf]
1 (-inf, 100.0]
2 (100.0, inf]
3 (-inf, 100.0]
Bin values using quartiles.
>>> lt.bin(4, quantiles=True)
target
0 (137.44, 241.062]
1 (43.848, 137.44]
2 (241.062, 283.46]
3 (31.538999999999998, 43.848]
Bin values using custom quantiles with precision.
>>> lt.bin([0, .5, 1], quantiles=True, precision=1)
target
0 (137.4, 283.5]
1 (31.4, 137.4]
2 (137.4, 283.5]
3 (31.4, 137.4]
Assign labels to bins.
>>> lt.bin(2, labels=['low', 'high'])
target
0 high
1 low
2 high
3 low
""" # noqa
self._assert_single_target()
target_column = self.target_columns[0]
values = self[target_column].values
if quantiles:
values = pd.qcut(values, q=bins, labels=labels, precision=precision)
else:
if isinstance(bins, list):
for i, edge in enumerate(bins):
if edge in ["-inf", "inf"]:
bins[i] = float(edge)
values = pd.cut(
values, bins=bins, labels=labels, right=right, precision=precision
)
transform = {
"transform": "bin",
"bins": bins,
"quantiles": quantiles,
"labels": labels,
"right": right,
"precision": precision,
}
lt = self.copy()
lt[target_column] = values
lt.transforms.append(transform)
lt.target_types[target_column] = "discrete"
return lt
def _sample(self, key, value, settings, random_state=None, replace=False):
"""Returns a random sample of labels.
Args:
key (str) : Determines the sampling method. Can either be 'n' or 'frac'.
value (int or float) : Quantity to sample.
settings (dict) : Transform settings used for sampling.
random_state (int) : Seed for the random number generator.
replace (bool) : Sample with or without replacement. Default value is False.
Returns:
LabelTimes : Random sample of labels.
"""
sample = super().sample(
random_state=random_state, replace=replace, **{key: value}
)
return sample
def _sample_per_label(self, key, value, settings, random_state=None, replace=False):
"""Returns a random sample per label.
Args:
key (str) : Determines the sampling method. Can either be 'n' or 'frac'.
value (dict) : Quantity to sample per label.
settings (dict) : Transform settings used for sampling.
random_state (int) : Seed for the random number generator.
replace (bool) : Sample with or without replacement. Default value is False.
Returns:
LabelTimes : Random sample per label.
"""
        sample_per_label = []
        target_column = self.target_columns[0]
        for label, quantity in value.items():
            subset = self[self[target_column] == label]
            sample = subset._sample(
                key, quantity, settings, random_state=random_state, replace=replace
            )
            sample_per_label.append(sample)
        sample = pd.concat(sample_per_label, axis=0, sort=False)
        return sample
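# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of `threshold` and `apply_lead` on a continuous target,
# modeled on the docstring example shown for `select`. The column names and
# values below are hypothetical.
if __name__ == "__main__":
    data = {
        "entity": [0, 0, 1, 1],
        "time": pd.to_datetime(["2020-01-01", "2020-01-02", "2020-01-03", "2020-01-04"]),
        "spent": [10.0, 250.0, 75.0, 300.0],
    }
    lt = LabelTimes(data=data, target_entity="entity", target_columns=["spent"])
    binary = lt.threshold(100)      # spent > 100 becomes True/False; type becomes "discrete"
    shifted = lt.apply_lead("2d")   # cutoff times are shifted two days earlier
    print(binary)
    print(shifted)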
import numpy as np
import pandas as pd
from .util import cartesian
class Namespace:
"""Holds all Variables that are defined
"""
def __init__(self):
pass
def _add(self, name, obj):
setattr(self, name, obj)
NS = Namespace()
class Variable:
"""A discrete variable, with a distribution over N states.
"""
def __init__(self, name, n_states):
"""Name the variable, and say how many states it has. Variables start
off as unassigned.
"""
global NS
assert isinstance(name, str)
assert name.isalnum()
self.name = name
self.n_states = n_states
# Generate the actual states; this makes it easy to work with.
self.states = range(n_states)
NS._add(name, self)
def uniform(self):
"""Return a uniform distribution for this variable."""
return np.ones(self.n_states) / float(self.n_states)
def with_state(self, state):
"""Return a distribution with just this state set."""
assert state in self.states
dist = np.zeros(self.n_states)
dist[state] = 1.0
return dist
def make_valid_distribution(self, distn):
"""Convert distribution and check it."""
valid_dist = np.array(distn, dtype=float)
assert valid_dist.shape == (self.n_states,)
assert np.isclose(valid_dist.sum(), 1.0)
return valid_dist
def __repr__(self):
return "({})".format(self.name)
def expand_variables(vs):
"""Make sure we have a list of Variables
This will return a flattened list of variables, no matter what you send
into it.
"""
try:
# This won't work if it ain't iterable
vsi = iter(vs)
list_of = []
for v in vsi:
list_of.extend(expand_variables(v))
return list_of
except TypeError:
# Okay, it must be a single Variable. Return it in a list.
assert isinstance(vs, Variable)
return [vs]
def make_variables(strings, n_states):
"""Just a shortcut for making lots of variables"""
var_names = strings.split()
return [Variable(v, n_states) for v in var_names]
class Distribution:
"""Base class for a distribution over one or more variables
"""
P_LABEL = "Pr"
def __init__(self, variables, pr=None):
# Everything should be a variable
for v in variables:
assert isinstance(v, Variable)
# No duplicates!
assert len(set(variables)) == len(variables)
self.variables = list(variables)
self.names = [v.name for v in self.variables]
self.probabilities = pr
@staticmethod
def generate_from_func(variables, func):
names = [v.name for v in variables]
recs = []
for vals in func:
assert len(vals) == len(variables) + 1
for var, val in zip(variables, vals[:-1]):
assert val in var.states
recs.append(vals)
cols = names + [Distribution.P_LABEL]
        df = pd.DataFrame.from_records(recs, columns=cols)
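# --- Illustrative usage sketch (not part of the original module) ---
# How Variables might be declared and queried. `make_variables` registers each
# Variable in the global Namespace `NS`, so they are also reachable as NS.A, NS.B.
if __name__ == "__main__":
    a, b = make_variables("A B", 2)
    print(a.uniform())                 # [0.5 0.5] -- uniform over the 2 states
    print(b.with_state(1))             # [0. 1.]   -- all probability mass on state 1
    print(expand_variables([a, [b]]))  # [(A), (B)] -- flattened list of Variables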
from collections import OrderedDict
import datetime
from datetime import timedelta
from io import StringIO
import json
import os
import numpy as np
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas._testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
_mixed_frame = _frame.copy()
def assert_json_roundtrip_equal(result, expected, orient):
if orient == "records" or orient == "values":
expected = expected.reset_index(drop=True)
if orient == "values":
expected.columns = range(len(expected.columns))
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning")
class TestPandasContainer:
@pytest.fixture(autouse=True)
def setup(self):
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
yield
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self, orient):
df = DataFrame(
[["a", "b"], ["c", "d"]],
index=['index " 1', "index / 2"],
columns=["a \\ b", "y / z"],
)
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["split", "records", "values"])
def test_frame_non_unique_index(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["index", "columns"])
def test_frame_non_unique_index_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
msg = f"DataFrame index must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
@pytest.mark.parametrize("orient", ["split", "values"])
@pytest.mark.parametrize(
"data",
[
[["a", "b"], ["c", "d"]],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
],
)
def test_frame_non_unique_columns(self, orient, data):
df = DataFrame(data, index=[1, 2], columns=["x", "x"])
result = read_json(
df.to_json(orient=orient), orient=orient, convert_dates=["x"]
)
if orient == "values":
expected = pd.DataFrame(data)
if expected.iloc[:, 0].dtype == "datetime64[ns]":
# orient == "values" by default will write Timestamp objects out
# in milliseconds; these are internally stored in nanosecond,
# so divide to get where we need
# TODO: a to_epoch method would also solve; see GH 14772
expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000
elif orient == "split":
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("orient", ["index", "columns", "records"])
def test_frame_non_unique_columns_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
msg = f"DataFrame columns must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
def test_frame_default_orient(self, float_frame):
assert float_frame.to_json() == float_frame.to_json(orient="columns")
@pytest.mark.parametrize("dtype", [False, float])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype, float_frame):
data = float_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = float_frame
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [False, np.int64])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
data = self.intframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = self.intframe.copy()
if (
numpy
and (is_platform_32bit() or is_platform_windows())
and not dtype
and orient != "split"
):
# TODO: see what is causing roundtrip dtype loss
expected = expected.astype(np.int32)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [None, np.float64, np.int, "U3"])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
df = DataFrame(
np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)],
dtype=dtype,
)
# TODO: do we even need to support U3 dtypes?
if numpy and dtype == "U3" and orient != "split":
pytest.xfail("Can't decode directly to array")
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = df.copy()
if not dtype:
expected = expected.astype(np.int64)
# index columns, and records orients cannot fully preserve the string
# dtype for axes as the index and column labels are used as keys in
# JSON objects. JSON keys are by definition strings, so there's no way
# to disambiguate whether those keys actually were strings or numeric
# beforehand and numeric wins out.
# TODO: Split should be able to support this
if convert_axes and (orient in ("split", "index", "columns")):
expected.columns = expected.columns.astype(np.int64)
expected.index = expected.index.astype(np.int64)
elif orient == "records" and convert_axes:
expected.columns = expected.columns.astype(np.int64)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_categorical(self, orient, convert_axes, numpy):
# TODO: create a better frame to test with and improve coverage
if orient in ("index", "columns"):
pytest.xfail(f"Can't have duplicate index values for orient '{orient}')")
data = self.categorical.to_json(orient=orient)
if numpy and orient in ("records", "values"):
pytest.xfail(f"Orient {orient} is broken with numpy=True")
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.categorical.copy()
expected.index = expected.index.astype(str) # Categorical not preserved
expected.index.name = None # index names aren't preserved in JSON
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_empty(self, orient, convert_axes, numpy, empty_frame):
data = empty_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = empty_frame.copy()
# TODO: both conditions below are probably bugs
if convert_axes:
expected.index = expected.index.astype(float)
expected.columns = expected.columns.astype(float)
if numpy and orient == "values":
expected = expected.reindex([0], axis=1).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_timestamp(self, orient, convert_axes, numpy):
# TODO: improve coverage with date_format parameter
data = self.tsframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.tsframe.copy()
if not convert_axes: # one off for ts handling
# DTI gets converted to epoch values
idx = expected.index.astype(np.int64) // 1000000
if orient != "split": # TODO: handle consistently across orients
idx = idx.astype(str)
expected.index = idx
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_mixed(self, orient, convert_axes, numpy):
if numpy and orient != "split":
pytest.xfail("Can't decode directly to array")
index = pd.Index(["a", "b", "c", "d", "e"])
values = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": [True, False, True, False, True],
}
df = DataFrame(data=values, index=index)
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = df.copy()
expected = expected.assign(**expected.select_dtypes("number").astype(np.int64))
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize(
"data,msg,orient",
[
('{"key":b:a:d}', "Expected object or value", "columns"),
# too few indices
(
'{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"Shape of passed values is \(3, 2\), indices imply \(2, 2\)",
"split",
),
# too many columns
(
'{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
"3 columns passed, passed data had 2 columns",
"split",
),
# bad key
(
'{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"unexpected key\(s\): badkey",
"split",
),
],
)
def test_frame_from_json_bad_data_raises(self, data, msg, orient):
with pytest.raises(ValueError, match=msg):
read_json(StringIO(data), orient=orient)
@pytest.mark.parametrize("dtype", [True, False])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_frame_from_json_missing_data(self, orient, convert_axes, numpy, dtype):
num_df = DataFrame([[1, 2], [4, 5, 6]])
result = read_json(
num_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
assert np.isnan(result.iloc[0, 2])
obj_df = DataFrame([["1", "2"], ["4", "5", "6"]])
result = read_json(
obj_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
if not dtype: # TODO: Special case for object data; maybe a bug?
assert result.iloc[0, 2] is None
else:
assert np.isnan(result.iloc[0, 2])
@pytest.mark.parametrize("inf", [np.inf, np.NINF])
@pytest.mark.parametrize("dtype", [True, False])
def test_frame_infinity(self, orient, inf, dtype):
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
df.loc[0, 2] = inf
result = read_json(df.to_json(), dtype=dtype)
assert np.isnan(result.iloc[0, 2])
@pytest.mark.skipif(
is_platform_32bit(), reason="not compliant on 32-bit, xref #15865"
)
@pytest.mark.parametrize(
"value,precision,expected_val",
[
(0.95, 1, 1.0),
(1.95, 1, 2.0),
(-1.95, 1, -2.0),
(0.995, 2, 1.0),
(0.9995, 3, 1.0),
(0.99999999999999944, 15, 1.0),
],
)
def test_frame_to_json_float_precision(self, value, precision, expected_val):
df = pd.DataFrame([dict(a_float=value)])
encoded = df.to_json(double_precision=precision)
assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}'
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="garbage")
def test_frame_empty(self):
df = DataFrame(columns=["jim", "joe"])
assert not df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
# GH 7445
result = pd.DataFrame({"test": []}, index=[]).to_json(orient="columns")
expected = '{"test":{}}'
assert result == expected
def test_frame_empty_mixedtype(self):
# mixed type
df = DataFrame(columns=["jim", "joe"])
df["joe"] = df["joe"].astype("i8")
assert df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
def test_frame_mixedtype_orient(self): # GH10289
vals = [
[10, 1, "foo", 0.1, 0.01],
[20, 2, "bar", 0.2, 0.02],
[30, 3, "baz", 0.3, 0.03],
[40, 4, "qux", 0.4, 0.04],
]
df = DataFrame(
vals, index=list("abcd"), columns=["1st", "2nd", "3rd", "4th", "5th"]
)
assert df._is_mixed_type
right = df.copy()
for orient in ["split", "index", "columns"]:
inp = df.to_json(orient=orient)
left = read_json(inp, orient=orient, convert_axes=False)
tm.assert_frame_equal(left, right)
right.index = np.arange(len(df))
inp = df.to_json(orient="records")
left = read_json(inp, orient="records", convert_axes=False)
tm.assert_frame_equal(left, right)
right.columns = np.arange(df.shape[1])
inp = df.to_json(orient="values")
left = read_json(inp, orient="values", convert_axes=False)
tm.assert_frame_equal(left, right)
def test_v12_compat(self, datapath):
df = DataFrame(
[
[1.56808523, 0.65727391, 1.81021139, -0.17251653],
[-0.2550111, -0.08072427, -0.03202878, -0.17581665],
[1.51493992, 0.11805825, 1.629455, -1.31506612],
[-0.02765498, 0.44679743, 0.33192641, -0.27885413],
[0.05951614, -2.69652057, 1.28163262, 0.34703478],
],
columns=["A", "B", "C", "D"],
index=pd.date_range("2000-01-03", "2000-01-07"),
)
df["date"] = pd.Timestamp("19920106 18:21:32.12")
df.iloc[3, df.columns.get_loc("date")] = pd.Timestamp("20130101")
df["modified"] = df["date"]
df.iloc[1, df.columns.get_loc("modified")] = pd.NaT
dirpath = datapath("io", "json", "data")
v12_json = os.path.join(dirpath, "tsframe_v012.json")
df_unser = pd.read_json(v12_json)
tm.assert_frame_equal(df, df_unser)
df_iso = df.drop(["modified"], axis=1)
v12_iso_json = os.path.join(dirpath, "tsframe_iso_v012.json")
df_unser_iso = pd.read_json(v12_iso_json)
tm.assert_frame_equal(df_iso, df_unser_iso)
def test_blocks_compat_GH9037(self):
index = pd.date_range("20000101", periods=10, freq="H")
df_mixed = DataFrame(
OrderedDict(
float_1=[
-0.92077639,
0.77434435,
1.25234727,
0.61485564,
-0.60316077,
0.24653374,
0.28668979,
-2.51969012,
0.95748401,
-1.02970536,
],
int_1=[
19680418,
75337055,
99973684,
65103179,
79373900,
40314334,
21290235,
4991321,
41903419,
16008365,
],
str_1=[
"78c608f1",
"64a99743",
"13d2ff52",
"ca7f4af2",
"97236474",
"bde7e214",
"1a6bde47",
"b1190be5",
"7a669144",
"8d64d068",
],
float_2=[
-0.0428278,
-1.80872357,
3.36042349,
-0.7573685,
-0.48217572,
0.86229683,
1.08935819,
0.93898739,
-0.03030452,
1.43366348,
],
str_2=[
"14f04af9",
"d085da90",
"4bcfac83",
"81504caf",
"2ffef4a9",
"08e2f5c4",
"07e1af03",
"addbd4a7",
"1f6a09ba",
"4bfc4d87",
],
int_2=[
86967717,
98098830,
51927505,
20372254,
12601730,
20884027,
34193846,
10561746,
24867120,
76131025,
],
),
index=index,
)
# JSON deserialisation always creates unicode strings
df_mixed.columns = df_mixed.columns.astype("unicode")
df_roundtrip = pd.read_json(df_mixed.to_json(orient="split"), orient="split")
tm.assert_frame_equal(
df_mixed,
df_roundtrip,
check_index_type=True,
check_column_type=True,
by_blocks=True,
check_exact=True,
)
def test_frame_nonprintable_bytes(self):
# GH14256: failing column caused segfaults, if it is not the last one
class BinaryThing:
def __init__(self, hexed):
self.hexed = hexed
self.binary = bytes.fromhex(hexed)
def __str__(self) -> str:
return self.hexed
hexed = "574b4454ba8c5eb4f98a8f45"
binthing = BinaryThing(hexed)
# verify the proper conversion of printable content
df_printable = DataFrame({"A": [binthing.hexed]})
assert df_printable.to_json() == f'{{"A":{{"0":"{hexed}"}}}}'
# check if non-printable content throws appropriate Exception
df_nonprintable = DataFrame({"A": [binthing]})
msg = "Unsupported UTF-8 sequence length when encoding string"
with pytest.raises(OverflowError, match=msg):
df_nonprintable.to_json()
# the same with multiple columns threw segfaults
df_mixed = DataFrame({"A": [binthing], "B": [1]}, columns=["A", "B"])
with pytest.raises(OverflowError):
df_mixed.to_json()
# default_handler should resolve exceptions for non-string types
result = df_nonprintable.to_json(default_handler=str)
expected = f'{{"A":{{"0":"{hexed}"}}}}'
assert result == expected
assert (
df_mixed.to_json(default_handler=str)
== f'{{"A":{{"0":"{hexed}"}},"B":{{"0":1}}}}'
)
def test_label_overflow(self):
# GH14256: buffer length not checked when writing label
result = pd.DataFrame({"bar" * 100000: [1], "foo": [1337]}).to_json()
expected = f'{{"{"bar" * 100000}":{{"0":1}},"foo":{{"0":1337}}}}'
assert result == expected
def test_series_non_unique_index(self):
s = Series(["a", "b"], index=[1, 1])
msg = "Series index must be unique for orient='index'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="index")
tm.assert_series_equal(
s, read_json(s.to_json(orient="split"), orient="split", typ="series")
)
unser = read_json(s.to_json(orient="records"), orient="records", typ="series")
tm.assert_numpy_array_equal(s.values, unser.values)
def test_series_default_orient(self, string_series):
assert string_series.to_json() == string_series.to_json(orient="index")
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_simple(self, orient, numpy, string_series):
data = string_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = string_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [False, None])
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_object(self, orient, numpy, dtype, object_series):
data = object_series.to_json(orient=orient)
result = pd.read_json(
data, typ="series", orient=orient, numpy=numpy, dtype=dtype
)
expected = object_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_empty(self, orient, numpy, empty_series):
data = empty_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = empty_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
else:
expected.index = expected.index.astype(float)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_timeseries(self, orient, numpy, datetime_series):
data = datetime_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = datetime_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [np.float64, np.int])
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_numeric(self, orient, numpy, dtype):
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"])
data = s.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = s.copy()
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
tm.assert_series_equal(result, expected)
def test_series_to_json_except(self):
s = Series([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="garbage")
def test_series_from_json_precise_float(self):
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", precise_float=True)
tm.assert_series_equal(result, s, check_index_type=False)
def test_series_with_dtype(self):
# GH 21986
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", dtype=np.int64)
expected = Series([4] * 3)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dtype,expected",
[
(True, Series(["2000-01-01"], dtype="datetime64[ns]")),
(False, Series([946684800000])),
],
)
def test_series_with_dtype_datetime(self, dtype, expected):
s = Series(["2000-01-01"], dtype="datetime64[ns]")
data = s.to_json()
result = pd.read_json(data, typ="series", dtype=dtype)
tm.assert_series_equal(result, expected)
def test_frame_from_json_precise_float(self):
df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
result = read_json(df.to_json(), precise_float=True)
tm.assert_frame_equal(
result, df, check_index_type=False, check_column_type=False
)
def test_typ(self):
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"], dtype="int64")
result = read_json(s.to_json(), typ=None)
tm.assert_series_equal(result, s)
def test_reconstruction_index(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["A", "B", "C"])
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
def test_path(self, float_frame):
with tm.ensure_clean("test.json") as path:
for df in [
float_frame,
self.intframe,
self.tsframe,
self.mixed_frame,
]:
df.to_json(path)
read_json(path)
def test_axis_dates(self, datetime_series):
# frame
json = self.tsframe.to_json()
result = read_json(json)
tm.assert_frame_equal(result, self.tsframe)
# series
json = datetime_series.to_json()
result = read_json(json, typ="series")
tm.assert_series_equal(result, datetime_series, check_names=False)
assert result.name is None
def test_convert_dates(self, datetime_series):
# frame
df = self.tsframe.copy()
df["date"] = Timestamp("20130101")
json = df.to_json()
result = read_json(json)
tm.assert_frame_equal(result, df)
df["foo"] = 1.0
json = df.to_json(date_unit="ns")
result = read_json(json, convert_dates=False)
expected = df.copy()
expected["date"] = expected["date"].values.view("i8")
expected["foo"] = expected["foo"].astype("int64")
tm.assert_frame_equal(result, expected)
# series
ts = Series(Timestamp("20130101"), index=datetime_series.index)
json = ts.to_json()
result = read_json(json, typ="series")
tm.assert_series_equal(result, ts)
@pytest.mark.parametrize("date_format", ["epoch", "iso"])
@pytest.mark.parametrize("as_object", [True, False])
@pytest.mark.parametrize(
"date_typ", [datetime.date, datetime.datetime, pd.Timestamp]
)
def test_date_index_and_values(self, date_format, as_object, date_typ):
data = [date_typ(year=2020, month=1, day=1), pd.NaT]
if as_object:
data.append("a")
ser = pd.Series(data, index=data)
result = ser.to_json(date_format=date_format)
if date_format == "epoch":
expected = '{"1577836800000":1577836800000,"null":null}'
else:
expected = (
'{"2020-01-01T00:00:00.000Z":"2020-01-01T00:00:00.000Z","null":null}'
)
if as_object:
expected = expected.replace("}", ',"a":"a"}')
assert result == expected
@pytest.mark.parametrize(
"infer_word",
[
"trade_time",
"date",
"datetime",
"sold_at",
"modified",
"timestamp",
"timestamps",
],
)
def test_convert_dates_infer(self, infer_word):
# GH10747
from pandas.io.json import dumps
data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}]
expected = DataFrame(
[[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word]
)
result = read_json(dumps(data))[["id", infer_word]]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"date,date_unit",
[
("20130101 20:43:42.123", None),
("20130101 20:43:42", "s"),
("20130101 20:43:42.123", "ms"),
("20130101 20:43:42.123456", "us"),
("20130101 20:43:42.123456789", "ns"),
],
)
def test_date_format_frame(self, date, date_unit):
df = self.tsframe.copy()
df["date"] = Timestamp(date)
df.iloc[1, df.columns.get_loc("date")] = pd.NaT
df.iloc[5, df.columns.get_loc("date")] = pd.NaT
if date_unit:
json = df.to_json(date_format="iso", date_unit=date_unit)
else:
json = df.to_json(date_format="iso")
result = read_json(json)
expected = df.copy()
expected.index = expected.index.tz_localize("UTC")
expected["date"] = expected["date"].dt.tz_localize("UTC")
tm.assert_frame_equal(result, expected)
def test_date_format_frame_raises(self):
df = self.tsframe.copy()
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
df.to_json(date_format="iso", date_unit="foo")
@pytest.mark.parametrize(
"date,date_unit",
[
("20130101 20:43:42.123", None),
("20130101 20:43:42", "s"),
("20130101 20:43:42.123", "ms"),
("20130101 20:43:42.123456", "us"),
("20130101 20:43:42.123456789", "ns"),
],
)
def test_date_format_series(self, date, date_unit, datetime_series):
ts = Series(Timestamp(date), index=datetime_series.index)
ts.iloc[1] = pd.NaT
ts.iloc[5] = pd.NaT
if date_unit:
json = ts.to_json(date_format="iso", date_unit=date_unit)
else:
json = ts.to_json(date_format="iso")
result = read_json(json, typ="series")
expected = ts.copy()
expected.index = expected.index.tz_localize("UTC")
expected = expected.dt.tz_localize("UTC")
tm.assert_series_equal(result, expected)
def test_date_format_series_raises(self, datetime_series):
ts = Series(Timestamp("20130101 20:43:42.123"), index=datetime_series.index)
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
ts.to_json(date_format="iso", date_unit="foo")
@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])
def test_date_unit(self, unit):
df = self.tsframe.copy()
df["date"] = Timestamp("20130101 20:43:42")
dl = df.columns.get_loc("date")
df.iloc[1, dl] = Timestamp("19710101 20:43:42")
df.iloc[2, dl] = Timestamp("21460101 20:43:42")
df.iloc[4, dl] = pd.NaT
json = df.to_json(date_format="epoch", date_unit=unit)
# force date unit
result = read_json(json, date_unit=unit)
tm.assert_frame_equal(result, df)
# detect date unit
result = read_json(json, date_unit=None)
tm.assert_frame_equal(result, df)
def test_weird_nested_json(self):
# this used to core dump the parser
s = r"""{
"status": "success",
"data": {
"posts": [
{
"id": 1,
"title": "A blog post",
"body": "Some useful content"
},
{
"id": 2,
"title": "Another blog post",
"body": "More content"
}
]
}
}"""
read_json(s)
def test_doc_example(self):
dfj2 = DataFrame(np.random.randn(5, 2), columns=list("AB"))
dfj2["date"] = Timestamp("20130101")
dfj2["ints"] = range(5)
dfj2["bools"] = True
dfj2.index = pd.date_range("20130101", periods=5)
json = dfj2.to_json()
result = read_json(json, dtype={"ints": np.int64, "bools": np.bool_})
tm.assert_frame_equal(result, result)
def test_misc_example(self):
# parsing unordered input fails
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]', numpy=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
error_msg = """DataFrame\\.index are different
DataFrame\\.index values are different \\(100\\.0 %\\)
\\[left\\]: Index\\(\\['a', 'b'\\], dtype='object'\\)
\\[right\\]: RangeIndex\\(start=0, stop=2, step=1\\)"""
with pytest.raises(AssertionError, match=error_msg):
tm.assert_frame_equal(result, expected, check_index_type=False)
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]')
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
@tm.network
@pytest.mark.single
def test_round_trip_exception_(self):
# GH 3867
csv = "https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv"
df = pd.read_csv(csv)
s = df.to_json()
result = pd.read_json(s)
tm.assert_frame_equal(result.reindex(index=df.index, columns=df.columns), df)
@tm.network
@pytest.mark.single
@pytest.mark.parametrize(
"field,dtype",
[
["created_at", pd.DatetimeTZDtype(tz="UTC")],
["closed_at", "datetime64[ns]"],
["updated_at", pd.DatetimeTZDtype(tz="UTC")],
],
)
def test_url(self, field, dtype):
url = "https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5" # noqa
result = read_json(url, convert_dates=True)
assert result[field].dtype == dtype
def test_timedelta(self):
converter = lambda x: pd.to_timedelta(x, unit="ms")
s = Series([timedelta(23), timedelta(seconds=5)])
assert s.dtype == "timedelta64[ns]"
result = pd.read_json(s.to_json(), typ="series").apply(converter)
tm.assert_series_equal(result, s)
s = Series([timedelta(23), timedelta(seconds=5)], index=pd.Index([0, 1]))
assert s.dtype == "timedelta64[ns]"
result = pd.read_json(s.to_json(), typ="series").apply(converter)
tm.assert_series_equal(result, s)
frame = DataFrame([timedelta(23), timedelta(seconds=5)])
assert frame[0].dtype == "timedelta64[ns]"
tm.assert_frame_equal(frame, pd.read_json(frame.to_json()).apply(converter))
frame = DataFrame(
{
"a": [timedelta(days=23), timedelta(seconds=5)],
"b": [1, 2],
"c": pd.date_range(start="20130101", periods=2),
}
)
result = pd.read_json(frame.to_json(date_unit="ns"))
result["a"] = pd.to_timedelta(result.a, unit="ns")
result["c"] = pd.to_datetime(result.c)
tm.assert_frame_equal(frame, result)
def test_mixed_timedelta_datetime(self):
frame = DataFrame(
{"a": [timedelta(23), pd.Timestamp("20130101")]}, dtype=object
)
expected = DataFrame(
{"a": [pd.Timedelta(frame.a[0]).value, pd.Timestamp(frame.a[1]).value]}
)
result = pd.read_json(frame.to_json(date_unit="ns"), dtype={"a": "int64"})
tm.assert_frame_equal(result, expected, check_index_type=False)
@pytest.mark.parametrize("as_object", [True, False])
@pytest.mark.parametrize("date_format", ["iso", "epoch"])
@pytest.mark.parametrize("timedelta_typ", [pd.Timedelta, timedelta])
def test_timedelta_to_json(self, as_object, date_format, timedelta_typ):
# GH28156: to_json not correctly formatting Timedelta
data = [timedelta_typ(days=1), timedelta_typ(days=2), pd.NaT]
if as_object:
data.append("a")
ser = pd.Series(data, index=data)
if date_format == "iso":
expected = (
'{"P1DT0H0M0S":"P1DT0H0M0S","P2DT0H0M0S":"P2DT0H0M0S","null":null}'
)
else:
expected = '{"86400000":86400000,"172800000":172800000,"null":null}'
if as_object:
expected = expected.replace("}", ',"a":"a"}')
result = ser.to_json(date_format=date_format)
assert result == expected
def test_default_handler(self):
value = object()
frame = DataFrame({"a": [7, value]})
expected = DataFrame({"a": [7, str(value)]})
result = pd.read_json(frame.to_json(default_handler=str))
tm.assert_frame_equal(expected, result, check_index_type=False)
def test_default_handler_indirect(self):
from pandas.io.json import dumps
def default(obj):
if isinstance(obj, complex):
return [("mathjs", "Complex"), ("re", obj.real), ("im", obj.imag)]
return str(obj)
df_list = [
9,
DataFrame(
{"a": [1, "STR", complex(4, -5)], "b": [float("nan"), None, "N/A"]},
columns=["a", "b"],
),
]
expected = (
'[9,[[1,null],["STR",null],[[["mathjs","Complex"],'
'["re",4.0],["im",-5.0]],"N\\/A"]]]'
)
assert dumps(df_list, default_handler=default, orient="values") == expected
def test_default_handler_numpy_unsupported_dtype(self):
# GH12554 to_json raises 'Unhandled numpy dtype 15'
df = DataFrame(
{"a": [1, 2.3, complex(4, -5)], "b": [float("nan"), None, complex(1.2, 0)]},
columns=["a", "b"],
)
expected = (
'[["(1+0j)","(nan+0j)"],'
'["(2.3+0j)","(nan+0j)"],'
'["(4-5j)","(1.2+0j)"]]'
)
assert df.to_json(default_handler=str, orient="values") == expected
def test_default_handler_raises(self):
msg = "raisin"
def my_handler_raises(obj):
raise TypeError(msg)
with pytest.raises(TypeError, match=msg):
DataFrame({"a": [1, 2, object()]}).to_json(
default_handler=my_handler_raises
)
with pytest.raises(TypeError, match=msg):
DataFrame({"a": [1, 2, complex(4, -5)]}).to_json(
default_handler=my_handler_raises
)
def test_categorical(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]})
df["B"] = df["A"]
expected = df.to_json()
df["B"] = df["A"].astype("category")
assert expected == df.to_json()
s = df["A"]
sc = df["B"]
assert s.to_json() == sc.to_json()
def test_datetime_tz(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
tz_range = pd.date_range("20130101", periods=3, tz="US/Eastern")
tz_naive = tz_range.tz_convert("utc").tz_localize(None)
df = DataFrame({"A": tz_range, "B": pd.date_range("20130101", periods=3)})
df_naive = df.copy()
df_naive["A"] = tz_naive
expected = df_naive.to_json()
assert expected == df.to_json()
stz = Series(tz_range)
s_naive = Series(tz_naive)
assert stz.to_json() == s_naive.to_json()
def test_sparse(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = pd.DataFrame(np.random.randn(10, 4))
df.loc[:8] = np.nan
sdf = df.astype("Sparse")
expected = df.to_json()
assert expected == sdf.to_json()
s = pd.Series(np.random.randn(10))
s.loc[:8] = np.nan
ss = s.astype("Sparse")
expected = s.to_json()
assert expected == ss.to_json()
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-10 05:00:00Z"),
Timestamp("2013-01-10 00:00:00", tz="US/Eastern"),
Timestamp("2013-01-10 00:00:00-0500"),
],
)
def test_tz_is_utc(self, ts):
from pandas.io.json import dumps
exp = '"2013-01-10T05:00:00.000Z"'
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
@pytest.mark.parametrize(
"tz_range",
[
pd.date_range("2013-01-01 05:00:00Z", periods=2),
pd.date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern"),
pd.date_range("2013-01-01 00:00:00-0500", periods=2),
],
)
def test_tz_range_is_utc(self, tz_range):
from pandas.io.json import dumps
exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'
dfexp = (
'{"DT":{'
'"0":"2013-01-01T05:00:00.000Z",'
'"1":"2013-01-02T05:00:00.000Z"}}'
)
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({"DT": dti})
result = dumps(df, iso_dates=True)
assert result == dfexp
def test_read_inline_jsonl(self):
# GH9180
result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
@td.skip_if_not_us_locale
def test_read_s3_jsonl(self, s3_resource):
# GH17200
result = read_json("s3n://pandas-test/items.jsonl", lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_read_local_jsonl(self):
# GH17200
with tm.ensure_clean("tmp_items.json") as path:
with open(path, "w") as infile:
infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n')
result = read_json(path, lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_read_jsonl_unicode_chars(self):
# GH15132: non-ascii unicode characters
# \u201d == RIGHT DOUBLE QUOTATION MARK
# simulate file handle
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
json = StringIO(json)
result = read_json(json, lines=True)
expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
        tm.assert_frame_equal(result, expected)
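# --- Illustrative round-trip sketch (not part of the original test module) ---
# The core pattern these tests exercise: serialize a frame with one of the
# supported orients, read it back with the same orient, and compare after the
# index/column normalization done in `assert_json_roundtrip_equal`.
if __name__ == "__main__":
    frame = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "y"])
    for orient in ("split", "records", "index", "columns", "values"):
        roundtrip = read_json(frame.to_json(orient=orient), orient=orient)
        assert_json_roundtrip_equal(roundtrip, frame.copy(), orient)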
import pandas as pd
import numpy as np
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from scipy import stats
import plotly.graph_objs as go
import cufflinks
cufflinks.go_offline()
def make_hist(df, x, category=None):
"""
Make an interactive histogram, optionally segmented by `category`
:param df: dataframe of data
:param x: string of column to use for plotting
:param category: string representing column to segment by
:return figure: a plotly histogram to show with iplot or plot
"""
if category is not None:
data = []
for name, group in df.groupby(category):
data.append(go.Histogram(dict(x=group[x], name=name)))
else:
data = [go.Histogram(dict(x=df[x]))]
layout = go.Layout(
yaxis=dict(title="Count"),
xaxis=dict(title=x.replace("_", " ").title()),
title=f"{x.replace('_', ' ').title()} Distribution by {category.replace('_', ' ').title()}"
if category
else f"{x.replace('_', ' ').title()} Distribution",
)
figure = go.Figure(data=data, layout=layout)
return figure
def make_cum_plot(df, y, category=None, ranges=False):
"""
Make an interactive cumulative plot, optionally segmented by `category`
:param df: dataframe of data, must have a `published_date` column
:param y: string of column to use for plotting or list of two strings for double y axis
:param category: string representing column to segment by
:param ranges: boolean for whether to add range slider and range selector
:return figure: a plotly plot to show with iplot or plot
"""
if category is not None:
data = []
for i, (name, group) in enumerate(df.groupby(category)):
group.sort_values("published_date", inplace=True)
data.append(
go.Scatter(
x=group["published_date"],
y=group[y].cumsum(),
mode="lines+markers",
text=group["title"],
name=name,
marker=dict(size=10, opacity=0.8, symbol=i + 2),
)
)
else:
df.sort_values("published_date", inplace=True)
if len(y) == 2:
data = [
go.Scatter(
x=df["published_date"],
y=df[y[0]].cumsum(),
name=y[0].title(),
mode="lines+markers",
text=df["title"],
marker=dict(
size=10,
color="blue",
opacity=0.6,
line=dict(color="black"),
),
),
go.Scatter(
x=df["published_date"],
y=df[y[1]].cumsum(),
yaxis="y2",
name=y[1].title(),
mode="lines+markers",
text=df["title"],
marker=dict(
size=10,
color="red",
opacity=0.6,
line=dict(color="black"),
),
),
]
else:
data = [
go.Scatter(
x=df["published_date"],
y=df[y].cumsum(),
mode="lines+markers",
text=df["title"],
marker=dict(
size=12,
color="blue",
opacity=0.6,
line=dict(color="black"),
),
)
]
if len(y) == 2:
layout = go.Layout(
xaxis=dict(title="Published Date", type="date"),
yaxis=dict(title=y[0].replace("_", " ").title(), color="blue"),
yaxis2=dict(
title=y[1].replace("_", " ").title(),
color="red",
overlaying="y",
side="right",
),
font=dict(size=14),
title=f"Cumulative {y[0].title()} and {y[1].title()}",
)
else:
layout = go.Layout(
xaxis=dict(title="Published Date", type="date"),
yaxis=dict(title=y.replace("_", " ").title()),
font=dict(size=14),
title=f"Cumulative {y.replace('_', ' ').title()} by {category.replace('_', ' ').title()}"
if category is not None
else f"Cumulative {y.replace('_', ' ').title()}",
)
# Add a rangeselector and rangeslider for a data xaxis
if ranges:
rangeselector = dict(
buttons=list(
[
dict(count=1, label="1m", step="month", stepmode="backward"),
dict(count=6, label="6m", step="month", stepmode="backward"),
dict(count=1, label="1y", step="year", stepmode="backward"),
dict(step="all"),
]
)
)
rangeslider = dict(visible=True)
layout["xaxis"]["rangeselector"] = rangeselector
layout["xaxis"]["rangeslider"] = rangeslider
layout["width"] = 1000
layout["height"] = 600
figure = go.Figure(data=data, layout=layout)
return figure
def make_scatter_plot(
df,
x,
y,
fits=None,
xlog=False,
ylog=False,
category=None,
scale=None,
sizeref=2,
annotations=None,
ranges=False,
title_override=None,
):
"""
Make an interactive scatterplot, optionally segmented by `category`
:param df: dataframe of data
:param x: string of column to use for xaxis
:param y: string of column to use for yaxis
:param fits: list of strings of fits
:param xlog: boolean for making a log xaxis
    :param ylog: boolean for making a log yaxis
:param category: string representing categorical column to segment by, this must be a categorical
:param scale: string representing numerical column to size and color markers by, this must be numerical data
:param sizeref: float or integer for setting the size of markers according to the scale, only used if scale is set
:param annotations: text to display on the plot (dictionary)
:param ranges: boolean for whether to add a range slider and selector
:param title_override: String to override the title
:return figure: a plotly plot to show with iplot or plot
"""
if category is not None:
title = f"{y.replace('_', ' ').title()} vs {x.replace('_', ' ').title()} by {category.replace('_', ' ').title()}"
data = []
for i, (name, group) in enumerate(df.groupby(category)):
data.append(
go.Scatter(
x=group[x],
y=group[y],
mode="markers",
text=group["title"],
name=name,
marker=dict(size=8, symbol=i + 2),
)
)
else:
if scale is not None:
title = f"{y.replace('_', ' ').title()} vs {x.replace('_', ' ').title()} Scaled by {scale.title()}"
data = [
go.Scatter(
x=df[x],
y=df[y],
mode="markers",
text=df["title"],
marker=dict(
size=df[scale],
line=dict(color="black", width=0.5),
sizemode="area",
sizeref=sizeref,
opacity=0.8,
colorscale="Viridis",
color=df[scale],
showscale=True,
sizemin=2,
),
)
]
else:
df.sort_values(x, inplace=True)
title = f"{y.replace('_', ' ').title()} vs {x.replace('_', ' ').title()}"
data = [
go.Scatter(
x=df[x],
y=df[y],
mode="markers",
text=df["title"],
marker=dict(
size=12, color="blue", opacity=0.8, line=dict(color="black")
),
name="observations",
)
]
if fits is not None:
for fit in fits:
data.append(
go.Scatter(
x=df[x],
y=df[fit],
text=df["title"],
mode="lines+markers",
marker=dict(size=8, opacity=0.6),
line=dict(dash="dash"),
name=fit,
)
)
title += " with Fit"
layout = go.Layout(
annotations=annotations,
xaxis=dict(
title=x.replace("_", " ").title() + (" (log scale)" if xlog else ""),
type="log" if xlog else None,
),
yaxis=dict(
title=y.replace("_", " ").title() + (" (log scale)" if ylog else ""),
type="log" if ylog else None,
),
font=dict(size=14),
title=title if title_override is None else title_override,
)
# Add a rangeselector and rangeslider for a data xaxis
if ranges:
rangeselector = dict(
buttons=list(
[
dict(count=1, label="1m", step="month", stepmode="backward"),
dict(count=6, label="6m", step="month", stepmode="backward"),
dict(count=1, label="1y", step="year", stepmode="backward"),
dict(step="all"),
]
)
)
rangeslider = dict(visible=True)
layout["xaxis"]["rangeselector"] = rangeselector
layout["xaxis"]["rangeslider"] = rangeslider
layout["width"] = 1000
layout["height"] = 600
figure = go.Figure(data=data, layout=layout)
return figure
def make_linear_regression(df, x, y, intercept_0):
"""
Create a linear regression, either with the intercept set to 0 or
the intercept allowed to be fitted
:param df: dataframe with data
    :param x: string or list of strings for the name of the column(s) with x data
:param y: string for the name of the column with y data
:param intercept_0: boolean indicating whether to set the intercept to 0
"""
if isinstance(x, list):
lin_model = LinearRegression()
lin_model.fit(df[x], df[y])
        slopes, intercept = lin_model.coef_, lin_model.intercept_
df["predicted"] = lin_model.predict(df[x])
r2 = lin_model.score(df[x], df[y])
rmse = np.sqrt(mean_squared_error(y_true=df[y], y_pred=df["predicted"]))
equation = f'{y.replace("_", " ")} ='
names = ["r2", "rmse", "intercept"]
values = [r2, rmse, intercept]
for i, (p, s) in enumerate(zip(x, slopes)):
if (i + 1) % 3 == 0:
equation += f'<br>{s:.2f} * {p.replace("_", " ")} +'
else:
equation += f' {s:.2f} * {p.replace("_", " ")} +'
names.append(p)
values.append(s)
equation += f" {intercept:.2f}"
annotations = [
dict(
x=0.4 * df.index.max(),
y=0.9 * df[y].max(),
showarrow=False,
text=equation,
font=dict(size=10),
)
]
df["index"] = list(df.index)
figure = make_scatter_plot(
df, x="index", y=y, fits=["predicted"], annotations=annotations
)
summary = pd.DataFrame({"name": names, "value": values})
else:
if intercept_0:
lin_reg = sm.OLS(df[y], df[x]).fit()
df["fit_values"] = lin_reg.fittedvalues
summary = lin_reg.summary()
slope = float(lin_reg.params)
equation = f"${y.replace('_', ' ')} = {slope:.2f} * {x.replace('_', ' ')}$"
else:
lin_reg = stats.linregress(df[x], df[y])
intercept, slope = lin_reg.intercept, lin_reg.slope
params = ["pvalue", "rvalue", "slope", "intercept"]
values = []
for p in params:
values.append(getattr(lin_reg, p))
            summary = pd.DataFrame({"param": params, "value": values})
import duckdb
import pandas as pd
import numpy
import pytest
from datetime import date, timedelta
class TestMap(object):
def test_map(self, duckdb_cursor):
testrel = duckdb.values([1, 2])
conn = duckdb.connect()
conn.execute('CREATE TABLE t (a integer)')
empty_rel = conn.table('t')
newdf1 = testrel.map(lambda df : df['col0'].add(42).to_frame())
newdf2 = testrel.map(lambda df : df['col0'].astype('string').to_frame())
newdf3 = testrel.map(lambda df : df)
# column count differs from bind
def evil1(df):
if len(df) == 0:
return df['col0'].to_frame()
else:
return df
# column type differs from bind
def evil2(df):
if len(df) == 0:
df['col0'] = df['col0'].astype('string')
return df
# column name differs from bind
def evil3(df):
if len(df) == 0:
df = df.rename(columns={"col0" : "col42"})
return df
# does not return a df
def evil4(df):
return 42
# straight up throws exception
def evil5(df):
this_makes_no_sense()
def return_dataframe(df):
return pd.DataFrame({'A' : [1]})
def return_big_dataframe(df):
return pd.DataFrame({'A' : [1]*5000})
def return_none(df):
return None
def return_empty_df(df):
return pd.DataFrame()
with pytest.raises(RuntimeError):
print(testrel.map(evil1).df())
with pytest.raises(RuntimeError):
print(testrel.map(evil2).df())
with pytest.raises(RuntimeError):
print(testrel.map(evil3).df())
with pytest.raises(AttributeError):
print(testrel.map(evil4).df())
with pytest.raises(RuntimeError):
print(testrel.map(evil5).df())
# not a function
with pytest.raises(TypeError):
print(testrel.map(42).df())
# nothing passed to map
with pytest.raises(TypeError):
print(testrel.map().df())
testrel.map(return_dataframe).df().equals(pd.DataFrame({'A' : [1]}))
with pytest.raises(Exception):
testrel.map(return_big_dataframe).df()
empty_rel.map(return_dataframe).df().equals(pd.DataFrame({'A' : []}))
with pytest.raises(Exception):
testrel.map(return_none).df()
with pytest.raises(Exception):
testrel.map(return_empty_df).df()
    def test_issue_3237(self, duckdb_cursor):
def process(rel):
def mapper(x):
dates = x['date'].to_numpy("datetime64[us]")
days = x['days_to_add'].to_numpy("int")
x["result1"] = pd.Series([ | pd.to_datetime(y[0]) | pandas.to_datetime |
import streamlit as st
import pandas as pd
import requests
import os
from dotenv import load_dotenv
from nomics import Nomics
import json
import plotly
import yfinance as yf
import matplotlib.pyplot as plt
from PIL import Image
from fbprophet import Prophet
import hvplot as hv
import hvplot.pandas
import datetime as dt
from babel.numbers import format_currency
import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import svm
from pandas.tseries.offsets import DateOffset
from sklearn.metrics import classification_report
from sklearn.ensemble import AdaBoostClassifier
import numpy as np
from tensorflow import keras
import plotly.express as px
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
# 2 PERFORM EXPLORATORY DATA ANALYSIS AND VISUALIZATION
# Function to normalize stock prices based on their initial price
def normalize(df):
x = df.copy()
for i in x.columns[1:]:
x[i] = x[i]/x[i][0]
return x
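# Sketch of normalize() on a toy frame: the first column is left untouched (treated
# as the date column) and every other column is divided by its own first value, so
# each series starts at 1.0. Ticker names and prices here are illustrative.
def _example_normalize():
    demo = pd.DataFrame(
        {
            "Date": ["2021-01-01", "2021-01-02", "2021-01-03"],
            "BTC": [100.0, 110.0, 121.0],
            "ETH": [50.0, 55.0, 60.5],
        }
    )
    return normalize(demo)  # BTC -> [1.0, 1.1, 1.21], ETH -> [1.0, 1.1, 1.21]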
# Function to plot interactive plots using Plotly Express
print("Function to plot interactive plots using Plotly Express")
def interactive_plot(df, title):
fig = px.line(title = title)
for i in df.columns[1:]:
fig.add_scatter(x = df['Date'], y = df[i], name = i)
fig.show()
# Function to concatenate the date, stock price, and volume in one dataframe
def individual_stock(price_df, vol_df, name):
return pd.DataFrame({'Date': price_df['Date'], 'Close': price_df[name], 'Volume': vol_df[name]})
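# Sketch: price_df and vol_df are assumed to share a 'Date' column plus one column
# per asset, so individual_stock(price_df, vol_df, 'BTC') yields a three-column
# frame with Date / Close / Volume for that single asset ('BTC' is illustrative).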
# Load .env environment variables
load_dotenv()
## Page expands to full width
st.set_page_config(layout='wide')
image = Image.open('images/crypto_image.jpg')
st.image(image,width = 600)
# Header for main and sidebar
st.title( "Crypto Signal Provider Web App")
st.markdown("""This app displays top 10 cryptocurrencies by market cap.""")
st.caption("NOTE: USDT & USDC are stablecoins pegged to the Dollar.")
st.sidebar.title("Crypto Signal Settings")
# Get nomics api key
nomics_api_key = os.getenv("NOMICS_API_KEY")
nomics_url = "https://api.nomics.com/v1/prices?key=" + nomics_api_key
nomics_currency_url = ("https://api.nomics.com/v1/currencies/ticker?key=" + nomics_api_key + "&interval=1d,30d&per-page=10&page=1")
# Read API in json
nomics_df = pd.read_json(nomics_currency_url)
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
        values = Series(['fooBAD__barBAD', NA])
# feature generation & selection
# sample
# full
# kaggle 0.14481
# minimize score
import os
import json
import sys # pylint: disable=unused-import
from time import time
import csv
from pprint import pprint # pylint: disable=unused-import
from timeit import default_timer as timer
import lightgbm as lgb
import numpy as np
from hyperopt import STATUS_OK, fmin, hp, tpe, Trials
import pandas as pd
from pandas.io.json import json_normalize
from sklearn.feature_selection import VarianceThreshold
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold, train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import PolynomialFeatures
pd.options.display.float_format = '{:.4f}'.format
pd.set_option('display.max_columns', None)
is_kaggle = os.environ['HOME'] == '/tmp'
use_sample = False if is_kaggle else True
# hyperopt
optimize = False
max_evals = 200 if is_kaggle else 100
results_file = 'optimize.csv'
iteration = 0
best_score = sys.float_info.max
trials = Trials()
optimized_params = {
'class_weight': None,
'colsample_bytree': 1.0,
'learning_rate': 0.1,
'min_child_samples': 20,
'num_leaves': 31,
'reg_alpha': 0.0,
'reg_lambda': 0.0,
'subsample_for_bin': 20000
}
zipext = '' if is_kaggle else '.zip'
# params
n_folds = 10
stop_rounds = 100
verbose_eval = -1 # 500
# https://www.kaggle.com/julian3833/1-quick-start-read-csv-and-flatten-json-fields
def evaluate(params):
# defaults
# LGBMRegressor(boosting_type='gbdt', class_weight=None, colsample_bytree=1.0,
# learning_rate=0.1, max_depth=-1, min_child_samples=20,
# min_child_weight=0.001, min_split_gain=0.0, n_estimators=100,
# n_jobs=-1, nthread=4, num_leaves=31, objective=None,
# random_state=None, reg_alpha=0.0, reg_lambda=0.0, silent=True,
# subsample=1.0, subsample_for_bin=200000, subsample_freq=0)
params['num_leaves'] = int(params['num_leaves'])
params['min_child_samples'] = int(params['min_child_samples'])
params['subsample_for_bin'] = int(params['subsample_for_bin'])
if params['class_weight'] == 0:
params['class_weight'] = None
lgb_model = lgb.LGBMRegressor(nthread=4, n_jobs=-1, verbose=-1)
lgb_model.set_params(**params)
lr_model = LinearRegression()
test_predictions = np.zeros(test.shape[0])
best_score = 0
do_ensemble = False
ensemble_count = 2 if do_ensemble else 1
for fold_n, (train_index, test_index) in enumerate(folds.split(x_train)):
X_train, X_valid = x_train.iloc[train_index], x_train.iloc[test_index]
Y_train, Y_valid = y_train.iloc[train_index], y_train.iloc[test_index]
# lgb
lgb_model.fit(X_train, Y_train,
eval_set=[(X_train, Y_train), (X_valid, Y_valid)],
eval_metric='rmse',
verbose=False, early_stopping_rounds=stop_rounds)
# pprint(dir(lgb_model))
best_score += lgb_model.best_score_['valid_1']['rmse']
lgb_test_prediction = lgb_model.predict(x_test, num_iteration=lgb_model.best_iteration_)
test_predictions += lgb_test_prediction
if do_ensemble:
# linear regression
lr_model.fit(X_train, Y_train)
train_prediction = lr_model.predict(X_train)
best_score += np.sqrt(mean_squared_error(train_prediction, Y_train))
lr_test_prediction = lr_model.predict(x_test)
test_predictions += lr_test_prediction
test_predictions /= (n_folds * ensemble_count)
best_score /= (n_folds * ensemble_count)
return test_predictions, best_score
def objective(params):
global iteration, best_score
iteration += 1
start = timer()
_, score = evaluate(params)
run_time = timer() - start
# save results
of_connection = open(results_file, 'a')
writer = csv.writer(of_connection)
writer.writerow([iteration, score, run_time, params])
of_connection.close()
# save trials for resumption
# with open('trials.json', 'w') as f:
# # might be trials_dict to be saved
# f.write(json.dumps(trials))
best_score = min(best_score, score)
print(f'iteration {iteration}, score {score}, best {best_score}, timer {run_time}')
# score must be to minimize
return {'loss': score, 'params': params, 'iteration': iteration,
'train_time': run_time, 'status': STATUS_OK}
# polynomial features
def get_polynomial_features(train, test, target):
# Make a new dataframe for polynomial features
numeric_cols = [col for col in train.columns
if (col != target) & (col != unique_id) & ((train[col].dtype == 'int64') | (train[col].dtype == 'float64'))]
poly_features = train[numeric_cols]
poly_features_test = test[numeric_cols]
poly_target = train_targets
# Create the polynomial object with specified degree
poly_transformer = PolynomialFeatures(degree=2)
# Train the polynomial features
poly_transformer.fit(poly_features)
# Transform the features
poly_features = poly_transformer.transform(poly_features)
poly_features_test = poly_transformer.transform(poly_features_test)
# print('\nPolynomial Features shape: ', poly_features.shape)
# print(poly_transformer.get_feature_names(input_features=numeric_cols))
# Create a dataframe of the features
poly_features = pd.DataFrame(poly_features,
columns=poly_transformer.get_feature_names(numeric_cols))
# Add in the target
poly_features[target] = poly_target
# Find the correlations with the target
poly_corrs = poly_features.corr()[target].sort_values()
# Display most negative and most positive
# print(poly_corrs.head(10))
# print(poly_corrs.tail(5))
# Put test features into dataframe
poly_features_test = pd.DataFrame(poly_features_test,
columns=poly_transformer.get_feature_names(numeric_cols))
# Merge polynomial features into training dataframe
poly_features[unique_id] = train[unique_id]
train_poly = train.merge(poly_features, on=unique_id, how='left')
# print('\nPolynomial Features shape: ', poly_features.shape, train.shape, train_poly.shape, )
# print('\nPolynomial Features shape: ', poly_features.describe(), train_poly.describe(), train.describe())
# Merge polynomial features into testing dataframe
poly_features_test[unique_id] = test[unique_id]
test_poly = test.merge(poly_features_test, on=unique_id, how='left')
# Align the dataframes
train_poly, test_poly = train_poly.align(test_poly, join='inner', axis=1)
# Print out the new shapes
# print('Training data with polynomial features shape: ', train_poly.shape)
# print('Testing data with polynomial features shape: ', test_poly.shape)
# train_poly, test_poly = get_collinear_features(train_poly, test_poly, target)
return train_poly, test_poly
# arithmetic features
def get_arithmetic_features(train, test, target):
numeric_cols = [col for col in train.columns
if (train[col].dtype == 'int64') | (train[col].dtype == 'float64')]
numeric_cols.remove(unique_id)
for i1 in range(0, len(numeric_cols)):
col1 = numeric_cols[i1]
for i2 in range(i1 + 1, len(numeric_cols)):
col2 = numeric_cols[i2]
# train[f'{col1} by {col2}'] = train[col1] * train[col2]
# test[f'{col1} by {col2}'] = test[col1] * test[col2]
# train[f'{col1} plus {col2}'] = train[col1] + train[col2]
# test[f'{col1} plus {col2}'] = test[col1] + test[col2]
train[f'{col1} minus {col2}'] = train[col1] - train[col2]
test[f'{col1} minus {col2}'] = test[col1] - test[col2]
# if not (train[col2] == 0).any():
# train[f'{col1} on {col2}'] = train[col1] / train[col2]
# test[f'{col1} on {col2}'] = test[col1] / test[col2]
# elif not (train[col1] == 0).any():
# train[f'{col2} on {col1}'] = train[col2] / train[col1]
# test[f'{col2} on {col1}'] = test[col2] / test[col1]
train, test = get_collinear_features(train, test, target)
return train, test
def get_collinear_features(train, test, target):
corrs = train.corr()
    upper = corrs.where(np.triu(np.ones(corrs.shape), k=1).astype(bool))
threshold = 0.8
# Select columns with correlations above threshold
to_drop = [column for column in upper.columns if any(upper[column] > threshold)]
train = train.drop(columns=to_drop)
test = test.drop(columns=to_drop)
return train, test
def get_missing_values(train, test, target):
threshold = 0.75
train_missing = (train.isnull().sum() / len(train)).sort_values(ascending=False)
test_missing = (test.isnull().sum() / len(test)).sort_values(ascending=False)
# print(train_missing.head())
# print(test_missing.head())
# Identify missing values above threshold
train_missing = train_missing.index[train_missing > threshold]
test_missing = test_missing.index[test_missing > threshold]
all_missing = list(set(set(train_missing) | set(test_missing)))
# print(f'There are {len(all_missing)} columns with more than {threshold}%% missing values')
train = train.drop(columns=all_missing)
test = test.drop(columns=all_missing)
train, test = train.align(test, join='inner', axis=1)
return train, test
def get_feature_importance(train, test, target):
model = lgb.LGBMRegressor(nthread=4, n_jobs=-1, verbose=-1)
model.set_params(**optimized_params)
x_train = train.drop(target_key, axis=1)
if unique_id in x_train.columns:
        x_train = x_train.drop(unique_id, axis=1)
# Initialize an empty array to hold feature importances
feature_importances = np.zeros(x_train.shape[1])
# Fit the model twice to avoid overfitting
for i in range(2):
# Split into training and validation set
train_features, valid_features, train_y, valid_y = train_test_split(x_train, train_targets,
test_size=0.25, random_state=i)
# Train using early stopping
model.fit(train_features, train_y, early_stopping_rounds=100,
eval_set=[(valid_features, valid_y)],
eval_metric='rmse', verbose=False)
# Record the feature importances
feature_importances += model.feature_importances_
# Make sure to average feature importances!
feature_importances = feature_importances / 2
feature_importances = pd.DataFrame(
{'feature': list(x_train.columns), 'importance': feature_importances}).sort_values('importance', ascending=False)
# Sort features according to importance
feature_importances = feature_importances.sort_values('importance', ascending=False).reset_index()
# Normalize the feature importances to add up to one
feature_importances['importance_normalized'] = feature_importances['importance'] / feature_importances['importance'].sum()
feature_importances['cumulative_importance'] = np.cumsum(feature_importances['importance_normalized'])
# print(feature_importances)
# Find the features with minimal importance
# unimportant_features = list(feature_importances[feature_importances['importance'] == 0.0]['feature'])
# Threshold for cumulative importance
threshold = 0.99
# Extract the features to drop
features_to_drop = list(feature_importances[feature_importances[
'cumulative_importance'] > threshold]['feature'])
# print(f'There are {len(features_to_drop)} features under {threshold} importance')
# print(features_to_drop)
train = train.drop(features_to_drop, axis=1)
test = test.drop(features_to_drop, axis=1)
return train, test
def get_feature_selection(train, test, target):
# remove collinear variables
train, test = get_collinear_features(train, test, target)
print(f'collinear, cols {len(train.columns)}, {((time() - start_time) / 60):.0f} mins')
    all_numeric_cols = [col for col in train.columns
                        if (col != unique_id) & ((train[col].dtype == 'int64') | (train[col].dtype == 'float64'))]
# feature selection via variance
train_numeric = train[all_numeric_cols].fillna(0)
select_features = VarianceThreshold(threshold=0.2)
select_features.fit(train_numeric)
numeric_cols = train_numeric.columns[select_features.get_support(indices=True)].tolist()
# remove cols without variance
for col in all_numeric_cols:
if col not in numeric_cols:
train.drop(col, axis=1, inplace=True)
if col in test.columns:
test.drop(col, axis=1, inplace=True)
print(f'variance, cols {len(train.columns)}, {((time() - start_time) / 60):.0f} mins')
    # determine important features
train, test = get_feature_importance(train, test, target)
print(f'importance, cols {len(train.columns)}, {((time() - start_time) / 60):.0f} mins')
return train, test
# -------- main
start_time = time()
unique_id = 'unique_id'
target_key = 'Id'
target = 'SalePrice'
# load data
if use_sample:
train = pd.read_csv('../input/train.csv')
    test = pd.read_csv('../input/test.csv')
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
# data = pd.read_csv('data_subset.csv')
#
# # data formatted to be able to use surprise package
# data_surprise = data[['customer_id', 'product_id','star_rating']].\
# rename(columns={'customer_id': 'userID', 'product_id': 'itemID', 'star_rating': 'rating'})
#
# data_surprise.to_csv('data_surprise.csv', index=False)
def get_data(_file_path, _save_path):
"""
Method to generate a clean pandas dataframe given file path
:param _file_path: input raw data file path
:param _save_path: output data_subset file path
:return: data_subset dataframe
"""
# reading the data from tsv
    cols = pd.read_csv(_file_path, sep='\t', nrows=1)
import sys
sys.path.append('.')
# stdlib
import os
from glob import glob
from tqdm.auto import tqdm
import json
import pickle
from collections import defaultdict
import time
import argparse
# numlib
import numpy as np
import pandas as pd
from ensemble_boxes import nms, weighted_boxes_fusion
#from include import *
from utils.file import Logger
from utils.him import downsize_boxes, upsize_boxes
def am_mean(data, ws):
return np.sum([d*w for d, w in zip(data, ws)])/np.sum(ws)
def gm_mean(data, ws):
return np.prod([d**w for d, w in zip(data, ws)])**(1./np.sum(ws))
def am_gm_mean(data, ws):
return 0.5*(am_mean(data, ws) + gm_mean(data, ws))
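# Worked sketch with data=[0.2, 0.4] and ws=[1, 3]:
#   am_mean    -> (0.2*1 + 0.4*3) / (1+3)         = 0.35
#   gm_mean    -> (0.2**1 * 0.4**3) ** (1/(1+3))  ~= 0.3364
#   am_gm_mean -> 0.5 * (0.35 + 0.3364)           ~= 0.3432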
def str2boxes_image(s, with_none=False):
"""
    output: [prob x_min y_min x_max y_max]
range x,y: [0, +inf]
"""
s = s.strip().split()
s = np.array([s[6*idx+1:6*idx+6] for idx in range(len(s)//6) \
if s[6*idx] == 'opacity' or with_none]).astype(np.float32)
if len(s) == 0: print('Warning: image without box!')
return s
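# Sketch of the string format parsed above (numbers are illustrative): each box is
# "<label> <prob> <x_min> <y_min> <x_max> <y_max>", and non-'opacity' entries are
# dropped unless with_none=True.
def _example_str2boxes_image():
    s = "opacity 0.80 10 20 110 220 none 0.05 0 0 1 1"
    return str2boxes_image(s)  # -> array([[0.8, 10., 20., 110., 220.]], dtype=float32)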
def str2boxes_df(df, with_none=False):
return [str2boxes_image(row['PredictionString'], with_none=with_none) \
for _, row in df.iterrows()]
def boxes2str_image(boxes):
if len(boxes) == 0:
return ''
return ' '.join(np.concatenate([[['opacity']]*len(boxes), boxes], \
axis=1).reshape(-1).astype('str'))
def boxes2str_df(boxes, image_ids=None):
strs = [boxes2str_image(bs) for bs in boxes]
if image_ids is None:
return strs
return pd.DataFrame({'id': image_ids, 'PredictionString': strs})
def check_num_boxes_per_image(df=None, csv_path=None, filter_rows=True):
assert df is not None or csv_path is not None
if df is None:
df = pd.read_csv(csv_path)
if filter_rows:
df_image = df[df['id'].apply(lambda x: x.endswith('image'))].reset_index(drop=True)
else:
df_image = df
all_boxes = str2boxes_df(df_image, with_none=False)
all_boxes = [boxes for boxes in all_boxes if len(boxes) > 0 ]
return np.concatenate(all_boxes).shape[0] / len(df_image)
def extract_none_probs(opacity_probs):
none_probs = []
for image_probs in opacity_probs:
none_prob = np.prod(1 - np.array(image_probs))
none_probs.append(none_prob)
return none_probs
def filter_rows(df, mode):
assert mode in ['study', 'image']
df = df.copy()
df = df[df['id'].apply(lambda x: x.endswith(mode))].reset_index(drop=True)
return df
def ensemble_image(dfs, df_meta, mode='wbf', \
                   iou_thr=0.5, skip_box_thr=0.001, weights=None, do_filter_rows=True):
    if do_filter_rows:
        df_meta = filter_rows(df_meta, mode='image')
        dfs = [filter_rows(df, mode='image') for df in dfs]
image_ids, prediction_strings, all_scores = [], [], []
num_boxes = 0
for i, row in tqdm(df_meta.iterrows(), total=len(df_meta)):
image_id = row['id']
s = []
for df in dfs:
if np.sum(df['id']==image_id) > 0:
ss = df.loc[df['id']==image_id, 'PredictionString'].values[0]
if type(ss) == str:
s.append(ss)
else:
s.append('')
else:
s.append('')
boxes, scores, labels = [], [], []
for ss in s:
boxes_, scores_, labels_ = [], [], []
ss = str2boxes_image(ss, with_none=False)
if len(ss) > 0:
labels_ = [0]*len(ss)
scores_ = ss[:, 0].tolist()
boxes_ = downsize_boxes(ss[:, 1:], row['w'], row['h'])
labels.append(labels_)
boxes.append(boxes_)
scores.append(scores_)
if mode == 'wbf':
boxes, scores, labels = weighted_boxes_fusion(boxes,
scores,
labels,
iou_thr=iou_thr,
weights=weights,
skip_box_thr=skip_box_thr)
elif mode == 'nms':
boxes_, scores_, labels_, weights_ = [], [], [], []
for j, b in enumerate(boxes):
if len(b) > 0:
boxes_.append(b)
scores_.append(scores[j])
labels_.append(labels[j])
if weights is not None:
weights_.append(weights[j])
if weights is None:
weights_ = None
boxes, scores, labels = nms(boxes_,
scores_,
labels_,
iou_thr=iou_thr,
weights=weights_)
if len(boxes) == 0:
image_ids.append(image_id)
prediction_strings.append('')
print('Warning: no box found after boxes fusion!')
continue
num_boxes += len(boxes)
all_scores.append(scores)
boxes = upsize_boxes(boxes, row['w'], row['h'])
s = []
for box, score, label in zip(boxes, scores, labels):
s.append(' '.join(['opacity', str(score), ' '.join(box.astype(str))]))
image_ids.append(image_id)
prediction_strings.append(' '.join(s))
df_pred = pd.DataFrame({'id': image_ids, 'PredictionString': prediction_strings})
return df_pred, num_boxes, np.concatenate(all_scores).tolist()
def ensemble_study(dfs, weights=None, mean='am'):
dfs = [filter_rows(df, mode='study') for df in dfs]
study_ids = dfs[0]['id'].values
if weights is None:
weights = [1.] * len(dfs)
weights = np.array(weights) / np.sum(weights)
ens_probs_am = np.zeros((len(study_ids), 4), dtype=np.float32)
ens_probs_gm = np.ones((len(study_ids), 4), dtype=np.float32)
for df, w in zip(dfs, weights):
df = df[df['id'].apply(lambda x: x.endswith('study'))].reset_index(drop=False)
for i, id_ in enumerate(study_ids):
s = df.loc[df['id']==id_, 'PredictionString'].values[0]
preds = s.strip().split()
for idx in range(len(preds)//6):
ens_probs_am[i, cls_map[preds[6*idx]]] += float(preds[6*idx + 1]) * w
ens_probs_gm[i, cls_map[preds[6*idx]]] *= float(preds[6*idx + 1]) ** w
# apply different ensemble methods
if mean == 'am':
ens_probs = ens_probs_am
elif mean == 'gm':
ens_probs = ens_probs_gm
elif mean == 'am_gm':
ens_probs = 0.5*(ens_probs_am + ens_probs_gm)
df = pd.DataFrame({'id': study_ids})
df[class_names] = ens_probs
df['PredictionString'] = df.apply(lambda row: \
f'negative {row["negative"]} 0 0 1 1 typical {row["typical"]} 0 0 1 1 \
indeterminate {row["indeterminate"]} 0 0 1 1 atypical {row["atypical"]} 0 0 1 1', \
axis=1)
df = df[['id', 'PredictionString']]
return df
def extract_negative_prob(df, std2img):
"""
Args:
df: study-level df
std2img: dict maps from study_id to image_id
Returns:
df with image-level ids and mapped negative probabilities
"""
df = filter_rows(df, mode='study')
image_ids, negative_probs = [], []
for study_id, img_ids in std2img.items():
s = df.loc[df['id']==study_id + '_study', 'PredictionString'].values[0]
s = s.strip().split()
for idx in range(len(s)//6):
if s[6*idx] == 'negative':
neg_prob = float(s[6*idx + 1])
break
image_ids.extend([img_id + '_image' for img_id in img_ids])
negative_probs.extend([neg_prob]*len(img_ids))
return pd.DataFrame({'id': image_ids, 'negative': negative_probs})
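# Sketch: with std2img = {'s1': ['img_a', 'img_b']} and a study row 's1_study' whose
# PredictionString contains 'negative 0.30 0 0 1 1', the frame returned above maps
# both 'img_a_image' and 'img_b_image' to negative=0.30 (ids and value illustrative).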
def postprocess_image(df_image, df_study, std2img, df_none=None, \
none_cls_w=0., none_dec_w=0.5, neg_w=0.5, \
detect_w=0.84, clsf_w=0.84):
df_image = filter_rows(df_image, mode='image')
df_study = filter_rows(df_study, mode='study')
if df_none is None:
none_cls_w = 0.
none_cls_w, none_dec_w, neg_w = \
none_cls_w/(none_cls_w + none_dec_w + neg_w), \
none_dec_w/(none_cls_w + none_dec_w + neg_w), \
neg_w/(none_cls_w + none_dec_w + neg_w)
detect_w, clsf_w = \
detect_w/(detect_w + clsf_w), \
clsf_w/(detect_w + clsf_w)
df_negative = extract_negative_prob(df_study, std2img)
df_image = df_image.merge(df_negative, on='id', how='left')
if none_cls_w > 0.:
df_image = df_image.merge(df_none, on='id', how='left')
new_nones = []
for i, row in df_image.iterrows():
if row['PredictionString'] == 'none 1 0 0 1 1' \
or row['PredictionString'] == '' \
or type(row['PredictionString']) != str:
df_image.loc[i, 'PredictionString'] = f'none {row["none"]} 0 0 1 1'
#df_image.loc[i, 'new_none'] = row["none"]
new_nones.append(row["none"])
print('no opacity founded!')
continue
else:
# extract none probabilities
none_dec_prob = 1.
bboxes = row['PredictionString'].strip().split()
for idx in range(len(bboxes)//6):
if bboxes[6*idx] == 'opacity':
none_dec_prob *= 1 - float(bboxes[6*idx + 1])
# modify opacity boxes
if none_cls_w > 0.:
post_none_prob = none_cls_w*row["none"] + none_dec_w*none_dec_prob + neg_w*row["negative"]
else:
post_none_prob = none_dec_w*none_dec_prob + neg_w*row["negative"]
for idx in range(len(bboxes)//6):
if bboxes[6*idx] == 'opacity':
bboxes[6*idx + 1] = str(float(bboxes[6*idx + 1])**detect_w * (1 - post_none_prob)**clsf_w)
df_image.loc[i, 'PredictionString'] = ' '.join(bboxes)
# add none boxes
df_image.loc[i, 'PredictionString'] += f' none {post_none_prob} 0 0 1 1'
            # actual none probability for ensemble with negative in study-level
if none_cls_w > 0.:
                new_nones.append((none_cls_w/(none_cls_w + none_dec_w))*row["none"] +
                                 (none_dec_w/(none_cls_w + none_dec_w))*none_dec_prob)
else:
new_nones.append(none_dec_prob)
df_none = pd.DataFrame({'id': df_image['id'].values, 'none': new_nones})
return df_image, df_none
def postprocess_study(df, df_none, std2img, neg_w=0.7, none_w=0.3):
"""
Args:
df: study-level prediction
df_none: image-level none probability
std2img: dict maps from study_id to image_id
"""
df = filter_rows(df, mode='study')
df_none = filter_rows(df_none, mode='image')
neg_w, none_w = \
neg_w/(neg_w + none_w), \
none_w/(neg_w + none_w)
# extract none probability for each study
study_ids, none_probs = [], []
for study_id, image_ids in std2img.items():
image_ids_ = [img_id + '_image' for img_id in image_ids]
study_none_prob = df_none.loc[df_none['id'].isin(image_ids_), 'none'].mean()
study_ids.append(study_id + '_study')
none_probs.append(study_none_prob)
df_study_none = pd.DataFrame({'id': study_ids, 'none': none_probs})
    df = pd.merge(df, df_study_none, on='id', how='left')
import os
import pandas
from c3x.data_loaders import configfileparser, nextgen_loaders
from c3x.data_statistics import statistics as stats
# Reads a config file to produce a dictionary that can be handed over to functions
config = configfileparser.ConfigFileParser("config/config_nextGen_stats.ini")
data_paths = config.read_data_path()
batch_info = config.read_batches()
measurement_types = config.read_data_usage()
# Create a nextGen data object that has working paths and can be sliced using batches
# it might be appropriate for the example to make the batches smaller, however that may
# increase computing time,
# the next row can be commented if data was processed prior to running this script
nextgen = nextgen_loaders.NextGenData(data_name='NextGen',
source=data_paths["source"],
batteries=data_paths["batteries"],
solar=data_paths["solar"],
node=data_paths["node"],
loads=data_paths["loads"],
results=data_paths["results"],
stats = data_paths["stats"],
number_of_batches=batch_info["number_of_batches"],
files_per_batch=batch_info["files_per_batch"],
concat_batches_start=batch_info["concat_batches_start"],
concat_batches_end=batch_info["concat_batches_end"])
# now we have a folder structure with lots of files with batch numbers
print("ALL BATCHES ANALYSIS")
node_count, batch_list = stats.batch_with_highest_node_count(data_dir=data_paths,
batch_info=batch_info,
measurement_types=measurement_types)
print("max number of nodes: ", node_count)
print("batches with max node count: ", batch_list)
print("number of batches with that node count: ", len(batch_list))
data_path_list = []
data_files = []
sorted_dict = {}
node_list = []
# here a dictionary is generated that holds a list of nodes per batch (batch:[node_ids])
for batch in range(batch_info["number_of_batches"]):
node_list = stats.nodes_per_batch(data_paths, batch, measurement_types)
sorted_dict[batch] = node_list
# a list of all files is created
for data_type in measurement_types:
path = data_paths[data_type]
for file in os.listdir(data_paths[data_type]):
data_files.append(os.path.join(path, file))
# some Data Frames and Labels for saving results nicely
result_data_frame = pandas.DataFrame()
batch_data_results = pandas.DataFrame()
index = ['Battery - PLG',
'Battery - QLG',
'Battery - RC',
'Solar - PLG',
'Load - PLG',
'Load - QLG']
columns = pandas.MultiIndex.from_product([['Samples', 'Duplicates'], index],
names=['Type', 'Measurement'])
# iterate through batches
for batch in range(batch_info["number_of_batches"]):
batch_data = pandas.DataFrame()
# iterate through nodes
result_data_frame = pandas.DataFrame()
for node in sorted_dict[batch]:
node_data = pandas.DataFrame()
search = str(node) + "_" + str(batch) + ".npy"
batch_node_subset = [val for i, val in enumerate(data_files) if val.endswith(search)]
# build a data frame with all measurement data
first_run = True
for path in batch_node_subset:
tmp_data_frame = pandas.read_pickle(path)
if first_run is True:
node_data = pandas.DataFrame(tmp_data_frame)
first_run = False
else:
node_data = pandas.concat([node_data, tmp_data_frame], axis=1)
# get the node ID
node_df = pandas.DataFrame(pandas.Series(node))
node_df.columns = ["node"]
# count samples and convert to data frame
samples = pandas.Series(stats.count_samples(node_data))
        samples = pandas.DataFrame(samples)
import json
import pandas as pd
from collections import OrderedDict
from datetime import datetime
from contextlib import closing
import os
import errno
import logging
from airflow.hooks.http_hook import HttpHook
from airflow.hooks.postgres_hook import PostgresHook
def upsert_rows(hook, table, rows, on_conflict, change_field, target_fields=None, commit_every=1000):
"""
A generic way to insert a set of tuples into a table,
a new transaction is created every commit_every rows
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param on_conflict: The names of the columns triggering the conflict
:type on_conflict: iterable of strings
:param change_field: The names of the columns to update on conflict
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
"""
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
on_conflict = ", ".join(on_conflict)
on_conflict = "({})".format(on_conflict)
change_field = ["{0}=EXCLUDED.{0}".format(e) for e in change_field]
change_field = ", ".join(change_field)
with closing(hook.get_conn()) as conn:
if hook.supports_autocommit:
hook.set_autocommit(conn, False)
conn.commit()
with closing(conn.cursor()) as cur:
for i, row in enumerate(rows, 1):
L = []
for cell in row:
L.append(hook._serialize_cell(cell, conn))
values = tuple(L)
placeholders = ["%s",]*len(row)
sql = "INSERT INTO {0} {1} VALUES ({2}) ON CONFLICT {3} DO UPDATE SET {4};".format(
table,
target_fields,
",".join(placeholders),
on_conflict,
change_field)
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
conn.commit()
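# Sketch of the statement built above for table='last_connexions',
# target_fields=['taxi', 'status'], on_conflict=['taxi'], change_field=['status']:
#   INSERT INTO last_connexions (taxi, status) VALUES (%s,%s)
#   ON CONFLICT (taxi) DO UPDATE SET status=EXCLUDED.status;
# (see update_last_connexions below for a real call with this table's full column set)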
def get_taxi_data(conn_id, endpoint, **kwargs):
api_hook = HttpHook(http_conn_id=conn_id, method='GET')
headers = {'X-API-KEY': 'fcf1c741-e5b5-4c5e-a965-2598461b4836'}
response = api_hook.run(endpoint, headers=headers)
# df_taxis = pd.read_json(response.content)
data = response.json()
with open('dags/{}.json'.format(endpoint), 'w') as outfile:
json.dump(data, outfile)
def get_position_taxi(conn_id, **kwargs):
try:
        os.makedirs('dags/taxi-positions')
except OSError as e:
if e.errno != errno.EEXIST:
raise
endpoint = 'taxi-positions/{ts}.000Z'.format(**kwargs)
# endpoint = 'taxi-positions/2018-03-21T20:20:00.000Z'
get_taxi_data(conn_id, endpoint)
def get_last_connexions(conn_id, **kwargs):
pg_hook = PostgresHook(postgres_conn_id=conn_id)
sql = """SELECT taxi, "timestampUTC", status, operator FROM public.last_connexions"""
df = pg_hook.get_pandas_df(sql)
df.to_pickle('dags/last_connexions.pickle')
def update_last_connexions(conn_id, **kwargs):
pg_hook = PostgresHook(postgres_conn_id=conn_id)
attributes = ['taxi', '"timestampUTC"', 'status', 'operator']
df_last_connexions = pd.read_pickle('dags/last_connexions.pickle')
tuples = [tuple(x) for x in df_last_connexions.values]
# rows = [('9876543', 'occupied', '2018-03-21T22:00:00.000Z'), ('1234567', 'occupied', '2018-03-21T22:00:00.000Z')]
on_conflict = ['taxi', 'operator']
change_field = ['status', '"timestampUTC"']
upsert_rows(pg_hook, 'last_connexions', tuples, on_conflict, change_field, attributes)
# df.to_pickle('dags/last_connexions.pickle')
def transform_taxi_data(**kwargs):
logging.info('{ts}'.format(**kwargs))
attributes = ['taxi', 'timestampUTC', 'status', 'operator']
df_last_connexions = pd.read_pickle('dags/last_connexions.pickle')
with open('dags/taxi-positions/{ts}.000Z.json'.format(**kwargs)) as data_file:
positions = json.load(data_file)
taxis_connexions = OrderedDict()
for attribute in attributes:
taxis_connexions[attribute] = []
taxis_connexions['time'] = []
for items in positions['items']:
for item in items['items']:
for attribute in attributes:
taxis_connexions[attribute].append(item[attribute])
taxis_connexions['time'].append(items['receivedAt'])
df_taxis_connexions = pd.DataFrame.from_dict(taxis_connexions)
df_taxis_connexions.time = pd.to_datetime(df_taxis_connexions.time, format='%Y-%m-%dT%H:%M:%S.%fZ')
    df_taxis_connexions.timestampUTC = pd.to_datetime(df_taxis_connexions.timestampUTC, format='%Y-%m-%dT%H:%M:%S.%fZ')
import requests as re
import pandas as pd
from datetime import datetime, timedelta
from typing import Callable
import time
def format_url(coin: str="DOGE") -> str:
url = "https://production.api.coindesk.com/v2/price/values/"
start_time = (datetime.now() - timedelta(minutes=10)).isoformat(timespec="minutes")
end_time = datetime.now().isoformat(timespec="minutes")
params = f"?start_date={start_time}&end_date={end_time}&ohlc=false"
return url + coin + params
def get_data(coin: str="DOGE") -> pd.DataFrame:
prices = re.get(format_url(coin))
prices = prices.json()['data']['entries']
    data = pd.DataFrame(prices, columns=["time", "price"])
from itertools import compress
import pandas as pd
import numpy as np
from abc import ABCMeta, abstractmethod
from surveyhelper.scale import QuestionScale, LikertScale, NominalScale, OrdinalScale
from scipy.stats import ttest_ind, f_oneway, chisquare
class MatrixQuestion:
__metaclass__ = ABCMeta
def __init__(self, text, label, questions):
self.text = text
self.questions = questions
self.label = label
self.assert_questions_same_type()
self.assert_choices_same()
self.assign_children_to_matrix()
def exclude_choices_from_analysis(self, choices):
for q in self.questions:
q.exclude_choices_from_analysis(choices)
def reverse_choices(self):
for q in self.questions:
q.reverse_choices()
def change_scale(self, newtype, values = None, midpoint = None):
for q in self.questions:
q.change_scale(newtype, values, midpoint)
def change_midpoint(self, midpoint):
for q in self.questions:
q.scale.midpoint = midpoint
def get_scale(self):
if len(self.questions) > 0:
return(self.questions[0].scale)
else:
            return(None)
def assert_questions_same_type(self):
if all(type(x) == type(self.questions[0]) for x in self.questions):
return(True)
else:
raise(Exception("Questions in a matrix must all have the same type"))
def assert_choices_same(self):
if all([x.scale == self.questions[0].scale for x in self.questions]):
return(True)
else:
raise(Exception("Questions in a matrix must all have the same choices"))
def assign_children_to_matrix(self):
for q in self.questions:
q.matrix = self
return
def get_variable_names(self):
names = []
for q in self.questions:
names += q.get_variable_names()
return(names)
def get_children_text(self):
return([q.text for q in self.questions])
def pretty_print(self, show_choices=True):
print("{} ({})".format(self.text, self.label))
if show_choices:
self.questions[0].pretty_print_choices()
for q in self.questions:
print(q.text)
@abstractmethod
def get_choices(self):
pass
@abstractmethod
def frequency_table(self):
pass
def freq_table_to_json(self, df):
return('')
def questions_to_json(self):
return('')
class SelectOneMatrixQuestion(MatrixQuestion):
def get_choices(self, remove_exclusions=True, show_values=False):
self.assert_choices_same()
if len(self.questions) > 0:
return(self.questions[0].scale.choices_to_str(remove_exclusions,
show_values))
else:
return([])
def frequency_table(self, df, show="ct", pct_format=".0%",
remove_exclusions = True, show_totals=True,
show_mean=True, mean_format=".1f"):
if len(self.questions) == 0:
return(pd.DataFrame())
data = []
if show == "ct":
for q in self.questions:
data.append(q.frequency_table(df, False, True,
False, pct_format, remove_exclusions,
show_totals, show_mean,
).iloc[:,0].tolist())
elif show == "pct":
for q in self.questions:
data.append(q.frequency_table(df, False, False,
True, pct_format, remove_exclusions,
show_totals, show_mean
).iloc[:,0].tolist())
else:
raise(Exception("Invalid 'show' parameter: {}".format(show)))
tbl = pd.DataFrame(data)
tmpcols = self.get_choices(remove_exclusions)
if show_totals:
tmpcols.append("Total")
if show_mean:
tmpcols.append("Mean")
tbl.columns = tmpcols
tbl["Question"] = self.get_children_text()
cols = tbl.columns.tolist()
cols = cols[-1:] + cols[:-1]
tbl = tbl[cols]
return(tbl)
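    # Sketch of the result with show="ct" and show_totals/show_mean left on: one row
    # per child question, with columns Question, <choice labels...>, Total, Mean
    # (choice labels come from the shared scale; counts are per sub-question).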
def cut_by_question(self, other_question, response_set,
cut_var_label=None, question_labels=None,
pct_format=".0%", remove_exclusions=True,
show_mean=True, mean_format=".1f"):
if type(other_question) != SelectOneQuestion:
raise(Exception("Can only call cut_by_question on a SelectOneQuestion type"))
groups = response_set.groupby(other_question.label)
group_mapping = dict(zip(other_question.values, other_question.choices))
oth_text = cut_var_label
if not oth_text:
oth_text = other_question.text
return(self.cut_by(groups, group_mapping, oth_text, question_labels,
pct_format, remove_exclusions, show_mean, mean_format))
def cut_by(self, groups, group_label_mapping, cut_var_label,
question_labels=None, pct_format=".0%",
remove_exclusions=True, show_mean=True, mean_format=".1f"):
results = []
labels = question_labels
if not labels:
labels = [q.text for q in self.questions]
for q, l in zip(self.questions, labels):
r = q.cut_by(groups, group_label_mapping, cut_var_label,
l, pct_format, remove_exclusions,
show_mean, mean_format)
# r.columns = pd.MultiIndex.from_tuples([(q.text, b) for a, b in
# r.columns.tolist()])
results.append(r.T)
return(pd.concat(results))
def freq_table_to_json(self, df):
t = self.frequency_table(df, "ct", "", True, False, False, "")
return(t.iloc[:, 1:].to_json(orient="records"))
def questions_to_json(self):
df = pd.DataFrame({"Question": self.get_children_text()})
return(df.to_json(orient="records"))
def graph_type(self):
if len(self.questions) > 0:
if type(self.questions[0].scale) == LikertScale:
return('diverging_bar')
else:
return('horizontal_stacked_bar')
else:
return('')
class SelectMultipleMatrixQuestion(MatrixQuestion):
def get_choices(self, remove_exclusions=True):
self.assert_choices_same()
        if len(self.questions) > 0:
            return(self.questions[0].get_choices(remove_exclusions))
        else:
            return([])
def frequency_table(self, df, show="ct", pct_format=".0%",
remove_exclusions = True, show_totals=True):
data = []
if show == "ct":
for q in self.questions:
data.append(q.frequency_table(df, False, True,
False, False, pct_format, remove_exclusions,
False).iloc[:,0].tolist())
elif show == "pct_respondents":
            for q in self.questions:
data.append(q.frequency_table(df, False, False,
True, False, pct_format, remove_exclusions,
False).iloc[:,0].tolist())
elif show == "pct_responses":
            for q in self.questions:
data.append(q.frequency_table(df, False, False,
False, True, pct_format, remove_exclusions,
False).iloc[:,0].tolist())
else:
raise(Exception("Invalid 'show' parameter: {}".format(show)))
tbl = pd.DataFrame(data)
tbl.columns = self.get_choices(remove_exclusions)
tbl["Question"] = self.get_children_text()
cols = tbl.columns.tolist()
cols = cols[-1:] + cols[:-1]
tbl = tbl[cols]
if show_totals:
tots = []
for q in self.questions:
tots.append(q.get_total_respondents(df))
tbl["Total Respondents"] = tots
return(tbl)
class SelectQuestion:
__metaclass__ = ABCMeta
def get_total_respondents(self, df):
freqs, resp, nonresp = self.tally(df)
return(resp)
def get_scale(self):
return(self.scale)
def change_scale(self, newtype, values = None, midpoint = None):
self.scale = QuestionScale.change_scale(self.scale, newtype)
def change_midpoint(self, midpoint):
self.scale.midpoint = midpoint
def exclude_choices_from_analysis(self, choices):
self.scale.exclude_choices_from_analysis(choices)
@abstractmethod
def get_variable_names(self):
pass
@abstractmethod
def pretty_print(self):
pass
@abstractmethod
def pretty_print_choices(self):
pass
@abstractmethod
def tally(self):
pass
@abstractmethod
def frequency_table(self):
pass
def questions_to_json(self):
return('')
class SelectOneQuestion(SelectQuestion):
def __init__(self, text, var, choices, label, values,
exclude_from_analysis, matrix=None, scale_type='likert'):
self.text = text
self.label = label
self.variable = var
self.matrix = matrix
self.scale = QuestionScale.create_scale(scale_type, choices,
exclude_from_analysis, values)
def get_variable_names(self):
return([self.variable])
def pretty_print(self, show_choices=True):
print("{} ({})".format(self.text, self.label))
if show_choices:
self.pretty_print_choices()
def pretty_print_choices(self):
print(", ".join(self.scale.choices_to_str(False)))
def reverse_choices(self):
self.scale.reverse_choices()
def mean(self, df, remove_exclusions=True):
values = self.scale.get_values(remove_exclusions)
freq, n, x = self.tally(df, remove_exclusions)
num = sum([ct * v for ct, v in zip(freq, values)])
if n > 0:
return(num/n)
else:
return(np.nan)
def tally(self, df, remove_exclusions=True):
"""
Returns ([response frequencies], respondents, nonrespondents)
tuple where response frequencies is a count of responses for
each answer choice in order.
"""
unit_record = df[self.variable]
freqs = dict(unit_record.value_counts())
cts = []
values = self.scale.get_values(remove_exclusions)
for k in values:
if k in freqs:
cts.append(freqs[k])
else:
cts.append(0)
return((cts, sum(cts), len(unit_record)-sum(cts)))
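    # Sketch: for a question whose scale values are [1, 2, 3, 4, 5] and a column
    # holding [1, 1, 3, NaN], tally returns ([2, 0, 1, 0, 0], 3, 1): two picks of
    # value 1, one of value 3, three respondents and one non-respondent.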
def frequency_table(self, df, show_question=True, ct=True,
pct=True, pct_format=".0%", remove_exclusions=True,
show_totals=True, show_mean=True, mean_format=".1f",
show_values=True):
cts, resp, nonresp = self.tally(df, remove_exclusions)
data = []
cols = []
tots = []
mean = []
if show_question:
data.append(self.scale.choices_to_str(remove_exclusions, show_values))
cols.append("Answer")
tots.append("Total")
mean.append("Mean")
if ct:
data.append(cts)
cols.append("Count")
tots.append(resp)
mean.append(format(self.mean(df, remove_exclusions),
mean_format))
if pct:
l = []
for x in cts:
if resp > 0:
l.append(format(x/resp, pct_format))
else:
l.append("-")
data.append(l)
cols.append("%")
tots.append(format(1, pct_format))
if not ct:
mean.append(format(self.mean(df, remove_exclusions),
mean_format))
else:
mean.append("")
        tbl = pd.DataFrame(data)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler, PolynomialFeatures, OneHotEncoder
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Lasso, LassoCV, Ridge, RidgeCV
from sklearn.metrics import r2_score, mean_squared_error
"""
1. get_Xy(df): Separate features and target variable
2. get_score(X_train,X_val,y_train,y_val)
3. categorical(X_train,X_val,X_test,cat_variable)
"""
def get_Xy(df):
df = df.dropna()
target = 'opening_weekend_usa'
all_column = df.columns.values.tolist()
all_column.remove(target)
y = df[target]
X = df[all_column]
return X, y
def get_score(X_train,X_val,y_train,y_val):
# fit linear regression to training data
lr_model = LinearRegression()
lr_model.fit(X_train, y_train)
y_pred = lr_model.predict(X_val)
# score fit model on validation data
train_score = lr_model.score(X_train, y_train)
val_score = lr_model.score(X_val, y_val)
rmse = np.sqrt(mean_squared_error(y_val, y_pred))
# report results
print('\nTrain R^2 score was:', train_score)
print('Validation R^2 score was:', val_score)
print(f'RMSE: {rmse:.2f} \n')
# print('Feature coefficient results:')
# for feature, coef in zip(X.columns, lr_model.coef_):
# print(feature, ':', f'{coef:.2f}')
# Visualization
fig, ax = plt.subplots(1, 1)
plt.scatter(y_val, y_pred, alpha=0.4)
ax.set_xlabel('Opening weekend revenue ($ in millions)',fontsize=20)
ax.set_ylabel('Prediction ($ in millions)',fontsize=20)
ax.set_title('R$^2$: %0.2f' % val_score, fontsize=20)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
x=np.linspace(0,0.7e2,50)
# x=np.linspace(4,9,50)
y=x
plt.plot(x,y,color='firebrick',linewidth=3,alpha=0.6)
plt.ylim(0,)
plt.xlim(0,)
return fig, lr_model, y_pred
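# Hypothetical usage sketch (the DataFrame name and split sizes are assumptions, not part
# of this module): given a cleaned movie DataFrame whose target column is
# 'opening_weekend_usa', the helpers above chain together as
#   X, y = get_Xy(movies_df)
#   X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
#   fig, lr_model, y_pred = get_score(X_train, X_val, y_train, y_val)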
def categorical_multilabel(X_train,X_val,X_test,cat_variable):
"""
Input: X_train,X_val,X_test,categorical_variable
Processing: preprocessing the three sets separately:
1. Separate continuous and categorical variable
    2. Scale + polynomial-fit the continuous variables and get_dummies on the categorical variable
    3. Combine back the continuous and categorical data
    Return: transformed X_train, X_val, X_test
"""
scaler = StandardScaler()
poly = PolynomialFeatures(degree=2,interaction_only = False)
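    # NOTE: `con_feature` (used below and in categorical_singlelabel) is assumed to be a
    # module-level list of continuous feature column names; it is not defined in this snippet.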
# Train set
# Convert genre to dummies
X_train_genre = X_train[cat_variable].str.join(sep='*').str.get_dummies(sep='*')
known_columns = X_train_genre.columns
# Scaling continuous variables
X_train_con = X_train[con_feature]
X_train_con_scaled = scaler.fit_transform(X_train_con)
X_train_con_scaled_df = pd.DataFrame(X_train_con_scaled, columns=X_train_con.columns, index=X_train_con.index)
X_train_poly = poly.fit_transform(X_train_con_scaled)
X_train_poly_df = pd.DataFrame(X_train_poly, columns=poly.get_feature_names(X_train_con.columns), index=X_train_con.index)
#Combine
# X_train = pd.concat([X_train_genre,X_train_con_scaled_df],axis=1)
X_train = pd.concat([X_train_genre,X_train_poly_df],axis=1)
# Val set
# Convert genre to dummies
X_val_genre = X_val[cat_variable].str.join(sep='*').str.get_dummies(sep='*')
val_columns = X_val_genre.columns
X_val_genre = X_val_genre[[x for x in val_columns if x in known_columns]]
fill_dict = { c : 0 for c in [x for x in known_columns if x not in val_columns] }
X_val_genre = X_val_genre.assign(**fill_dict)
# Scaling continuous variables
X_val_con = X_val[con_feature]
X_val_con_scaled = scaler.transform(X_val_con)
X_val_con_scaled_df = pd.DataFrame(X_val_con_scaled, columns=X_val_con.columns, index=X_val_con.index)
X_val_poly = poly.transform(X_val_con_scaled)
X_val_poly_df = pd.DataFrame(X_val_poly, columns=poly.get_feature_names(X_val_con.columns), index=X_val_con.index)
#Combine
# X_val = pd.concat([X_val_genre,X_val_con_scaled_df],axis=1)
X_val = pd.concat([X_val_genre,X_val_poly_df],axis=1)
# Test set
# Convert genre to dummies
X_test_genre = X_test[cat_variable].str.join(sep='*').str.get_dummies(sep='*')
    test_columns = X_test_genre.columns
X_test_genre = X_test_genre[[x for x in test_columns if x in known_columns]]
fill_dict = { c : 0 for c in [x for x in known_columns if x not in test_columns] }
X_test_genre = X_test_genre.assign(**fill_dict)
# Scaling continuous variables
X_test_con = X_test[con_feature]
X_test_con_scaled = scaler.transform(X_test_con)
X_test_con_scaled_df = pd.DataFrame(X_test_con_scaled, columns=X_test_con.columns, index=X_test_con.index)
X_test_poly = poly.transform(X_test_con_scaled)
X_test_poly_df = pd.DataFrame(X_test_poly, columns=poly.get_feature_names(X_test_con.columns), index=X_test_con.index)
#Combine
# X_test = pd.concat([X_test_genre,X_test_con_scaled_df],axis=1)
X_test = pd.concat([X_test_genre,X_test_poly_df],axis=1)
return X_train,X_val,X_test
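# Hypothetical usage sketch (assumes the categorical column, e.g. 'genre', holds lists of
# labels and that `con_feature` is defined as noted above):
#   X_train, X_val, X_test = categorical_multilabel(X_train, X_val, X_test, 'genre')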
def categorical_singlelabel(X_train,X_val,X_test,cat_variable):
"""
Input: X_train,X_val,X_test,categorical_variable
Processing: preprocessing the three sets separately:
1. Separate continuous and categorical variable
    2. Scale + polynomial-fit the continuous variables and get_dummies on the categorical variable
    3. Combine back the continuous and categorical data
    Return: transformed X_train, X_val, X_test
"""
scaler = StandardScaler()
poly = PolynomialFeatures(degree=2,interaction_only = False)
# Train set
# Convert genre to dummies
X_train_genre = pd.get_dummies(X_train[cat_variable])
known_columns = X_train_genre.columns
# Scaling continuous variables
X_train_con = X_train[con_feature]
X_train_con_scaled = scaler.fit_transform(X_train_con)
X_train_con_scaled_df = pd.DataFrame(X_train_con_scaled, columns=X_train_con.columns, index=X_train_con.index)
X_train_poly = poly.fit_transform(X_train_con_scaled)
X_train_poly_df = pd.DataFrame(X_train_poly, columns=poly.get_feature_names(X_train_con.columns), index=X_train_con.index)
#Combine
X_train = pd.concat([X_train_genre,X_train_con_scaled_df],axis=1)
# X_train = pd.concat([X_train_genre,X_train_poly_df],axis=1)
# Val set
# Convert genre to dummies
X_val_genre = pd.get_dummies(X_val[cat_variable])
val_columns = X_val_genre.columns
X_val_genre = X_val_genre[[x for x in val_columns if x in known_columns]]
fill_dict = { c : 0 for c in [x for x in known_columns if x not in val_columns] }
X_val_genre = X_val_genre.assign(**fill_dict)
# Scaling continuous variables
X_val_con = X_val[con_feature]
X_val_con_scaled = scaler.transform(X_val_con)
X_val_con_scaled_df = pd.DataFrame(X_val_con_scaled, columns=X_val_con.columns, index=X_val_con.index)
X_val_poly = poly.transform(X_val_con_scaled)
X_val_poly_df = pd.DataFrame(X_val_poly, columns=poly.get_feature_names(X_val_con.columns), index=X_val_con.index)
#Combine
X_val = | pd.concat([X_val_genre,X_val_con_scaled_df],axis=1) | pandas.concat |
from datetime import timedelta
from functools import partial
from itertools import permutations
import dask.bag as db
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from hypothesis import given, settings
from hypothesis import strategies as st
from kartothek.core.cube.conditions import (
C,
Conjunction,
EqualityCondition,
GreaterEqualCondition,
GreaterThanCondition,
InequalityCondition,
InIntervalCondition,
IsInCondition,
LessEqualCondition,
LessThanCondition,
)
from kartothek.core.cube.cube import Cube
from kartothek.io.dask.bag_cube import build_cube_from_bag
from kartothek.io.eager import build_dataset_indices
from kartothek.io.eager_cube import append_to_cube, build_cube, remove_partitions
__all__ = (
"apply_condition_unsafe",
"data_no_part",
"fullrange_cube",
"fullrange_data",
"fullrange_df",
"massive_partitions_cube",
"massive_partitions_data",
"massive_partitions_df",
"multipartition_cube",
"multipartition_df",
"no_part_cube",
"no_part_df",
"other_part_cube",
"sparse_outer_cube",
"sparse_outer_data",
"sparse_outer_df",
"sparse_outer_opt_cube",
"sparse_outer_opt_df",
"test_complete",
"test_condition",
"test_condition_on_null",
"test_cube",
"test_delayed_index_build_correction_restriction",
"test_delayed_index_build_partition_by",
"test_df",
"test_fail_blocksize_negative",
"test_fail_blocksize_wrong_type",
"test_fail_blocksize_zero",
"test_fail_empty_dimension_columns",
"test_fail_missing_condition_columns",
"test_fail_missing_dimension_columns",
"test_fail_missing_partition_by",
"test_fail_missing_payload_columns",
"test_fail_no_store_factory",
"test_fail_projection",
"test_fail_unindexed_partition_by",
"test_fail_unstable_dimension_columns",
"test_fail_unstable_partition_by",
"test_filter_select",
"test_hypothesis",
"test_overlay_tricky",
"test_partition_by",
"test_projection",
"test_select",
"test_simple_roundtrip",
"test_sort",
"test_stresstest_index_select_row",
"test_wrong_condition_type",
"testset",
"updated_cube",
"updated_df",
)
@pytest.fixture(scope="module")
def fullrange_data():
return {
"seed": pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v1": np.arange(16),
"i1": np.arange(16),
}
),
"enrich_dense": pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v2": np.arange(16),
"i2": np.arange(16),
}
),
"enrich_sparse": pd.DataFrame(
{
"y": [0, 1, 2, 3, 0, 1, 2, 3],
"z": 0,
"p": [0, 0, 1, 1, 0, 0, 1, 1],
"q": [0, 0, 0, 0, 1, 1, 1, 1],
"v3": np.arange(8),
"i3": np.arange(8),
}
),
}
@pytest.fixture(scope="module")
def fullrange_cube(module_store, fullrange_data):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="fullrange_cube",
index_columns=["i1", "i2", "i3"],
)
build_cube(data=fullrange_data, store=module_store, cube=cube)
return cube
@pytest.fixture(scope="module")
def multipartition_cube(module_store, fullrange_data, fullrange_cube):
def _gen(part):
result = {}
for dataset_id, df in fullrange_data.items():
df = df.copy()
df["z"] = part
result[dataset_id] = df
return result
cube = fullrange_cube.copy(uuid_prefix="multipartition_cube")
build_cube_from_bag(
data=db.from_sequence([0, 1], partition_size=1).map(_gen),
store=module_store,
cube=cube,
ktk_cube_dataset_ids=["seed", "enrich_dense", "enrich_sparse"],
).compute()
return cube
@pytest.fixture(scope="module")
def sparse_outer_data():
return {
"seed": pd.DataFrame(
{
"x": [0, 1, 0],
"y": [0, 0, 1],
"z": 0,
"p": [0, 1, 2],
"q": 0,
"v1": [0, 3, 7],
"i1": [0, 3, 7],
}
),
"enrich_dense": pd.DataFrame(
{
"x": [0, 0],
"y": [0, 1],
"z": 0,
"p": [0, 2],
"q": 0,
"v2": [0, 7],
"i2": [0, 7],
}
),
"enrich_sparse": pd.DataFrame(
{"y": [0, 0], "z": 0, "p": [0, 1], "q": 0, "v3": [0, 3], "i3": [0, 3]}
),
}
@pytest.fixture(scope="module")
def sparse_outer_cube(module_store, sparse_outer_data):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="sparse_outer_cube",
index_columns=["i1", "i2", "i3"],
)
build_cube(data=sparse_outer_data, store=module_store, cube=cube)
return cube
@pytest.fixture(scope="module")
def sparse_outer_opt_cube(
module_store,
sparse_outer_data,
sparse_outer_cube,
sparse_outer_df,
sparse_outer_opt_df,
):
data = {}
for dataset_id in sparse_outer_data.keys():
df = sparse_outer_data[dataset_id].copy()
for col in sparse_outer_opt_df.columns:
if col in df.columns:
dtype = sparse_outer_opt_df[col].dtype
if dtype == np.float64:
dtype = np.int64
elif dtype == np.float32:
dtype = np.int32
elif dtype == np.float16:
dtype = np.int16
df[col] = df[col].astype(dtype)
data[dataset_id] = df
cube = sparse_outer_cube.copy(uuid_prefix="sparse_outer_opt_cube")
build_cube(data=data, store=module_store, cube=cube)
return cube
@pytest.fixture(scope="module")
def massive_partitions_data():
n = 17
return {
"seed": pd.DataFrame(
{
"x": np.arange(n),
"y": np.arange(n),
"z": np.arange(n),
"p": np.arange(n),
"q": np.arange(n),
"v1": np.arange(n),
"i1": np.arange(n),
}
),
"enrich_1": pd.DataFrame(
{
"x": np.arange(n),
"y": np.arange(n),
"z": np.arange(n),
"p": np.arange(n),
"q": np.arange(n),
"v2": np.arange(n),
"i2": np.arange(n),
}
),
"enrich_2": pd.DataFrame(
{
"y": np.arange(n),
"z": np.arange(n),
"p": np.arange(n),
"q": np.arange(n),
"v3": np.arange(n),
"i3": np.arange(n),
}
),
}
@pytest.fixture(scope="module")
def massive_partitions_cube(module_store, massive_partitions_data):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="massive_partitions_cube",
index_columns=["i1", "i2", "i3"],
)
build_cube(data=massive_partitions_data, store=module_store, cube=cube)
return cube
@pytest.fixture(scope="module")
def fullrange_df():
return (
pd.DataFrame(
data={
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v1": np.arange(16),
"v2": np.arange(16),
"v3": [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7],
"i1": np.arange(16),
"i2": np.arange(16),
"i3": [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7],
},
columns=["i1", "i2", "i3", "p", "q", "v1", "v2", "v3", "x", "y", "z"],
)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(scope="module")
def multipartition_df(fullrange_df):
dfs = []
for z in (0, 1):
df = fullrange_df.copy()
df["z"] = z
dfs.append(df)
return (
pd.concat(dfs, ignore_index=True)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(scope="module")
def sparse_outer_df():
return (
pd.DataFrame(
data={
"x": [0, 1, 0],
"y": [0, 0, 1],
"z": 0,
"p": [0, 1, 2],
"q": 0,
"v1": [0, 3, 7],
"v2": [0, np.nan, 7],
"v3": [0, 3, np.nan],
"i1": [0, 3, 7],
"i2": [0, np.nan, 7],
"i3": [0, 3, np.nan],
},
columns=["i1", "i2", "i3", "p", "q", "v1", "v2", "v3", "x", "y", "z"],
)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(scope="module")
def sparse_outer_opt_df(sparse_outer_df):
df = sparse_outer_df.copy()
df["x"] = df["x"].astype(np.int16)
df["y"] = df["y"].astype(np.int32)
df["z"] = df["z"].astype(np.int8)
df["v1"] = df["v1"].astype(np.int8)
df["i1"] = df["i1"].astype(np.int8)
return df
@pytest.fixture(scope="module")
def massive_partitions_df():
n = 17
return (
pd.DataFrame(
data={
"x": np.arange(n),
"y": np.arange(n),
"z": np.arange(n),
"p": np.arange(n),
"q": np.arange(n),
"v1": np.arange(n),
"v2": np.arange(n),
"v3": np.arange(n),
"i1": np.arange(n),
"i2": np.arange(n),
"i3": np.arange(n),
},
columns=["i1", "i2", "i3", "p", "q", "v1", "v2", "v3", "x", "y", "z"],
)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(scope="module")
def updated_cube(module_store, fullrange_data):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="updated_cube",
index_columns=["i1", "i2", "i3"],
)
build_cube(
data={
cube.seed_dataset: pd.DataFrame(
{
"x": [0, 0, 1, 1, 2, 2],
"y": [0, 1, 0, 1, 0, 1],
"z": 0,
"p": [0, 0, 1, 1, 2, 2],
"q": 0,
"v1": np.arange(6),
"i1": np.arange(6),
}
),
"enrich": pd.DataFrame(
{
"x": [0, 0, 1, 1, 2, 2],
"y": [0, 1, 0, 1, 0, 1],
"z": 0,
"p": [0, 0, 1, 1, 2, 2],
"q": 0,
"v2": np.arange(6),
"i2": np.arange(6),
}
),
"extra": pd.DataFrame(
{
"y": [0, 1, 0, 1, 0, 1],
"z": 0,
"p": [0, 0, 1, 1, 2, 2],
"q": 0,
"v3": np.arange(6),
"i3": np.arange(6),
}
),
},
store=module_store,
cube=cube,
)
remove_partitions(
cube=cube,
store=module_store,
ktk_cube_dataset_ids=["enrich"],
conditions=C("p") >= 1,
)
append_to_cube(
data={
"enrich": pd.DataFrame(
{
"x": [1, 1],
"y": [0, 1],
"z": 0,
"p": [1, 1],
"q": 0,
"v2": [7, 8],
"i2": [7, 8],
}
)
},
store=module_store,
cube=cube,
)
return cube
@pytest.fixture(scope="module")
def updated_df():
return (
pd.DataFrame(
data={
"x": [0, 0, 1, 1, 2, 2],
"y": [0, 1, 0, 1, 0, 1],
"z": 0,
"p": [0, 0, 1, 1, 2, 2],
"q": 0,
"v1": np.arange(6),
"v2": [0, 1, 7, 8, np.nan, np.nan],
"v3": np.arange(6),
"i1": np.arange(6),
"i2": [0, 1, 7, 8, np.nan, np.nan],
"i3": np.arange(6),
},
columns=["i1", "i2", "i3", "p", "q", "v1", "v2", "v3", "x", "y", "z"],
)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(scope="module")
def data_no_part():
return {
"seed": pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v1": np.arange(16),
"i1": np.arange(16),
}
),
"enrich_dense": pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"v2": np.arange(16),
"i2": np.arange(16),
}
),
"enrich_sparse": pd.DataFrame(
{"y": [0, 1, 2, 3], "z": 0, "v3": np.arange(4), "i3": np.arange(4)}
),
}
@pytest.fixture(scope="module")
def no_part_cube(module_store, data_no_part):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="data_no_part",
index_columns=["i1", "i2", "i3"],
)
build_cube(
data=data_no_part,
store=module_store,
cube=cube,
partition_on={"enrich_dense": [], "enrich_sparse": []},
)
return cube
@pytest.fixture(scope="module")
def other_part_cube(module_store, data_no_part):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="other_part_cube",
index_columns=["i1", "i2", "i3"],
)
build_cube(
data=data_no_part,
store=module_store,
cube=cube,
partition_on={"enrich_dense": ["i2"], "enrich_sparse": ["i3"]},
)
return cube
@pytest.fixture(scope="module")
def no_part_df():
return (
pd.DataFrame(
data={
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v1": np.arange(16),
"v2": np.arange(16),
"v3": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"i1": np.arange(16),
"i2": np.arange(16),
"i3": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
},
columns=["i1", "i2", "i3", "p", "q", "v1", "v2", "v3", "x", "y", "z"],
)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(
params=[
"fullrange",
"multipartition",
"sparse_outer",
"sparse_outer_opt",
"massive_partitions",
"updated",
"no_part",
"other_part",
],
scope="module",
)
def testset(request):
return request.param
@pytest.fixture(scope="module")
def test_cube(
testset,
fullrange_cube,
multipartition_cube,
sparse_outer_cube,
sparse_outer_opt_cube,
massive_partitions_cube,
updated_cube,
no_part_cube,
other_part_cube,
):
if testset == "fullrange":
return fullrange_cube
elif testset == "multipartition":
return multipartition_cube
elif testset == "sparse_outer":
return sparse_outer_cube
elif testset == "sparse_outer_opt":
return sparse_outer_opt_cube
elif testset == "massive_partitions":
return massive_partitions_cube
elif testset == "updated":
return updated_cube
elif testset == "no_part":
return no_part_cube
elif testset == "other_part":
return other_part_cube
else:
raise ValueError("Unknown param {}".format(testset))
@pytest.fixture(scope="module")
def test_df(
testset,
fullrange_df,
multipartition_df,
sparse_outer_df,
sparse_outer_opt_df,
massive_partitions_df,
updated_df,
no_part_df,
):
if testset == "fullrange":
return fullrange_df
elif testset == "multipartition":
return multipartition_df
elif testset == "sparse_outer":
return sparse_outer_df
elif testset == "sparse_outer_opt":
return sparse_outer_opt_df
elif testset == "massive_partitions":
return massive_partitions_df
elif testset == "updated":
return updated_df
elif testset in ("no_part", "other_part"):
return no_part_df
else:
raise ValueError("Unknown param {}".format(testset))
def test_simple_roundtrip(driver, function_store, function_store_rwro):
df = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v": [10, 11, 12, 13]})
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
build_cube(data=df, cube=cube, store=function_store)
result = driver(cube=cube, store=function_store_rwro)
assert len(result) == 1
df_actual = result[0]
df_expected = df.reindex(columns=["p", "v", "x"])
pdt.assert_frame_equal(df_actual, df_expected)
def test_complete(driver, module_store, test_cube, test_df):
result = driver(cube=test_cube, store=module_store)
assert len(result) == 1
df_actual = result[0]
pdt.assert_frame_equal(df_actual, test_df)
def apply_condition_unsafe(df, cond):
# For the sparse_outer testset, the test_df has the wrong datatype because we cannot encode missing integer data in
# pandas.
#
# The condition will not be applicable to the DF because the DF has floats while conditions have ints. We fix that
    # by modifying the condition.
#
# In case there is no missing data because of the right conditions, kartothek will return integer data.
# assert_frame_equal will then complain about this. So in case there is no missing data, let's recover the correct
# dtype here.
if not isinstance(cond, Conjunction):
cond = Conjunction(cond)
float_cols = {col for col in df.columns if df[col].dtype == float}
# convert int to float conditions
cond2 = Conjunction([])
for col, conj in cond.split_by_column().items():
if col in float_cols:
parts = []
for part in conj.conditions:
if isinstance(part, IsInCondition):
part = IsInCondition(
column=part.column, value=tuple((float(v) for v in part.value))
)
elif isinstance(part, InIntervalCondition):
part = InIntervalCondition(
column=part.column,
start=float(part.start),
stop=float(part.stop),
)
else:
part = part.__class__(column=part.column, value=float(part.value))
parts.append(part)
conj = Conjunction(parts)
cond2 &= conj
# apply conditions
df = cond2.filter_df(df).reset_index(drop=True)
# convert float columns to int columns
for col in df.columns:
if df[col].notnull().all():
dtype = df[col].dtype
if dtype == np.float64:
dtype = np.int64
elif dtype == np.float32:
dtype = np.int32
elif dtype == np.float16:
dtype = np.int16
df[col] = df[col].astype(dtype)
return df
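# Illustrative note (not part of the original test suite): for the sparse_outer test_df,
# where "v2" is float because of missing values, a condition such as C("v2") >= 7 is
# rewritten above to compare against 7.0 before filtering, and any fully populated float
# column in the filtered result is cast back to the matching integer dtype.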
@pytest.mark.parametrize(
"cond",
[
C("v1") >= 7,
C("v1") >= 10000,
C("v2") >= 7,
C("v3") >= 3,
C("i1") >= 7,
C("i1") >= 10000,
C("i2") >= 7,
C("i2") != 0,
C("i3") >= 3,
C("p") >= 1,
C("q") >= 1,
C("x") >= 1,
C("y") >= 1,
(C("x") == 3) & (C("y") == 3),
(C("i1") > 0) & (C("i2") > 0),
Conjunction([]),
],
)
def test_condition(driver, module_store, test_cube, test_df, cond):
result = driver(cube=test_cube, store=module_store, conditions=cond)
df_expected = apply_condition_unsafe(test_df, cond)
if df_expected.empty:
assert len(result) == 0
else:
assert len(result) == 1
df_actual = result[0]
pdt.assert_frame_equal(df_actual, df_expected)
@pytest.mark.parametrize("payload_columns", [["v1", "v2"], ["v2", "v3"], ["v3"]])
def test_select(driver, module_store, test_cube, test_df, payload_columns):
result = driver(cube=test_cube, store=module_store, payload_columns=payload_columns)
assert len(result) == 1
df_actual = result[0]
df_expected = test_df.loc[
:, sorted(set(payload_columns) | {"x", "y", "z", "p", "q"})
]
pdt.assert_frame_equal(df_actual, df_expected)
def test_filter_select(driver, module_store, test_cube, test_df):
result = driver(
cube=test_cube,
store=module_store,
payload_columns=["v1", "v2"],
conditions=(C("i3") >= 3), # completely unrelated to the payload
)
assert len(result) == 1
df_actual = result[0]
df_expected = test_df.loc[
test_df["i3"] >= 3, ["p", "q", "v1", "v2", "x", "y", "z"]
].reset_index(drop=True)
pdt.assert_frame_equal(df_actual, df_expected)
@pytest.mark.parametrize(
"partition_by",
[["i1"], ["i2"], ["i3"], ["x"], ["y"], ["p"], ["q"], ["i1", "i2"], ["x", "y"]],
)
def test_partition_by(driver, module_store, test_cube, test_df, partition_by):
dfs_actual = driver(cube=test_cube, store=module_store, partition_by=partition_by)
dfs_expected = [
df_g.reset_index(drop=True)
for g, df_g in test_df.groupby(partition_by, sort=True)
]
for df_expected in dfs_expected:
for col in df_expected.columns:
if df_expected[col].dtype == float:
try:
df_expected[col] = df_expected[col].astype(int)
except Exception:
pass
assert len(dfs_actual) == len(dfs_expected)
for df_actual, df_expected in zip(dfs_actual, dfs_expected):
pdt.assert_frame_equal(df_actual, df_expected)
@pytest.mark.parametrize("dimension_columns", list(permutations(["x", "y", "z"])))
def test_sort(driver, module_store, test_cube, test_df, dimension_columns):
result = driver(
cube=test_cube, store=module_store, dimension_columns=dimension_columns
)
assert len(result) == 1
df_actual = result[0]
df_expected = test_df.sort_values(
list(dimension_columns) + list(test_cube.partition_columns)
).reset_index(drop=True)
pdt.assert_frame_equal(df_actual, df_expected)
@pytest.mark.parametrize("payload_columns", [["y", "z"], ["y", "z", "v3"]])
def test_projection(driver, module_store, test_cube, test_df, payload_columns):
result = driver(
cube=test_cube,
store=module_store,
dimension_columns=["y", "z"],
payload_columns=payload_columns,
)
assert len(result) == 1
df_actual = result[0]
df_expected = (
test_df.loc[:, sorted(set(payload_columns) | {"y", "z", "p", "q"})]
.drop_duplicates()
.sort_values(["y", "z", "p", "q"])
.reset_index(drop=True)
)
pdt.assert_frame_equal(df_actual, df_expected)
def test_stresstest_index_select_row(driver, function_store):
n_indices = 100
n_rows = 1000
data = {"x": np.arange(n_rows), "p": 0}
for i in range(n_indices):
data["i{}".format(i)] = np.arange(n_rows)
df = pd.DataFrame(data)
cube = Cube(
dimension_columns=["x"],
partition_columns=["p"],
uuid_prefix="cube",
index_columns=["i{}".format(i) for i in range(n_indices)],
)
build_cube(data=df, cube=cube, store=function_store)
conditions = Conjunction([(C("i{}".format(i)) == 0) for i in range(n_indices)])
result = driver(
cube=cube,
store=function_store,
conditions=conditions,
payload_columns=["p", "x"],
)
assert len(result) == 1
df_actual = result[0]
df_expected = df.loc[df["x"] == 0].reindex(columns=["p", "x"])
pdt.assert_frame_equal(df_actual, df_expected)
def test_fail_missing_dimension_columns(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(cube=test_cube, store=module_store, dimension_columns=["x", "a", "b"])
assert (
"Following dimension columns were requested but are missing from the cube: a, b"
in str(exc.value)
)
def test_fail_empty_dimension_columns(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(cube=test_cube, store=module_store, dimension_columns=[])
assert "Dimension columns cannot be empty." in str(exc.value)
def test_fail_missing_partition_by(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(cube=test_cube, store=module_store, partition_by=["foo"])
assert (
"Following partition-by columns were requested but are missing from the cube: foo"
in str(exc.value)
)
def test_fail_unindexed_partition_by(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(cube=test_cube, store=module_store, partition_by=["v1", "v2"])
assert (
"Following partition-by columns are not indexed and cannot be used: v1, v2"
in str(exc.value)
)
def test_fail_missing_condition_columns(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(
cube=test_cube,
store=module_store,
conditions=(C("foo") == 1) & (C("bar") == 2),
)
assert (
"Following condition columns are required but are missing from the cube: bar, foo"
in str(exc.value)
)
def test_fail_missing_payload_columns(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(cube=test_cube, store=module_store, payload_columns=["foo", "bar"])
assert "Cannot find the following requested payload columns: bar, foo" in str(
exc.value
)
def test_fail_projection(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(
cube=test_cube,
store=module_store,
dimension_columns=["y", "z"],
payload_columns=["v1"],
)
assert (
'Cannot project dataset "seed" with dimensionality [x, y, z] to [y, z] '
"while keeping the following payload intact: v1" in str(exc.value)
)
def test_fail_unstable_dimension_columns(driver, module_store, test_cube, test_df):
with pytest.raises(TypeError) as exc:
driver(cube=test_cube, store=module_store, dimension_columns={"x", "y"})
assert "which has type set has an unstable iteration order" in str(exc.value)
def test_fail_unstable_partition_by(driver, module_store, test_cube, test_df):
with pytest.raises(TypeError) as exc:
driver(cube=test_cube, store=module_store, partition_by={"x", "y"})
assert "which has type set has an unstable iteration order" in str(exc.value)
def test_wrong_condition_type(driver, function_store, driver_name):
types = {
"int": pd.Series([-1], dtype=np.int64),
"uint": pd.Series([1], dtype=np.uint64),
"float": pd.Series([1.3], dtype=np.float64),
"bool": pd.Series([True], dtype=np.bool_),
"str": pd.Series(["foo"], dtype=object),
}
cube = Cube(
dimension_columns=["d_{}".format(t) for t in sorted(types.keys())],
partition_columns=["p_{}".format(t) for t in sorted(types.keys())],
uuid_prefix="typed_cube",
index_columns=["i_{}".format(t) for t in sorted(types.keys())],
)
data = {
"seed": pd.DataFrame(
{
"{}_{}".format(prefix, t): types[t]
for t in sorted(types.keys())
for prefix in ["d", "p", "v1"]
}
),
"enrich": pd.DataFrame(
{
"{}_{}".format(prefix, t): types[t]
for t in sorted(types.keys())
for prefix in ["d", "p", "i", "v2"]
}
),
}
build_cube(data=data, store=function_store, cube=cube)
df = pd.DataFrame(
{
"{}_{}".format(prefix, t): types[t]
for t in sorted(types.keys())
for prefix in ["d", "p", "i", "v1", "v2"]
}
)
for col in df.columns:
t1 = col.split("_")[1]
for t2 in sorted(types.keys()):
cond = C(col) == types[t2].values[0]
if t1 == t2:
result = driver(cube=cube, store=function_store, conditions=cond)
assert len(result) == 1
df_actual = result[0]
df_expected = cond.filter_df(df).reset_index(drop=True)
pdt.assert_frame_equal(df_actual, df_expected, check_like=True)
else:
with pytest.raises(TypeError) as exc:
driver(cube=cube, store=function_store, conditions=cond)
assert "has wrong type" in str(exc.value)
def test_condition_on_null(driver, function_store):
df = pd.DataFrame(
{
"x": pd.Series([0, 1, 2], dtype=np.int64),
"p": pd.Series([0, 0, 1], dtype=np.int64),
"v_f1": pd.Series([0, np.nan, 2], dtype=np.float64),
"v_f2": pd.Series([0, 1, np.nan], dtype=np.float64),
"v_f3": pd.Series([np.nan, np.nan, np.nan], dtype=np.float64),
"v_s1": pd.Series(["a", None, "c"], dtype=object),
"v_s2": pd.Series(["a", "b", None], dtype=object),
"v_s3": pd.Series([None, None, None], dtype=object),
}
)
cube = Cube(
dimension_columns=["x"],
partition_columns=["p"],
uuid_prefix="nulled_cube",
index_columns=[],
)
build_cube(data=df, store=function_store, cube=cube)
for col in df.columns:
# only iterate over the value columns (not the dimension / partition column):
if not col.startswith("v"):
continue
# col_type will be either 'f' for float or 's' for string; see column
# names above
col_type = col.split("_")[1][0]
if col_type == "f":
value = 1.2
elif col_type == "s":
value = "foo"
else:
raise RuntimeError("unknown type")
cond = C(col) == value
df_expected = cond.filter_df(df).reset_index(drop=True)
result = driver(cube=cube, store=function_store, conditions=cond)
if df_expected.empty:
assert len(result) == 0
else:
assert len(result) == 1
df_actual = result[0]
| pdt.assert_frame_equal(df_actual, df_expected, check_like=True) | pandas.testing.assert_frame_equal |
##########################################
# Share issuance as factor
# December 2018
# <NAME>
##########################################
import pandas as pd
import numpy as np
import os
from pandas.tseries.offsets import MonthEnd
# Note that ccm, comp and crsp_m are WRDS datasets. However, the code is useful for
# other datasets as long as they are panel datasets in conformity with those from WRDS.
# There are some methodology idiosyncrasies of the US dataset, per Fama-French (1993),
# but once understood, adapting the code to other countries' datasets is entirely feasible.
###################
# CRSP Block #
###################
## permco is a unique permanent identifier assigned by CRSP to all companies with issues on a CRSP file
## permno identifies a firm's security through all its history, and companies may have several stocks at one time
## shrcd is a two-digit code describing the type of shares traded. The first digit describes the type of security traded.
## exchcd is a code indicating the exchange on which a security is listed
## change variable format to int
crsp_m[['permco','permno','shrcd','exchcd']]=crsp_m[['permco','permno',
'shrcd','exchcd']].astype(int)
## Line up date to be end of month day, no adjustment on time, but on pattern
crsp_m['date']=pd.to_datetime(crsp_m['date'])
crsp_m['jdate']=crsp_m['date']+MonthEnd(0)
crsp_m = crsp_m[(crsp_m['date'].dt.year > 1993)] # This increases velocity of the algorithm,
# but pay attention on this, as it limits the dataset.
## adjusting for delisting return
dlret.permno=dlret.permno.astype(int)
dlret['dlstdt']=pd.to_datetime(dlret['dlstdt'])
dlret['jdate']=dlret['dlstdt']+MonthEnd(0) ## pick the delist date and put into the EoP
## merge the crsp dataset with the dlret on the left indexes
crsp = pd.merge(crsp_m, dlret, how='left',on=['permno','jdate'])
crsp['dlret']=crsp['dlret'].fillna(0)
crsp['ret']=crsp['ret'].fillna(0)
crsp['retadj']=(1+crsp['ret'])*(1+crsp['dlret'])-1 ## adjusting for delisting return
crsp['me']=crsp['prc'].abs()*crsp['shrout'] # calculate market equity
crsp=crsp.drop(['dlret','dlstdt','prc','shrout'], axis=1)
## axis = 0 is the row, and is default, and axis = 1 is the column to drop
crsp=crsp.sort_values(by=['jdate','permco','me'])
## sorting columns ascending = TRUE as default, by the variables: jdate is the adj date by the EoP and
## permco is the CRSP number for stocks, and me is the market equity.
### Aggregate Market Cap ###
## sum of me across different permno belonging to same permco a given date
crsp_summe = crsp.groupby(['jdate','permco'])['me'].sum().reset_index()
## reset the index to the prior numbers as default in pandas,
## and with the changed index still there drop = False as default
# largest mktcap within a permco/date
crsp_maxme = crsp.groupby(['jdate','permco'])['me'].max().reset_index()
# join by jdate/maxme to find the permno
crsp1=pd.merge(crsp, crsp_maxme, how='inner', on=['jdate','permco','me'])
## join : {‘inner’, ‘outer’}, default ‘outer’. Outer for union and inner for intersection.
## drop me column and replace with the sum me
crsp1=crsp1.drop(['me'], axis=1)
## join with sum of me to get the correct market cap info
crsp2=pd.merge(crsp1, crsp_summe, how='inner', on=['jdate','permco'])
## sort by permno and date and also drop duplicates
crsp2=crsp2.sort_values(by=['permno','jdate']).drop_duplicates()
## keep December market cap
crsp2['year']=crsp2['jdate'].dt.year
crsp2['month']=crsp2['jdate'].dt.month
decme=crsp2[crsp2['month']==12]
decme=decme[['permno','date','jdate','me','year']].rename(columns={'me':'dec_me'})
### July to June dates
crsp2['ffdate']=crsp2['jdate']+MonthEnd(-6) ## MonthEnd(-6) is to go six months in the EoM backwards
crsp2['ffyear']=crsp2['ffdate'].dt.year
crsp2['ffmonth']=crsp2['ffdate'].dt.month
crsp2['1+retx']=1+crsp2['retx'] ## retx is the holding period return w/o dividends for a month
crsp2=crsp2.sort_values(by=['permno','date'])
# cumret by stock ## pick the before year
crsp2['cumretx']=crsp2.groupby(['permno','ffyear'])['1+retx'].cumprod() ## compute the cumulative return
## of a year measured by ffyear, the data date backwards six months.
# lag cumret
crsp2['lcumretx']=crsp2.groupby(['permno'])['cumretx'].shift(1)
## shift one row (as default, axis = 0), this leads to the next period.
# lag market cap by one month
crsp2['lme']=crsp2.groupby(['permno'])['me'].shift(1)
## if first permno then use me/(1+retx) to replace the missing value
crsp2['count']=crsp2.groupby(['permno']).cumcount()
crsp2['lme']=np.where(crsp2['count']==0, crsp2['me']/crsp2['1+retx'], crsp2['lme'])
## insert a 'nan' if the count is zero, or pick the lag one market cap.
# baseline me ## pick the first month of this backwards year, and say it is the base.
mebase=crsp2[crsp2['ffmonth']==1][['permno','ffyear', 'lme']].rename(columns={'lme':'mebase'})
## merge result back together
crsp3=pd.merge(crsp2, mebase, how='left', on=['permno','ffyear'])
crsp3['wt']=np.where(crsp3['ffmonth']==1, crsp3['lme'], crsp3['mebase']*crsp3['lcumretx'])
## and really use the returns to take out the dividends distributed (but what about them?)
## wt is the adjusted lag me without dividends basically, by constructing a cum ret measure.
## the weight should have a criterium, and lagged me seems to be it. Not the current
## me, but six months behind one.
#######################
# CCM Block #
#######################
## Compustat and CRSP merged data
ccm['linkdt']=pd.to_datetime(ccm['linkdt']) ## linkdt is a calendar date marking the first effective
## date of the current link. If the link was valid before CRSP's earliest record, LINKDT is set to be
## SAS missing code ".B".
ccm['linkenddt']=pd.to_datetime(ccm['linkenddt']) ## LINKENDDT is the last effective date of the link record.
## It uses the SAS missing code ".E" if a link is still valid.
# if linkenddt is missing then set to today date
ccm['linkenddt']=ccm['linkenddt'].fillna(pd.to_datetime('today'))
###########################
### Net issuance Block ###
###########################
# The previous part is default for the CRSP dataset, but the following is
# the adaptive part to construct other type of portfolios.
# load share issuance original data
# =============================================================================
# os.chdir('C:\\Users\\n3o_A\\Google Drive (<EMAIL>)\\Doutorado Insper\\Finlab\\Finhub project ')
# share_issuance = pd.read_stata('Share_issuance.dta')
# share_issuance = share_issuance[share_issuance['exchcd'] != 0]
# share_issuance = share_issuance[['permno','date','vol','shrout','cfacshr']]
# share_issuance.to_stata('Share_issuance2.dta')
# =============================================================================
# load share issuance simplified data
os.chdir('C:\\Users\\n3o_A\\Google Drive (<EMAIL>)\\Doutorado Insper\\Finlab\\Finhub project')
share_issuance = pd.read_stata('Share_issuance2.dta')
# adjust for nan and zero values
share_issuance = share_issuance[pd.notnull(share_issuance['cfacshr'])]
share_issuance = share_issuance[share_issuance['cfacshr'] != 0]
# generate the adjustment factor for shares outstanding
df = share_issuance.set_index(['permno','date'])
firsts = (df.groupby(level=['permno']).transform('first'))
result = df['cfacshr'] / firsts['cfacshr']
result = result.reset_index()
result=result.rename(columns={'cfacshr':'adj_cfac'})
# adjust the shares outstanding by the adjustment factor above
share_issuance = pd.merge(share_issuance, result, how='inner', on=['permno','date'])
share_issuance['adj_out_shs'] = share_issuance['shrout']*share_issuance['adj_cfac']
share_issuance['adj_out_shs'].tail() # just a test to see if the last ones have values.
share_issuance = share_issuance.sort_values(by=['permno','date']).drop_duplicates()
share_issuance['jdate']=share_issuance['date']+MonthEnd(0)
# number months observations
share_issuance['count']=share_issuance.groupby(['permno']).cumcount()
# make the cumulative share issuance for -17 to -6 months
share_issuance['adj_out_shs'] = share_issuance['adj_out_shs'].astype(float)
share_issuance['ln_adj_out_shs'] = np.log(share_issuance['adj_out_shs'])
share_issuance['ln_shs_6'] = share_issuance.groupby(['permno'])['ln_adj_out_shs'].shift(6)
share_issuance['ln_shs_17'] = share_issuance.groupby(['permno'])['ln_adj_out_shs'].shift(17)
share_issuance['shs_iss'] = share_issuance['ln_shs_6'] - share_issuance['ln_shs_17']
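# i.e. shs_iss = ln(adj_out_shs at month t-6) - ln(adj_out_shs at month t-17): the log
# growth in split-adjusted shares outstanding over that eleven-month window.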
###########################
#### Portfolios Block ####
###########################
# make the usual portfolio schemes to make the asset pricing factor
ccm1=pd.merge(share_issuance[['permno','jdate','shs_iss', 'count']],ccm,how='left',on=['permno'])
# set link date bounds
ccm2=ccm1[(ccm1['jdate']>=ccm1['linkdt'])&(ccm1['jdate']<=ccm1['linkenddt'])]
ccm2=ccm2[['gvkey','permno','jdate','shs_iss', 'count']]
# create a market cap variable
crsp_m['me']=crsp_m['prc'].abs()*crsp_m['shrout'] # calculate market equity
# link comp and crsp
ccm5=pd.merge(crsp_m, ccm2, how='inner', on=['permno', 'jdate'])
## select NYSE stocks for bucket breakdown
## exchcd = 1 (NYSE) and positive beme and positive me and at least 2 years in comp and shrcd in (10,11), resp.
nyse=ccm5[(ccm5['exchcd']==1) & (ccm5['count']>1) & ((ccm5['shrcd']==10) | (ccm5['shrcd']==11))]
nyse = | pd.merge(ccm5[['gvkey', 'jdate', 'shs_iss']], nyse, how='inner', on=['gvkey', 'jdate', 'shs_iss']) | pandas.merge |
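# A typical next step (sketch only, not in the original script) is to form NYSE breakpoints
# on shs_iss at each formation date and assign all stocks to buckets, e.g.:
#   nyse_breaks = nyse.groupby('jdate')['shs_iss'].quantile([0.3, 0.7]).unstack()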
from datetime import datetime
from typing import Dict
import pandas as pd
import abc
import threading
from loguru import logger as log
from pathlib import Path
class Writer(object):
# TODO: get chunksize from config
def __init__(self):
self.schema = [
'source',
'created_at',
'lang',
'reply_settings',
'referenced_tweets',
'possibly_sensitive',
'author_id',
'id',
'text',
'conversation_id',
'public_metrics.retweet_count',
'public_metrics.reply_count',
'public_metrics.like_count',
'public_metrics.quote_count',
'entities.mentions',
'in_reply_to_user_id',
'entities.urls',
'entities.hashtags',
'attachments.media_keys',
'context_annotations'
]
self.buffer = pd.DataFrame(columns=self.schema)
# if (self.mp == True):
self.lock = threading.Lock()
self.path = Path(f'{datetime.strftime(datetime.now(), "%Y%m%d_%H%M")}_twacapic.csv')
# create output file
with self.path.open('w') as file:
file.writelines(f'{",".join(self.schema)}\n')
self.chunkSize = 100
def __del__(self):
# flush buffer before destruction
self.persist(len(self.buffer))
def write(self, df: pd.DataFrame):
# if (self.mp == True):
with self.lock:
self.buffer = | pd.concat([self.buffer, df], ignore_index=True, sort=False) | pandas.concat |
# #-- -- -- -- Merging DataFrames with pandas
# # Used for Data Scientist Training Path
# #FYI it's a compilation of how to work
# #with different commands.
# ### --------------------------------------------------------
# # # # ------>>>> Reading DataFrames from multiple files
# Import pandas
import pandas as pd
# Read 'Bronze.csv' into a DataFrame: bronze
bronze = pd.read_csv('Bronze.csv')
# Read 'Silver.csv' into a DataFrame: silver
silver = pd.read_csv('Silver.csv')
# Read 'Gold.csv' into a DataFrame: gold
gold = pd.read_csv('Gold.csv')
# Print the first five rows of gold
print(gold.head())
# ### --------------------------------------------------------
# # # # ------>>>> Reading DataFrames from multiple files in a loop
# Import pandas
import pandas as pd
# Create the list of file names: filenames
filenames = ['Gold.csv', 'Silver.csv', 'Bronze.csv']
# Create the list of three DataFrames: dataframes
dataframes = []
for filename in filenames:
dataframes.append(pd.read_csv(filename))
# Print top 5 rows of 1st DataFrame in dataframes
print(dataframes[0].head())
# ### --------------------------------------------------------
# # # # ------>>>> Combining DataFrames from multiple data files
# Import pandas
import pandas as pd
# Make a copy of gold: medals
medals = gold.copy()
# Create list of new column labels: new_labels
new_labels = ['NOC', 'Country', 'Gold']
# Rename the columns of medals using new_labels
medals.columns = new_labels
# Add columns 'Silver' & 'Bronze' to medals
medals['Silver'] = silver['Total']
medals['Bronze'] = bronze['Total']
# Print the head of medals
print(medals.head())
# ### --------------------------------------------------------
# # # # ------>>>> Sorting DataFrame with the Index & columns
# Import pandas
import pandas as pd
# Read 'monthly_max_temp.csv' into a DataFrame: weather1
weather1 = pd.read_csv('monthly_max_temp.csv', index_col='Month')
# Print the head of weather1
print(weather1.head())
# Sort the index of weather1 in alphabetical order: weather2
weather2 = weather1.sort_index()
# Print the head of weather2
print(weather2.head())
# Sort the index of weather1 in reverse alphabetical order: weather3
weather3 = weather1.sort_index(ascending=False)
# Print the head of weather3
print(weather3.head())
# Sort weather1 numerically using the values of 'Max TemperatureF': weather4
weather4 = weather1.sort_values('Max TemperatureF')
# Print the head of weather4
print(weather4.head())
# ### --------------------------------------------------------
# # # # ------>>>> Reindexing DataFrame from a list
# Import pandas
import pandas as pd
# Reindex weather1 using the list year: weather2
weather2 = weather1.reindex(year)
# Print weather2
print(weather2)
# Reindex weather1 using the list year with forward-fill: weather3
weather3 = weather1.reindex(year).ffill()
# Print weather3
print(weather3)
# ### --------------------------------------------------------
# # # # ------>>>> Reindexing using another DataFrame Index
# Import pandas
import pandas as pd
# Reindex names_1981 with index of names_1881: common_names
common_names = names_1981.reindex(names_1881.index)
# Print shape of common_names
print(common_names.shape)
# Drop rows with null counts: common_names
common_names = common_names.dropna()
# Print shape of new common_names
print(common_names.shape)
# ### --------------------------------------------------------
# # # # ------>>>>Adding unaligned DataFrames
# The DataFrames january and february, which have
# been printed in the IPython Shell, represent the
# sales a company made in the corresponding months.
# The Indexes in both DataFrames are called Company,
# identifying which company bought that quantity of
# units. The column Units is the number of units sold.
# If you were to add these two DataFrames by
# executing the command total = january + february,
# how many rows would the resulting DataFrame have?
# Try this in the IPython Shell and find out for yourself.
total = january + february
total
# R/ 6 rows.
# ### --------------------------------------------------------
# # # # ------>>>> Broadcasting in arithmetic formulas
# Extract selected columns from weather as new DataFrame: temps_f
temps_f = weather[['Min TemperatureF', 'Mean TemperatureF', 'Max TemperatureF']]
# Convert temps_f to celsius: temps_c
temps_c = (temps_f - 32) * 5/9
# Rename 'F' in column names with 'C': temps_c.columns
temps_c.columns = temps_c.columns.str.replace('F', 'C')
# Print first 5 rows of temps_c
print(temps_c.head())
# ### --------------------------------------------------------
# # # # ------>>>> Computing percentage growth of GDP
import pandas as pd
# Read 'GDP.csv' into a DataFrame: gdp
gdp = | pd.read_csv('GDP.csv', parse_dates=True, index_col='DATE') | pandas.read_csv |
import logging
import time
import traceback
from collections import Counter
import discord
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import numpy as np
import pandas as pd
from bot import SentdeBot
from discord.ext import commands, tasks
from discord.ext.commands import Cog, command
from discord.ext.commands.context import Context
from discord.ext.commands.errors import CommandError
from .utils import community_report, in_channels
def df_match(c1, c2):
if c1 == c2:
return np.nan
else:
return c1
class Community(Cog):
logger = logging.getLogger(f"<Cog 'Community' at {__name__}>")
def __init__(self, bot) -> None:
self.bot = bot
# add attributes here so that we dont have to access bot repeatedly
self.guild = None
self.path = self.bot.path
self.DAYS_BACK = self.bot.DAYS_BACK
self.RESAMPLE = self.bot.RESAMPLE
self.MOST_COMMON_INT = self.bot.MOST_COMMON_INT
self.COMMUNITY_BASED_CHANNELS = self.bot.COMMUNITY_BASED_CHANNELS
self.DISCORD_BG_COLOR = str(discord.Colour.dark_theme())
@command()
async def member_count(self, ctx: Context):
await ctx.send(f"```py\n{ctx.bot.guild.member_count}```")
@command()
@in_channels(SentdeBot.image_channels)
async def community_report(self, ctx: Context):
online, idle, offline = community_report(self.guild)
file = discord.File(self.path / "online.png", filename=f"online.png")
await ctx.send("", file=file)
await ctx.send(f'```py\n{{\n\t"Online": {online},\n\t"Idle/busy/dnd": {idle},\n\t"Offline": {offline}\n}}```')
@command()
@in_channels(SentdeBot.image_channels)
async def user_activity(self, ctx: Context):
file = discord.File(self.bot.path / "activity.png", filename=f"activity.png")
await ctx.send("", file=file)
@community_report.error
@user_activity.error
async def error_handler(self, ctx: Context, error: CommandError):
if isinstance(error, commands.CheckFailure):
await ctx.send(f"Cannot use `{ctx.command.qualified_name}` in this channel!", delete_after=5.0)
else:
self.logger.error(f"In {ctx.command.qualified_name}:")
traceback.print_tb(error.original.__traceback__)
self.logger.error(
f"{error.original.__class__.__name__}: {error.original}")
# The try-catch blocks are removed because
# the default error handler for a :class:`discord.ext.task.Loop`
# prints to sys.sterr by default.
# <https://discordpy.readthedocs.io/en/latest/ext/tasks/index.html#discord.ext.tasks.Loop.error>
@tasks.loop(seconds=300)
async def user_metrics(self):
online, idle, offline = community_report(self.guild)
# self.path: pathlib.Path
with open(self.path / "usermetrics.csv", "a") as f:
f.write(f"{int(time.time())},{online},{idle},{offline}\n")
df_msgs = pd.read_csv(str(self.path / "msgs.csv"), names=['time', 'uid', 'channel'])
df_msgs = df_msgs[(df_msgs['time'] > time.time()-(86400*self.DAYS_BACK))]
df_msgs['count'] = 1
df_msgs['date'] = | pd.to_datetime(df_msgs['time'], unit='s') | pandas.to_datetime |
"""Query DB for analyses."""
import csv
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import func, and_
from sqlalchemy.sql.expression import distinct
from ideotype.sql_declarative import (IdeotypeBase,
WeaData,
Sims,
SiteInfo,
Params)
def query_weadata(fpath_db):
"""
    Weather data query.
- Average meteorology at each site.
- Variance of meteorology at each site.
"""
engine = create_engine('sqlite:///' + fpath_db)
IdeotypeBase.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query = session.query(WeaData.site.label('site'),
func.avg(WeaData.temp).label('mean_temp'),
func.avg(WeaData.vpd).label('mean_vpd'),
func.sum(WeaData.precip).label('total_precip'),
func.count(WeaData.precip).label('precip_count')
).group_by(WeaData.site)
results = query.all()
# query output as csv
columns = []
for item in query.column_descriptions:
columns.append(item['name'])
    with open('testoutput.csv', 'w', newline='') as outfile:
outcsv = csv.writer(outfile)
outcsv.writerow(columns)
for row in results:
outcsv.writerow(row)
def query_gseason_climate(fpath_db, phenos):
"""
Query in-season climate.
Climate data queried from maizsim output,
which means there could be slight differences between
the climate conditions each phenotype experiences
due to difference in pdate & phenology.
Parameters
----------
fpath_db : str
phenos : list
List of top phenotype numbers.
Returns
-------
query : sqlalchemy query
results : list
List of query results.
df : pd.DataFrame
DataFrame of queried results.
"""
engine = create_engine('sqlite:///' + fpath_db)
IdeotypeBase.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query = session.query(Sims.cvar.label('cvar'),
Sims.year.label('year'),
Sims.site.label('site'),
Sims.pheno.label('pheno'),
func.avg(Sims.temp_air).label('temp_air'),
func.avg(Sims.temp_canopy).label('temp_can'),
func.avg(Sims.temp_soil).label('temp_soil'),
func.avg(Sims.VPD).label('vpd'),
func.avg(Sims.PFD_sun).label('pfd_sun'),
func.avg(Sims.PFD_shade).label('pfd_shade'),
).group_by(Sims.cvar,
Sims.year,
Sims.site,
Sims.pheno).filter(
and_(Sims.cvar.in_(phenos),
))
results = query.all()
# Construct dataframe from database query
columns = []
for item in query.column_descriptions:
columns.append(item['name'])
df = pd.DataFrame(results, columns=columns)
return(query, results, df)
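# Hypothetical usage sketch (the database path and phenotype numbers are assumptions):
#   query, results, df_climate = query_gseason_climate('/path/to/ideotype.db', phenos=[1, 5, 42])
#   df_climate.groupby('pheno')['temp_air'].mean()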
def query_yield(fpath_db, phenos):
"""
Sims query.
- Final yield for each site-year-cvar combination.
- Yield variation across cvars.
- Yield variation across sites.
- Yield variation across years.
"""
engine = create_engine('sqlite:///' + fpath_db)
IdeotypeBase.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query = session.query(Sims.cvar.label('cvar'),
Sims.year.label('year'),
Sims.site.label('site'),
Sims.pheno.label('pheno'),
func.avg(Sims.DM_ear).label('yield'),
SiteInfo.lat.label('lat'),
SiteInfo.lon.label('lon'),
SiteInfo.texture.label('soil_texture'),
).group_by(Sims.cvar,
Sims.year,
Sims.site,
Sims.pheno,
SiteInfo.site).filter(
and_(Sims.pheno == '"Matured"',
Sims.cvar.in_(phenos),
Sims.site == SiteInfo.site,
Sims.cvar == Params.cvar
))
results = query.all()
# Construct dataframe from database query
columns = []
for item in query.column_descriptions:
columns.append(item['name'])
df = pd.DataFrame(results, columns=columns)
return(query, results, df)
def query_phys(fpath_db, phenos):
"""
    Query physiological model outputs during sunlit hours.
Parameters
----------
fpath_db : str
phenos : list
"""
engine = create_engine('sqlite:///' + fpath_db)
IdeotypeBase.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query = session.query(Sims.cvar.label('cvar'),
Sims.site.label('site'),
Sims.year.label('year'),
Sims.pheno.label('pheno'),
func.avg(Sims.av_gs).label('gs'),
func.avg(Sims.Pn).label('pn'),
func.avg(Sims.Pg).label('pg'),
func.max(Sims.LAI_sun).label('LAI_sun'),
func.max(Sims.LAI_shade).label('LAI_shade'),
func.avg(Sims.Ag_sun).label('Ag_sun'),
func.avg(Sims.Ag_shade).label('Ag_shade'),
func.avg(Sims.An_sun).label('An_sun'),
func.avg(Sims.An_shade).label('An_shade')
).group_by(Sims.cvar,
Sims.year,
Sims.site,
Sims.pheno).filter(
and_(Sims.cvar.in_(phenos),
Sims.PFD_sun > 0
))
results = query.all()
# Construct dataframe from database query
columns = []
for item in query.column_descriptions:
columns.append(item['name'])
df = pd.DataFrame(results, columns=columns)
# Scale photosynthesis to canopy
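    # (weighted average by leaf-area fraction, i.e.
    #  Ag = Ag_sun*LAI_sun/(LAI_sun+LAI_shade) + Ag_shade*LAI_shade/(LAI_sun+LAI_shade),
    #  and likewise for An)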
df['sun_perct'] = df.LAI_sun/(df.LAI_sun + df.LAI_shade)
df['shade_perct'] = df.LAI_shade/(df.LAI_sun + df.LAI_shade)
df['Ag'] = (df.Ag_sun * df.sun_perct) + (df.Ag_shade * df.shade_perct)
df['An'] = (df.An_sun * df.sun_perct) + (df.An_shade * df.shade_perct)
return(query, results, df)
def query_carbon(fpath_db, phenos):
"""
Query mean and total carbon accumulation across phenostage.
Parameters
----------
fpath_db : str
phenos : list
"""
engine = create_engine('sqlite:///' + fpath_db)
IdeotypeBase.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query = session.query(Sims.cvar.label('cvar'),
Sims.site.label('site'),
Sims.year.label('year'),
Sims.pheno.label('pheno'),
func.sum(Sims.Pn).label('pn_sum'),
func.sum(Sims.Pg).label('pg_sum'),
func.avg(Sims.Pn).label('pn_mean'),
func.avg(Sims.Pg).label('pg_mean')
).group_by(Sims.cvar,
Sims.year,
Sims.site,
Sims.pheno).filter(
and_(Sims.cvar.in_(phenos),
))
results = query.all()
# Construct dataframe from database query
columns = []
for item in query.column_descriptions:
columns.append(item['name'])
df = pd.DataFrame(results, columns=columns)
return(query, results, df)
def query_mass(fpath_db, phenos):
"""
Query mass.
Parameters
----------
fpath_db : str
phenos : list
"""
engine = create_engine('sqlite:///' + fpath_db)
IdeotypeBase.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query = session.query(Sims.cvar.label('cvar'),
Sims.site.label('site'),
Sims.year.label('year'),
Sims.pheno.label('pheno'),
func.max(Sims.DM_total).label('dm_total'),
func.max(Sims.DM_root).label('dm_root'),
func.max(Sims.DM_shoot).label('dm_shoot'),
func.max(Sims.DM_stem).label('dm_stem'),
func.max(Sims.DM_leaf).label('dm_leaf'),
func.max(Sims.DM_ear).label('dm_ear'),
).group_by(Sims.cvar,
Sims.year,
Sims.site,
Sims.pheno).filter(
and_(Sims.cvar.in_(phenos)
))
results = query.all()
# Construct dataframe from database query
columns = []
for item in query.column_descriptions:
columns.append(item['name'])
df = pd.DataFrame(results, columns=columns)
return(query, results, df)
def query_pheno(fpath_db, phenos):
"""
Query pheno info.
Parameters
----------
fpath_db : str
phenos : list
List of top phenotype numbers.
Returns
-------
query : sqlalchemy query
results : list
List of query results.
"""
engine = create_engine('sqlite:///' + fpath_db)
IdeotypeBase.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query = session.query(Sims.cvar.label('cvar'),
Sims.site.label('site'),
Sims.year.label('year'),
Sims.pheno.label('pheno'),
func.count(distinct(Sims.jday)).label('pheno_days'),
func.min(Sims.jday).label('jday_start'),
func.min(Sims.date).label('date_start')
).group_by(Sims.cvar,
Sims.site,
Sims.year,
Sims.pheno).filter(
and_(Sims.cvar.in_(phenos)
))
results = query.all()
# Construct dataframe from database query
columns = []
for item in query.column_descriptions:
columns.append(item['name'])
df = pd.DataFrame(results, columns=columns)
return(query, results, df)
def query_leaves(fpath_db, phenos):
"""
Query physiological model outputs.
Parameters
----------
fpath_db : str
phenos : list
"""
engine = create_engine('sqlite:///' + fpath_db)
IdeotypeBase.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query = session.query(Sims.cvar.label('cvar'),
Sims.site.label('site'),
Sims.year.label('year'),
Sims.pheno.label('pheno'),
func.max(Sims.LAI).label('LAI'),
func.max(Sims.LA_perplant).label('LA'),
func.max(Sims.leaves).label('leaves'),
func.max(Sims.leaves_mature).label('leaves_mature'),
func.max(Sims.leaves_dropped).label('leaves_dropped')
).group_by(Sims.cvar,
Sims.year,
Sims.site,
Sims.pheno).filter(
and_(Sims.cvar.in_(phenos)
))
results = query.all()
# Construct dataframe from database query
columns = []
for item in query.column_descriptions:
columns.append(item['name'])
df = | pd.DataFrame(results, columns=columns) | pandas.DataFrame |
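# A minimal usage sketch, not part of the original module: each query helper above
# returns the SQLAlchemy query object, the raw result rows, and a DataFrame whose
# columns come from query.column_descriptions, so they are all called the same way.
# The database path and phenotype numbers below are hypothetical placeholders.
query, results, df_pheno = query_pheno('/path/to/ideotype_sims.db', [1, 5, 12])
print(df_pheno.groupby(['cvar', 'pheno'])['pheno_days'].mean())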
"""Postprocesses data across dates and simulation runs before aggregating at geographic levels (ADM0, ADM1, or ADM2)."""
import concurrent.futures
import gc
import queue
import shutil
import threading
import numpy as np
import pandas as pd
import tqdm
from fastparquet import ParquetFile
from loguru import logger
from .numerical_libs import enable_cupy, reimport_numerical_libs, xp
from .util.util import _banner
# TODO switch to cupy.quantile instead of percentile (they didn't have that when we first wrote this)
# also double check but the api might be consistent by now so we don't have to handle numpy/cupy differently
def main(cfg):
"""Main method for postprocessing the raw outputs from an MC run."""
_banner("Postprocessing Quantiles")
# verbose = cfg["runtime.verbose"]
# use_gpu = cfg["runtime.use_cupy"]
run_dir = cfg["postprocessing.run_dir"]
data_dir = run_dir / "data"
metadata_dir = run_dir / "metadata"
# if verbose:
# logger.info(cfg)
output_dir = cfg["postprocessing.output_dir"]
if not output_dir.exists():
output_dir.mkdir(parents=True)
# Copy metadata
output_metadata_dir = output_dir / "metadata"
output_metadata_dir.mkdir(exist_ok=True)
# TODO this should probably recurse directories too...
for md_file in metadata_dir.iterdir():
shutil.copy2(md_file, output_metadata_dir / md_file.name)
adm_mapping = pd.read_csv(metadata_dir / "adm_mapping.csv")
dates = pd.read_csv(metadata_dir / "dates.csv")
dates = dates["date"].to_numpy()
if cfg["runtime.use_cupy"]:
enable_cupy(optimize=True)
reimport_numerical_libs("postprocess")
# TODO switch to using the async_thread/buckyoutputwriter util
write_queue = queue.Queue()
def _writer():
"""Write thread that will pull from a queue."""
# Call write_queue.get() until it returns the None sentinel
file_tables = {}
for fname, q_dict in iter(write_queue.get, None):
df = | pd.DataFrame(q_dict) | pandas.DataFrame |
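# A minimal sketch of the sentinel-queue pattern used by _writer above (assuming the
# rest of the function writes each DataFrame out, e.g. to CSV): the writer thread
# keeps calling get() until it dequeues the None sentinel, so the producer signals
# shutdown by putting None after the last work item. All names below are illustrative.
import queue
import threading

import pandas as pd

work_queue = queue.Queue()

def writer_thread():
    for fname, q_dict in iter(work_queue.get, None):  # stop once None is dequeued
        pd.DataFrame(q_dict).to_csv(fname, index=False)

t = threading.Thread(target=writer_thread)
t.start()
work_queue.put(('quantiles_adm1.csv', {'adm1': [1, 2], 'q50': [0.1, 0.2]}))  # hypothetical payload
work_queue.put(None)  # sentinel: tells the writer thread to exit
t.join()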
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
import time as time
import pickle
from astropy.table import Table
from astropy import coordinates as coords
import astropy.units as u
from astroquery.sdss import SDSS
def download_spectra(coord_list_url, from_sp, to_sp, save=False):
"""
download_spectra()
Downloads SDSS spectra in a specified range based on a list of coordinates
Parameters
----------
coord_list_url: string
The path for the CSV file that contains the list of coordinates
that was downloaded using SQL, which provides coordinates for spectra
to download. Contains 500,000 rows
from_sp : int
The index from which to start downloading spectra. This enables us to
download and save the spectral data in batches.
to_sp : int
The index which specifies the upper limit until which to download spectra.
save : boolean
When True, save the resulting DataFrame into a pickle
When False, don't save
Returns
-------
df: pandas.DataFrame
The DataFrame that contains all downloaded spectral data.
columns: 'flux_list',
'wavelength',
'z',
'ra',
'dec',
'objid'
"""
t_start = time.perf_counter()
coord_list = pd.read_csv(filepath_or_buffer=coord_list_url)
print(f'coord_list = {coord_list}')
ra_list = coord_list["ra"].tolist()
dec_list = coord_list["dec"].tolist()
ra = ra_list[from_sp:to_sp]
dec = dec_list[from_sp:to_sp]
n_errors = 0
df = {}
df['flux_list'] = []
df['wavelength'] = []
df['z'] = []
df['ra'] = []
df['dec'] = []
df['objid'] = []
n_coordinates = len(ra)
number_none = 0
for i in range(n_coordinates):
try:
pos = coords.SkyCoord((ra[i]) * u.deg, (dec[i]) * u.deg, frame='icrs')
xid = SDSS.query_region(pos, spectro=True) # radius=5 * u.arcsec)
if xid is None:
number_none = number_none + 1
print('xid is None at:', i)
continue
elif len(xid) > 1: xid = Table(xid[0])  # keep only the first match
sp = SDSS.get_spectra(matches=xid)
df['flux_list'].append(sp[0][1].data['flux'])
df['wavelength'].append(10. ** sp[0][1].data['loglam'])
df['z'].append(xid['z'])
df['ra'].append(xid['ra'])
df['dec'].append(xid['dec'])
df['objid'].append(xid['objid'])
print(f'Downloaded: {i}')
except Exception:
print('Failed to download at:', i)
n_errors = n_errors + 1
df = | pd.DataFrame(df) | pandas.DataFrame |
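# Minimal usage sketch, assuming the coordinate CSV at the hypothetical path below
# has the 'ra' and 'dec' columns described in the docstring: download the first 100
# spectra and inspect the resulting DataFrame.
spectra_df = download_spectra('/path/to/coord_list.csv', from_sp=0, to_sp=100, save=False)
print(spectra_df[['ra', 'dec', 'z']].head())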
import pandas as pd
import numpy as np
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import sys
class temsilci(QItemDelegate):
def __init__(self, parent=None):
super().__init__()
def createEditor(self, parent, option, index):  # QItemDelegate only applies this editor when the override is named createEditor
    editor = QLineEdit(parent)
    editor.setValidator(QDoubleValidator())
    return editor
class bakkalBorcDefteri(QTableWidget):
def __init__(self, df):
super().__init__()
self.df = df
self.setStyleSheet('font-size: 25px;')
Satirlar, Sütünlar = self.df.shape
self.setColumnCount(Sütünlar)
self.setRowCount(Satirlar)
self.setHorizontalHeaderLabels(("Müşteri Adı-Soyad","Borç Tutarı","Son Ödeme tarihi","Ödendi/Ödenmedi","Ödeme Tipi"))
self.verticalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.setItemDelegateForColumn(1, temsilci())
for x in range(self.rowCount()):
for y in range(self.columnCount()):
self.setItem(x, y, QTableWidgetItem(str(self.df.iloc[x, y])))
self.cellChanged[int, int].connect(self.guncellemeDF)
def guncellemeDF(self, satir, sütün):
metin = self.item(satir, sütün).text()
self.df.iloc[satir, sütün] = metin
class DF(QWidget):
veri = np.array([["Müşteri Bilgisi Giriniz",0,"Tarih Belirt"], ["Müşteri Bilgisi Giriniz",0,"Tarih Belirt"],["Müşteri Bilgisi Giriniz",0,"Tarih Belirt"]])
df = pd.DataFrame(data=veri, index=[1,2,3], columns =["Müşteri Adı-Soyad","Borç Tutarı","Son Ödeme tarihi"])
df["Ödendi/Ödenmedi"] = pd.Series(data=["Durum Ne", "Durum Ne","Durum Ne"], index=[1,2,3])
df["Ödeme Tipi"] = pd.Series(data=["Kredi", "Nakit","Kredi"], index=[1,2,3])
df.loc[4] = pd.Series(data=["Müşteri Bilgisi Giriniz",0,"Tarih Belirt","Durum Ne","Kredi"], index=["Müşteri Adı-Soyad","Borç Tutarı","Son Ödeme tarihi","Ödendi/Ödenmedi","Ödeme Tipi"])
df.loc[5] = | pd.Series(data=["Müşteri Bilgisi Giriniz",0,"Tarih Belirt","Durum Ne","Nakit"], index=["Müşteri Adı-Soyad","Borç Tutarı","Son Ödeme tarihi","Ödendi/Ödenmedi","Ödeme Tipi"]) | pandas.Series |
from __future__ import division
from textwrap import dedent
import numpy.testing as npt
import pandas.util.testing as pdtest
import numpy
from numpy.testing import assert_equal
import pandas
import pytest
from statsmodels.imputation import ros
from statsmodels.compat.python import StringIO
if | pandas.__version__.split('.') | pandas.__version__.split |
import pandas as pd
import numpy as np
import os
from sklearn.linear_model import LogisticRegression
from math import exp
import pickle
def logistic_regression(data_set_path):
X,y = prepare_data(data_set_path)
retain_reg = LogisticRegression(penalty='l1', solver='liblinear', fit_intercept=True)
retain_reg.fit(X, y)
save_regression_summary(data_set_path,retain_reg)
save_regression_model(data_set_path,retain_reg)
save_dataset_predictions(data_set_path,retain_reg,X)
def prepare_data(data_set_path,ext='_groupscore',as_retention=True):
score_save_path = data_set_path.replace('.csv', '{}.csv'.format(ext))
assert os.path.isfile(score_save_path), 'You must run listing 6.3 to save grouped metric scores first'
grouped_data = pd.read_csv(score_save_path,index_col=[0,1])
y = grouped_data['is_churn'].astype(bool)
if as_retention: y=~y
X = grouped_data.drop(['is_churn'],axis=1)
return X,y
def calculate_impacts(retain_reg):
average_retain=s_curve(-retain_reg.intercept_)
one_stdev_retain=np.array( [ s_curve(-retain_reg.intercept_-c) for c in retain_reg.coef_[0]])
one_stdev_impact=one_stdev_retain-average_retain
return one_stdev_impact, average_retain
def s_curve(x):
return 1.0 - (1.0/(1.0+exp(-x)))
def save_regression_summary(data_set_path,retain_reg,ext=''):
one_stdev_impact,average_retain = calculate_impacts(retain_reg)
group_lists = pd.read_csv(data_set_path.replace('.csv', '_groupmets.csv'),index_col=0)
coef_df = pd.DataFrame.from_dict(
{'group_metric_offset': np.append(group_lists.index,'offset'),
'weight': np.append(retain_reg.coef_[0],retain_reg.intercept_),
'retain_impact' : np.append(one_stdev_impact,average_retain),
'group_metrics' : np.append(group_lists['metrics'],'(baseline)')})
save_path = data_set_path.replace('.csv', '_logreg_summary{}.csv'.format(ext))
coef_df.to_csv(save_path, index=False)
print('Saved coefficients to ' + save_path)
def save_regression_model(data_set_path,retain_reg,ext=''):
pickle_path = data_set_path.replace('.csv', '_logreg_model{}.pkl'.format(ext))
with open(pickle_path, 'wb') as fid:
pickle.dump(retain_reg, fid)
print('Saved model pickle to ' + pickle_path)
def save_dataset_predictions(data_set_path, retain_reg, X,ext=''):
predictions = retain_reg.predict_proba(X)
predict_df = | pd.DataFrame(predictions,index=X.index,columns=['churn_prob','retain_prob']) | pandas.DataFrame |
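# A small worked example of the impact calculation above (purely illustrative
# numbers, not fitted model output): with an intercept of 0.5 and a coefficient of
# 0.3 on a standardized metric group, baseline retention is s_curve(-0.5) and a one
# standard deviation increase shifts it to s_curve(-0.5 - 0.3).
baseline_retention = s_curve(-0.5)            # ~0.622
one_stdev_retention = s_curve(-0.5 - 0.3)     # ~0.690
print('impact of one stdev:', one_stdev_retention - baseline_retention)  # ~0.07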
import pandas as pd
import os
import glob
from pathlib import Path
import json
from mlsriracha.interfaces.process import ProcessInterface
from mlsriracha.plugins.kubernetes.common.helper import s3_download, s3_upload, azblob_download
class KubernetesProcess(ProcessInterface):
def __init__(self):
print('Selected Kubernetes profile')
Path('/opt/ml/processing/input/data').mkdir(parents=True, exist_ok=True)
Path('/opt/ml/processing/output/data').mkdir(parents=True, exist_ok=True)
channel_var = 'input_' + channel
if channel_var in self.get_env_vars():
print(f'Found channel request: {channel_var}' )
# create local path to save this
# if S3
if 's3' in self.get_env_vars()[channel_var]:
s3_download(self.get_env_vars()[channel_var], f'/opt/ml/processing/input/data/{channel}/')
# if Azure
if 'blob.core.windows' in self.get_env_vars()[channel_var]:
azblob_download(self.get_env_vars()[channel_var], f'/opt/ml/processing/input/data/{channel}/')
def get_env_vars(self):
envs = {}
for k, v in os.environ.items():
if k.startswith('sriracha_'):
try:
value = float(v) # Type-casting the string to `float`.
if value.is_integer():
value = int(value)
except ValueError:
value = v
envs[k.replace('sriracha_', '')] = value
return envs
def input_as_dataframe(self, filename):
"""
The function returns a pandas DataFrame for the input artifacts.
Mirroring the SageMaker FileMode layout, input files are made available in the
container's processing inputs directory when the job starts.
This function takes an optional filename (or glob pattern) and merges all
matching CSV files in that directory into a dataframe for reading.
Arguments:
filename (str): Optional file name or glob pattern; when empty, all CSV
files in the inputs directory are read.
"""
if filename:
csv_files = glob.glob(os.path.join(f'/opt/ml/processing/inputs/{filename}'))
else:
csv_files = glob.glob(os.path.join(f'/opt/ml/processing/inputs/*.csv'))
print(f'Files in input directory: {csv_files}')
# loop over the list of csv files
fileBytes = []
for f in csv_files:
# read the csv file
df = | pd.read_csv(f) | pandas.read_csv |
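# A minimal sketch of how the per-file frames are typically combined afterwards
# (assumption: the original method concatenates every CSV it reads into a single
# DataFrame before returning it).
import glob
import os

import pandas as pd

csv_files = glob.glob(os.path.join('/opt/ml/processing/inputs', '*.csv'))
frames = [pd.read_csv(f) for f in csv_files]
merged = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
print(merged.shape)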
#----------------------------------------------------------------------- NEEDED PACKAGES ---------------------------------------------------------------------
import pandas as pd
from glob import glob
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import re
import seaborn as sn
import tensorflow as tf
#-------------------------------------------------------------------- PREPROCESSING ------------------------------------------------------------------------
def emoticon_replacer(sentence):
"""
replace emoticons in a text with words with similar semantic value
"""
sentence=re.sub(r'😍|🥰|😘|😻|❤️|🧡|💛|💚|💙|💜|🖤|🤍|🤎|💕|💞|💓|💗|💖|💘|💝', 'adoro ', sentence)
sentence=re.sub(r'😀|😃|😄|😁|😆|😂|🤣|😹|😸', 'ahah ', sentence)
sentence=re.sub(r'😡|🤬|👿|🤡', 'infame ', sentence)
sentence=re.sub(r'✈️|🔥|💫|⭐️|🌟|✨|💥|🛫|🛬|🛩|🚀', 'wow ', sentence)
sentence=re.sub(r'😢|😭|😿', 'piango ', sentence)
sentence=re.sub(r'🤢|🤮', 'schifo ', sentence)
return sentence
def tokenize(sentence, tokenizer, SEQ_LEN=50, emotic=False):
"""
tokenizes a sentence, preparing it for the BERT model
"""
sentence=re.sub(r'(http\S+)|(\s)#\w+', '', sentence)
if emotic==True:
sentence=emoticon_replacer(sentence)
tokens = tokenizer.encode_plus(sentence, max_length=SEQ_LEN,
truncation=True, padding='max_length',
add_special_tokens=True, return_attention_mask=True,
return_token_type_ids=False, return_tensors='tf')
return tokens['input_ids'], tokens['attention_mask']
def preprocess_txt(input_ids, masks, labels):
"""
format for tf model
"""
return {'input_ids': input_ids, 'attention_mask': masks}, labels
def preprocess_txt_imtxt(input_ids, masks, input_ids2, masks2, labels):
"""
format for tf model
"""
return {'input_ids': input_ids, 'attention_mask': masks,
'input_ids2': input_ids2, 'attention_mask2': masks2}, labels
def aug(image, label):
"""
perform data augmentation
"""
IMG_SIZE=224
image = tf.image.random_brightness(image, 0.15)
image = tf.image.random_contrast(image, upper=1.5, lower=0.5)
image = tf.image.random_saturation(image,upper=1.5, lower=0.5)
image = tf.image.random_hue(image, 0.15)
#if tf.random.uniform([])>0.5:
#image= tf.image.flip_left_right(image)
if tf.random.uniform([])>0:
image = tf.image.resize_with_crop_or_pad(image, IMG_SIZE + 6, IMG_SIZE + 6)
# Random crop back to the original size
image = tf.image.random_crop(image, size=[IMG_SIZE, IMG_SIZE, 3])
image = tf.clip_by_value(image, 0, 1)
return image, label
def preprocess_image(image, labels, prediction=False):
"""
format for tf model + open image and prepare it for resnet
"""
if prediction==False:
image = tf.io.read_file(image)
image = tf.io.decode_image(image, channels=3,expand_animations = False)
image = tf.cast(image, tf.float32) / 255.0
image = tf.image.resize(image, size=(224, 224))
if prediction==True:
image= tf.expand_dims(image, axis=0)
return image
return image, labels
def preprocess_txt_image(input_ids, masks, path, labels):
"""
format for tf model + open image and prepare it for resnet
"""
image = tf.io.read_file(path)
image = tf.io.decode_image(image, channels=3,expand_animations = False)
image = tf.cast(image, tf.float32) / 255.0
image = tf.image.resize(image, size=(224, 224))
return {'input_ids': input_ids, 'attention_mask': masks, 'images': image}, labels
def preprocess_txt_imtxt_image(input_ids, masks, input_ids2, masks2, path, labels):
"""
format for tf model + open image and prepare it for resnet
"""
image = tf.io.read_file(path)
image = tf.io.decode_image(image, channels=3,expand_animations = False)
image = tf.cast(image, tf.float32) / 255.0
image = tf.image.resize(image, size=(224, 224))
return {'input_ids': input_ids, 'attention_mask': masks,
'input_ids2': input_ids2, 'attention_mask2': masks2,
'images': image}, labels
#------------------------------------------------------ DATA IMPORTER ---------------------------------------------------------------------
def df_to_tf_data(df, tokenizer, txt=True, image=False, imtxt=False,
SEQ_LEN=50, SEQ_LEN2=10,
shuffle=True, emotic=False, augmentation=False):
"""
from dataframe to tensorflow Dataset object
SEQ_LEN: max number of tokens from text before truncation
SEQ_LEN2: max number of tokens from in-image text before truncation
emotic: whether or not to translate emoticons
augmentation: whether or not to augment images
"""
df=df.replace({'negative': 0,'neutral':1, 'positive': 2})
arr = df['sentiment'].values # take sentiment column in df as array
labels = np.zeros((arr.size, arr.max()+1)) # initialize empty (all zero) label array
labels[np.arange(arr.size), arr] = 1 # add ones in indices where we have a value
if txt == True:
Xids = np.zeros((len(df), SEQ_LEN))
Xmask = np.zeros((len(df), SEQ_LEN))
for i, sentence in enumerate(df['text']):
Xids[i, :], Xmask[i, :] = tokenize(sentence, tokenizer, SEQ_LEN, emotic=emotic)
if imtxt == True:
Xids2 = np.zeros((len(df), SEQ_LEN2))
Xmask2 = np.zeros((len(df), SEQ_LEN2))
for i, sentence in enumerate(df['inimagetext']):
Xids2[i, :], Xmask2[i, :] = tokenize(sentence, tokenizer, SEQ_LEN2, emotic=False)
if image == True:
paths = []
for i, img in enumerate(df['path']):
paths.append(img)
if txt==True:
if imtxt == True:
if image == True:
dataset = tf.data.Dataset.from_tensor_slices((Xids, Xmask, Xids2, Xmask2, paths, labels))
preprocess=preprocess_txt_imtxt_image
else:
dataset = tf.data.Dataset.from_tensor_slices((Xids, Xmask, Xids2, Xmask2, labels))
preprocess=preprocess_txt_imtxt
else:
if image == True:
dataset = tf.data.Dataset.from_tensor_slices((Xids, Xmask, paths, labels))
preprocess=preprocess_txt_image
else:
dataset = tf.data.Dataset.from_tensor_slices((Xids, Xmask, labels))
preprocess=preprocess_txt
else:
if image == True:
dataset = tf.data.Dataset.from_tensor_slices((paths, labels))
preprocess=preprocess_image
# shuffle and batch the dataset
if shuffle==True:
if augmentation==True:
dataset = dataset.shuffle(5000).map(preprocess).map(aug).batch(4)
else:
dataset = dataset.shuffle(5000).map(preprocess).batch(4)
else:
dataset = dataset.map(preprocess).batch(4)
return(dataset)
#------------------------------------------------------ PLOTS --------------------------------------------------------------------------
def plot_history(history, title, save=False):
f = plt.figure(figsize=(15,4))
plt.suptitle(title)
f.add_subplot(1,2, 1)
plt.plot(history['loss'])
plt.plot(history['val_loss'])
plt.legend(['Train loss', 'Val loss'])
plt.xlabel('Epochs')
plt.ylabel('Loss')
f.add_subplot(1,2, 2)
plt.plot(history['accuracy'])
plt.plot(history['val_accuracy'])
plt.legend(['Train Accuracy', 'Val accuracy'])
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.show()
if save!=False:
f.savefig('C:\\Users\\Egon\\Desktop\\tesi\\immagini tesi\\modelli\\'+title+'.png')
def confusion_matrix_plotter(true,pred, title='confusion_matrix', normalize='true', save=False):
lab=['negative', 'neutral', 'positive']
mat=np.array(tf.math.confusion_matrix(predictions=pred, labels=true))
if normalize=='true':
mat=mat/mat.sum(axis=1, keepdims=True)  # normalize each row (true label) to sum to 1
fig, ax= plt.subplots(figsize = (15,8))
sn.heatmap( | pd.DataFrame(mat, index=lab, columns=lab) | pandas.DataFrame |
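# Minimal usage sketch for df_to_tf_data above, assuming a transformers tokenizer
# and a DataFrame with the columns the function expects ('text', 'inimagetext',
# 'path', 'sentiment'). The model name and image paths are hypothetical.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('bert-base-multilingual-cased')
toy_df = pd.DataFrame({
    'text': ['bellissimo posto 😍', 'servizio pessimo 🤮'],
    'inimagetext': ['', ''],
    'path': ['img/1.jpg', 'img/2.jpg'],
    'sentiment': ['positive', 'negative'],
})
# Text-only dataset (no image decoding), with emoticon translation enabled
text_ds = df_to_tf_data(toy_df, tokenizer, txt=True, image=False, emotic=True)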
import os, sys
import numpy as np
import pandas as pd
import time
import pydicom
from glob import glob
def computeSliceSpacing(alldcm):
try:
if len(alldcm)>1:
ds0 = pydicom.dcmread(alldcm[0], force = False, defer_size = 256, specific_tags = ['SliceLocation'], stop_before_pixels = True)
value0 = float(ds0.data_element('SliceLocation').value)
ds1 = pydicom.dcmread(alldcm[1], force = False, defer_size = 256, specific_tags = ['SliceLocation'], stop_before_pixels = True)
value1 = float(ds1.data_element('SliceLocation').value)
SliceSpacing = abs(value1-value0)
else:
SliceSpacing = -1.0
except Exception as why:
SliceSpacing = -1.0
return SliceSpacing
def countSeriesInstanceUIDs(settings, NumSamples=None):
root = settings['folderpath_discharge']
study_uids = os.listdir(root)
if NumSamples is None:
NumSamples = len(study_uids)
study_uids = study_uids[0:NumSamples]
NumSeriesInstanceUIDs = 0
for istudy, study_uid in enumerate(study_uids):
print(istudy, study_uid)
series_uids = os.listdir(os.path.join(root, study_uid))
NumSeriesInstanceUIDs = NumSeriesInstanceUIDs + len(series_uids)
return NumSeriesInstanceUIDs
def extractDICOMTags(settings, NumSamples=None):
root = settings['folderpath_discharge']
fout = settings['filepath_dicom']
specific_tags = settings['dicom_tags']
cols_first=[]
study_uids = os.listdir(root)
df = | pd.DataFrame(columns=specific_tags) | pandas.DataFrame |
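# Minimal usage sketch for computeSliceSpacing, assuming a folder of DICOM files at
# the hypothetical path below: the helper reads the SliceLocation tag of the first
# two files and returns the absolute difference, or -1.0 if that fails.
alldcm = sorted(glob('/path/to/series/*.dcm'))
spacing = computeSliceSpacing(alldcm)
print('slice spacing:', spacing)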
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas._libs.tslibs import period as libperiod
import pandas as pd
from pandas import DatetimeIndex, Period, PeriodIndex, Series, notna, period_range
import pandas._testing as tm
class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx[...]
assert result.equals(idx)
assert result is not idx
def test_getitem(self):
idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx[0]
assert result == pd.Period("2011-01-01", freq="D")
result = idx[-1]
assert result == pd.Period("2011-01-31", freq="D")
result = idx[0:5]
expected = pd.period_range("2011-01-01", "2011-01-05", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[0:10:2]
expected = pd.PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-07", "2011-01-09"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[-20:-5:3]
expected = pd.PeriodIndex(
["2011-01-12", "2011-01-15", "2011-01-18", "2011-01-21", "2011-01-24"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[4::-1]
expected = PeriodIndex(
["2011-01-05", "2011-01-04", "2011-01-03", "2011-01-02", "2011-01-01"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
def test_getitem_index(self):
idx = period_range("2007-01", periods=10, freq="M", name="x")
result = idx[[1, 3, 5]]
exp = pd.PeriodIndex(["2007-02", "2007-04", "2007-06"], freq="M", name="x")
tm.assert_index_equal(result, exp)
result = idx[[True, True, False, False, False, True, True, False, False, False]]
exp = pd.PeriodIndex(
["2007-01", "2007-02", "2007-06", "2007-07"], freq="M", name="x"
)
tm.assert_index_equal(result, exp)
def test_getitem_partial(self):
rng = period_range("2007-01", periods=50, freq="M")
ts = Series(np.random.randn(len(rng)), rng)
with pytest.raises(KeyError, match=r"^'2006'$"):
ts["2006"]
result = ts["2008"]
assert (result.index.year == 2008).all()
result = ts["2008":"2009"]
assert len(result) == 24
result = ts["2008-1":"2009-12"]
assert len(result) == 24
result = ts["2008Q1":"2009Q4"]
assert len(result) == 24
result = ts[:"2009"]
assert len(result) == 36
result = ts["2009":]
assert len(result) == 50 - 24
exp = result
result = ts[24:]
tm.assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
msg = "left slice bound for non-unique label: '2008'"
with pytest.raises(KeyError, match=msg):
ts[slice("2008", "2009")]
def test_getitem_datetime(self):
rng = period_range(start="2012-01-01", periods=10, freq="W-MON")
ts = Series(range(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
tm.assert_series_equal(rs, ts)
def test_getitem_nat(self):
idx = pd.PeriodIndex(["2011-01", "NaT", "2011-02"], freq="M")
assert idx[0] == pd.Period("2011-01", freq="M")
assert idx[1] is pd.NaT
s = pd.Series([0, 1, 2], index=idx)
assert s[pd.NaT] == 1
s = pd.Series(idx, index=idx)
assert s[pd.Period("2011-01", freq="M")] == pd.Period("2011-01", freq="M")
assert s[pd.NaT] is pd.NaT
def test_getitem_list_periods(self):
# GH 7710
rng = period_range(start="2012-01-01", periods=10, freq="D")
ts = Series(range(len(rng)), index=rng)
exp = ts.iloc[[1]]
tm.assert_series_equal(ts[[Period("2012-01-02", freq="D")]], exp)
def test_getitem_seconds(self):
# GH#6716
didx = pd.date_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
pidx = period_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01/01 10:00"], s[3600:3660])
tm.assert_series_equal(s["2013/01/01 9H"], s[:3600])
for d in ["2013/01/01", "2013/01", "2013"]:
tm.assert_series_equal(s[d], s)
def test_getitem_day(self):
# GH#6716
# Confirm DatetimeIndex and PeriodIndex works identically
didx = pd.date_range(start="2013/01/01", freq="D", periods=400)
pidx = period_range(start="2013/01/01", freq="D", periods=400)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01"], s[0:31])
tm.assert_series_equal(s["2013/02"], s[31:59])
tm.assert_series_equal(s["2014"], s[365:])
invalid = ["2013/02/01 9H", "2013/02/01 09:00"]
for v in invalid:
with pytest.raises(KeyError, match=v):
s[v]
class TestWhere:
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where(self, klass):
i = period_range("20130101", periods=5, freq="D")
cond = [True] * len(i)
expected = i
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * (len(i) - 1)
expected = PeriodIndex([pd.NaT] + i[1:].tolist(), freq="D")
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = period_range("20130101", periods=5, freq="D")
for arr in [np.nan, pd.NaT]:
result = i.where(notna(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_where_invalid_dtypes(self):
pi = period_range("20130101", periods=5, freq="D")
i2 = pi.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + pi[2:].tolist(), freq="D")
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.asi8)
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.asi8.view("timedelta64[ns]"))
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.to_timestamp("S"))
class TestTake:
def test_take(self):
# GH#10295
idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx.take([0])
assert result == pd.Period("2011-01-01", freq="D")
result = idx.take([5])
assert result == pd.Period("2011-01-06", freq="D")
result = idx.take([0, 1, 2])
expected = pd.period_range("2011-01-01", "2011-01-03", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == "D"
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([7, 4, 1])
expected = pd.PeriodIndex(
["2011-01-08", "2011-01-05", "2011-01-02"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([3, 2, 5])
expected = PeriodIndex(
["2011-01-04", "2011-01-03", "2011-01-06"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([-3, 2, 5])
expected = PeriodIndex(
["2011-01-29", "2011-01-03", "2011-01-06"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
def test_take_misc(self):
index = period_range(start="1/1/10", end="12/31/12", freq="D", name="idx")
expected = PeriodIndex(
[
datetime(2010, 1, 6),
datetime(2010, 1, 7),
datetime(2010, 1, 9),
datetime(2010, 1, 13),
],
freq="D",
name="idx",
)
taken1 = index.take([5, 6, 8, 12])
taken2 = index[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, PeriodIndex)
assert taken.freq == index.freq
assert taken.name == expected.name
def test_take_fill_value(self):
# GH#12631
idx = pd.PeriodIndex(
["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx", freq="D"
)
result = idx.take(np.array([1, 0, -1]))
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "NaT"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "index -5 is out of bounds for( axis 0 with)? size 3"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
class TestIndexing:
def test_get_loc_msg(self):
idx = period_range("2000-1-1", freq="A", periods=10)
bad_period = Period("2012", "A")
with pytest.raises(KeyError, match=r"^Period\('2012', 'A-DEC'\)$"):
idx.get_loc(bad_period)
try:
idx.get_loc(bad_period)
except KeyError as inst:
assert inst.args[0] == bad_period
def test_get_loc_nat(self):
didx = DatetimeIndex(["2011-01-01", "NaT", "2011-01-03"])
pidx = PeriodIndex(["2011-01-01", "NaT", "2011-01-03"], freq="M")
# check DatetimeIndex compat
for idx in [didx, pidx]:
assert idx.get_loc(pd.NaT) == 1
assert idx.get_loc(None) == 1
assert idx.get_loc(float("nan")) == 1
assert idx.get_loc(np.nan) == 1
def test_get_loc(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with non-duplicate
idx0 = pd.PeriodIndex([p0, p1, p2])
expected_idx1_p1 = 1
expected_idx1_p2 = 2
assert idx0.get_loc(p1) == expected_idx1_p1
assert idx0.get_loc(str(p1)) == expected_idx1_p1
assert idx0.get_loc(p2) == expected_idx1_p2
assert idx0.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx0.get_loc("foo")
with pytest.raises(KeyError, match=r"^1\.1$"):
idx0.get_loc(1.1)
msg = (
r"'PeriodIndex\(\['2017-09-01', '2017-09-02', '2017-09-03'\],"
r" dtype='period\[D\]', freq='D'\)' is an invalid key"
)
with pytest.raises(TypeError, match=msg):
idx0.get_loc(idx0)
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with duplicate
idx1 = pd.PeriodIndex([p1, p1, p2])
expected_idx1_p1 = slice(0, 2)
expected_idx1_p2 = 2
assert idx1.get_loc(p1) == expected_idx1_p1
assert idx1.get_loc(str(p1)) == expected_idx1_p1
assert idx1.get_loc(p2) == expected_idx1_p2
assert idx1.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx1.get_loc("foo")
with pytest.raises(KeyError, match=r"^1\.1$"):
idx1.get_loc(1.1)
msg = (
r"'PeriodIndex\(\['2017-09-02', '2017-09-02', '2017-09-03'\],"
r" dtype='period\[D\]', freq='D'\)' is an invalid key"
)
with pytest.raises(TypeError, match=msg):
idx1.get_loc(idx1)
# get the location of p1/p2 from
# non-monotonic increasing/decreasing PeriodIndex with duplicate
idx2 = pd.PeriodIndex([p2, p1, p2])
expected_idx2_p1 = 1
expected_idx2_p2 = np.array([True, False, True])
assert idx2.get_loc(p1) == expected_idx2_p1
assert idx2.get_loc(str(p1)) == expected_idx2_p1
tm.assert_numpy_array_equal(idx2.get_loc(p2), expected_idx2_p2)
tm.assert_numpy_array_equal(idx2.get_loc(str(p2)), expected_idx2_p2)
def test_is_monotonic_increasing(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
idx_inc0 = pd.PeriodIndex([p0, p1, p2])
idx_inc1 = pd.PeriodIndex([p0, p1, p1])
idx_dec0 = pd.PeriodIndex([p2, p1, p0])
idx_dec1 = pd.PeriodIndex([p2, p1, p1])
idx = pd.PeriodIndex([p1, p2, p0])
assert idx_inc0.is_monotonic_increasing is True
assert idx_inc1.is_monotonic_increasing is True
assert idx_dec0.is_monotonic_increasing is False
assert idx_dec1.is_monotonic_increasing is False
assert idx.is_monotonic_increasing is False
def test_is_monotonic_decreasing(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
idx_inc0 = | pd.PeriodIndex([p0, p1, p2]) | pandas.PeriodIndex |
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import Index, MultiIndex, Series, date_range, isna
import pandas._testing as tm
@pytest.fixture(
params=[
"linear",
"index",
"values",
"nearest",
"slinear",
"zero",
"quadratic",
"cubic",
"barycentric",
"krogh",
"polynomial",
"spline",
"piecewise_polynomial",
"from_derivatives",
"pchip",
"akima",
"cubicspline",
]
)
def nontemporal_method(request):
"""Fixture that returns an (method name, required kwargs) pair.
This fixture does not include method 'time' as a parameterization; that
method requires a Series with a DatetimeIndex, and is generally tested
separately from these non-temporal methods.
"""
method = request.param
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
@pytest.fixture(
params=[
"linear",
"slinear",
"zero",
"quadratic",
"cubic",
"barycentric",
"krogh",
"polynomial",
"spline",
"piecewise_polynomial",
"from_derivatives",
"pchip",
"akima",
"cubicspline",
]
)
def interp_methods_ind(request):
"""Fixture that returns a (method name, required kwargs) pair to
be tested for various Index types.
This fixture does not include methods - 'time', 'index', 'nearest',
'values' as a parameterization
"""
method = request.param
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
class TestSeriesInterpolateData:
def test_interpolate(self, datetime_series, string_series):
ts = Series(np.arange(len(datetime_series), dtype=float), datetime_series.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method="linear")
tm.assert_series_equal(linear_interp, ts)
ord_ts = Series(
[d.toordinal() for d in datetime_series.index], index=datetime_series.index
).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method="time")
tm.assert_series_equal(time_interp, ord_ts)
def test_interpolate_time_raises_for_non_timeseries(self):
# When method='time' is used on a non-TimeSeries that contains a null
# value, a ValueError should be raised.
non_ts = Series([0, 1, 2, np.NaN])
msg = "time-weighted interpolation only works on Series.* with a DatetimeIndex"
with pytest.raises(ValueError, match=msg):
non_ts.interpolate(method="time")
@td.skip_if_no_scipy
def test_interpolate_cubicspline(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
result = ser.reindex(new_index).interpolate(method="cubicspline")[1:3]
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interpolate_pchip(self):
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(
Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75])
).astype(float)
interp_s = ser.reindex(new_index).interpolate(method="pchip")
# does not blow up, GH5977
interp_s[49:51]
@td.skip_if_no_scipy
def test_interpolate_akima(self):
ser = Series([10, 11, 12, 13])
# interpolate at new_index where `der` is zero
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima")
tm.assert_series_equal(interp_s[1:3], expected)
# interpolate at new_index where `der` is a non-zero int
expected = Series(
[11.0, 1.0, 1.0, 1.0, 12.0, 1.0, 1.0, 1.0, 13.0],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima", der=1)
tm.assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_piecewise_polynomial(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="piecewise_polynomial")
tm.assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_from_derivatives(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="from_derivatives")
tm.assert_series_equal(interp_s[1:3], expected)
@pytest.mark.parametrize(
"kwargs",
[
{},
pytest.param(
{"method": "polynomial", "order": 1}, marks=td.skip_if_no_scipy
),
],
)
def test_interpolate_corners(self, kwargs):
s = Series([np.nan, np.nan])
tm.assert_series_equal(s.interpolate(**kwargs), s)
s = Series([], dtype=object).interpolate()
tm.assert_series_equal(s.interpolate(**kwargs), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method="index")
expected = s.copy()
bad = isna(expected.values)
good = ~bad
expected = Series(
np.interp(vals[bad], vals[good], s.values[good]), index=s.index[bad]
)
tm.assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method="values")
tm.assert_series_equal(other_result, result)
tm.assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
msg = (
"time-weighted interpolation only works on Series or DataFrames "
"with a DatetimeIndex"
)
with pytest.raises(ValueError, match=msg):
s.interpolate(method="time")
@pytest.mark.parametrize(
"kwargs",
[
{},
pytest.param(
{"method": "polynomial", "order": 1}, marks=td.skip_if_no_scipy
),
],
)
def test_nan_interpolate(self, kwargs):
s = Series([0, 1, np.nan, 3])
result = s.interpolate(**kwargs)
expected = Series([0.0, 1.0, 2.0, 3.0])
tm.assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1.0, 2.0, 3.0, 4.0], index=[1, 3, 5, 9])
tm.assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list("abcd"))
result = s.interpolate()
expected = Series([0.0, 1.0, 2.0, 2.0], index=list("abcd"))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_quad(self):
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method="quadratic")
expected = Series([1.0, 4.0, 9.0, 16.0], index=[1, 2, 3, 4])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interp_scipy_basic(self):
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1.0, 3.0, 7.5, 12.0, 18.5, 25.0])
result = s.interpolate(method="slinear")
tm.assert_series_equal(result, expected)
result = s.interpolate(method="slinear", downcast="infer")
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
import os, sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize
from pyddem.volint_tools import neff_circ, std_err
import functools
import matplotlib.ticker as mtick
from mpl_toolkits.axes_grid.inset_locator import inset_axes
plt.rcParams.update({'font.size': 5})
plt.rcParams.update({'lines.linewidth':0.35})
plt.rcParams.update({'axes.linewidth':0.35})
plt.rcParams.update({'lines.markersize':2.5})
plt.rcParams.update({'axes.labelpad':1.5})
all_csv = '/home/atom/ongoing/work_worldwide/validation/tcorr/tinterp_corr_deseas_agg_all.csv'
# all_csv = '/home/atom/ongoing/work_worldwide/validation/tinterp_corr_agg_all.csv'
df = pd.read_csv(all_csv)
# df = df[df.reg==5]
cutoffs = list(set(list(df.cutoff)))
dts = sorted(list(set(list(df.nb_dt))))
col = ['tab:orange','tab:blue','tab:olive','tab:red','tab:cyan','tab:brown','tab:gray','tab:pink','tab:purple']
#plot covar by lag
# for dt in dts:
#
# df_dt = df[df.nb_dt == dt]
#
# for cutoff in cutoffs:
# df_c = df_dt[df_dt.cutoff == cutoff]
#
# if cutoff == 10000:
# plt.scatter(df_c.bins.values[1],df_c.exp.values[1],color=col[dts.index(dt)],label=str(dt))
# plt.scatter(df_c.bins.values[20],df_c.exp.values[20],color=col[dts.index(dt)])
# plt.scatter(df_c.bins.values[50],df_c.exp.values[50],color=col[dts.index(dt)])
# elif cutoff == 100000:
# plt.scatter(df_c.bins.values[20],df_c.exp.values[20],color=col[dts.index(dt)])
# plt.scatter(df_c.bins.values[50],df_c.exp.values[50],color=col[dts.index(dt)])
# else:
# plt.scatter(df_c.bins.values[20],df_c.exp.values[20],color=col[dts.index(dt)])
# plt.scatter(df_c.bins.values[50],df_c.exp.values[50],color=col[dts.index(dt)])
#
# plt.ylim([0,50])
# plt.xscale('log')
# plt.legend()
#plot covar by dt
dts = sorted(dts)
dts.remove(540.)
dts.remove(900.)
dts.remove(1750.)
dts.remove(2250.)
arr_res = np.zeros((len(dts),7))
arr_count = np.zeros((len(dts),7))
for dt in dts:
df_dt = df[df.nb_dt == dt]
for cutoff in cutoffs:
df_c = df_dt[df_dt.cutoff == cutoff]
if cutoff == 10000:
arr_res[dts.index(dt),0]=np.nanmean(df_c.exp.values[1:2])
arr_count[dts.index(dt),0]=np.nanmean(df_c['count'].values[1:2])
arr_res[dts.index(dt), 1] = np.nanmean(df_c.exp.values[20 - 10:20 + 10])
arr_count[dts.index(dt), 1] = np.nanmean(df_c['count'].values[20 - 10:20 + 10])
arr_res[dts.index(dt), 2] = np.nanmean(df_c.exp.values[50 - 10:50 + 10])
arr_count[dts.index(dt), 2] = np.nanmean(df_c['count'].values[50 - 10:50 + 10])
elif cutoff == 100000:
arr_res[dts.index(dt),3]=np.nanmean(df_c.exp.values[20-5:20+20])
arr_count[dts.index(dt),3]=np.nanmean(df_c['count'].values[20-10:20+10])
arr_res[dts.index(dt),4]=np.nanmean(df_c.exp.values[50-10:50+10])
arr_count[dts.index(dt),4]=np.nanmean(df_c['count'].values[50-10:50+10])
elif cutoff == 1000000:
arr_res[dts.index(dt),5]=np.nanmean(df_c.exp.values[20-10:20+30])
arr_count[dts.index(dt),5]=np.nanmean(df_c['count'].values[20-10:20+30])
arr_res[dts.index(dt),6]=np.nanmean(df_c.exp.values[50-40:50+40])
arr_count[dts.index(dt),6]=np.nanmean(df_c['count'].values[50-40:50+40])
arr_res[arr_count<100]=np.nan
# for dt in dts:
#
# df_dt = df[df.nb_dt == dt]
#
# for cutoff in cutoffs:
# df_c = df_dt[df_dt.cutoff == cutoff]
#
# if cutoff == 10000:
# plt.scatter(dt,df_c.exp.values[1],color=col[0])
# plt.scatter(dt,np.nanmean(df_c.exp.values[20-10:20+10]),color=col[1])
# plt.scatter(dt,np.nanmean(df_c.exp.values[50-10:50+10]),color=col[2])
# elif cutoff == 100000:
# plt.scatter(dt,np.nanmean(df_c.exp.values[20-10:20+10]),color=col[3])
# plt.scatter(dt,np.nanmean(df_c.exp.values[50-10:50+10]),color=col[4])
# else:
# plt.scatter(dt,np.nanmean(df_c.exp.values[20-10:20+10]),color=col[5])
# plt.scatter(dt,np.nanmean(df_c.exp.values[50-10:50+10]),color=col[6])
fig = plt.figure(figsize=(7.2,9.3))
# plt.subplots_adjust(hspace=0.3)
grid = plt.GridSpec(8, 13, wspace=0.05, hspace=0.5)
ax = fig.add_subplot(grid[:2,:2])
# ax = fig.add_subplot(2, 1, 1)
vario = df[df.nb_dt == 720.]
vec_bins = []
vec_exp = []
vgm1 = vario[vario.cutoff == 10000]
vgm1 = vgm1[vgm1.bins<3000]
for i in range(6):
vec_bins += [np.nanmean(vgm1.bins.values[0+i*5:5+i*5])]
vec_exp += [np.nanmean(vgm1.exp.values[0+i*5:5+i*5])]
# vec_bins += vgm1.bins.tolist()
# vec_exp += vgm1.exp.tolist()
vgm1 = vario[vario.cutoff == 100000]
vgm1 = vgm1[np.logical_and(vgm1.bins>3000,vgm1.bins<30000)]
vec_bins += vgm1.bins.tolist()
vec_exp += vgm1.exp.tolist()
vgm1 = vario[vario.cutoff == 1000000]
vgm1 = vgm1[vgm1.bins>30000]
for i in range(18):
vec_bins += [np.nanmean(vgm1.bins.values[0+i*5:5+i*5])]
vec_exp += [np.nanmean(vgm1.exp.values[0+i*5:5+i*5])]
vec_bins = np.array(vec_bins)
vec_exp=np.array(vec_exp)
def sph_var(c0,c1,a1,h):
if h < a1:
vgm = c0 + c1 * (3 / 2 * h / a1-1 / 2 * (h / a1) ** 3)
else:
vgm = c0 + c1
return vgm
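# Quick illustrative check of the spherical model above (not part of the original
# script): with no nugget (c0=0), a partial sill c1=10 m^2 and a range a1=2000 m,
# the modelled variance rises from 0 at zero lag and flattens at the full sill
# once the lag reaches the range.
for h_check in [0, 500, 1000, 2000, 5000]:
    print(h_check, sph_var(0, 10, 2000, h_check))  # 0.0, 3.671875, 6.875, 10, 10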
vect = np.array(list(np.arange(0,3000,1)) + list(np.arange(3000,30000,10)) + list(np.arange(30000,3000000,100)))
mod = []
c1s = [0] + list(arr_res[dts.index(720.),:])
a1s = [0.2,2,5,20,50,200]
#find unbiased sills
list_c = []
for j in range(len(a1s)):
print('Range:' + str(a1s[-1 - j]))
c = c1s[-2 - j] - c1s[-3 - j]
print(c)
for k in range(j):
# c -= sph_var(0, list_c[k], a1s[-1 - k] * 1000, a1s[-1 - j] * 1000)
if j>5:
c -= (sph_var(0, list_c[k], a1s[-1 - k] * 1000, a1s[-1 - j] * 1000) - sph_var(0,list_c[k], a1s[-1-k]*1000,a1s[-2-j]*1000))
elif j==5:
c -= sph_var(0, list_c[k], a1s[-1 - k] * 1000, a1s[-1 - j] * 1000)
c = max(0, c)
list_c.append(c)
list_c.reverse()
#compute variogram
for i in range(len(vect)):
val = 0
for j in range(len(a1s)):
val += sph_var(0,list_c[j],a1s[j]*1000,vect[i])
mod.append(val)
mod = np.array(mod)
ax.scatter(vec_bins/1000,vec_exp,color='black',marker='x')
ax.set_xlim((0,3))
ax.set_ylim((0,50))
ax.set_xticks([0,1,2])
ax.text(0.075, 0.975, 'a', transform=ax.transAxes,
fontsize=8, fontweight='bold', va='top', ha='left')
ax.vlines(0.15,0,60,color=col[0],linewidth=0.5)
ax.text(0.4,c1s[1]-5,'$s_0$',color=col[0],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.vlines(2,0,60,color=col[1],linewidth=0.5)
ax.text(2.2,c1s[2]-5,'$s_1$',color=col[1],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.plot(vect/1000,mod,color='dimgrey',linestyle='dashed')
# ax.hlines(25,0,500,colors='black',linestyles='dotted')
ax.set_ylabel('Variance of elevation differences (m$^2$)')
ax.tick_params(width=0.35,length=2.5)
ax = fig.add_subplot(grid[:2,2:4])
ax.scatter(vec_bins/1000,vec_exp,color='black',marker='x')
ax.set_xlim((0,30))
ax.set_ylim((0,50))
ax.set_xticks([0,10,20])
# ax.text(0.075, 0.975, 'B', transform=ax.transAxes,
# fontsize=14, fontweight='bold', va='top', ha='left')
ax.vlines(5,0,60,color=col[2],linewidth=0.5)
ax.text(6,c1s[3]-5,'$s_2$',color=col[2],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.vlines(20,0,60,color=col[3],linewidth=0.5)
ax.text(21,c1s[4]-5,'$s_3$',color=col[3],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.plot(vect/1000,mod,color='dimgrey',linestyle='dashed')
# ax.hlines(25,0,500,colors='black',linestyles='dotted',label='Global mean variance')
ax.set_yticks([])
ax.set_xlabel('Spatial lag (km)')
ax.tick_params(width=0.35,length=2.5)
ax = fig.add_subplot(grid[:2,4:6])
ax.scatter(vec_bins/1000,vec_exp,color='black',marker='x')
ax.set_xlim((0,550))
ax.set_ylim((0,50))
ax.set_xticks([0,100,200,300,400,500])
# ax.text(0.075, 0.975, 'C', transform=ax.transAxes,
# fontsize=14, fontweight='bold', va='top', ha='left')
ax.vlines(50,0,60,colors=[col[4]],linewidth=0.5)
ax.text(70,c1s[5]-5,'$s_4$',color=col[4],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.vlines(200,0,60,colors=[col[5]],linewidth=0.5)
ax.text(220,c1s[6]-7,'$s_5$',color=col[5],ha='left',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.vlines(500,0,60,colors=[col[6]],linewidth=0.5)
ax.text(480,c1s[6]-7,'$s_6$',color=col[6],ha='right',va='bottom',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
ax.plot(vect/1000,mod,color='dimgrey',linestyle='dashed')
# ax.hlines(25,0,500,colors='black',linestyles='dotted')
ax.tick_params(width=0.35,length=2.5)
ax.plot([],[],color='grey',linestyle='dashed',label='Sum of spherical models')
ax.scatter([],[],color='black',marker='x',label='Empirical variance')
ax.vlines([],[],[],color=col[0],label='0.15 km',linewidth=0.5)
ax.vlines([],[],[],color=col[1],label='2 km',linewidth=0.5)
ax.vlines([],[],[],color=col[2],label='5 km',linewidth=0.5)
ax.vlines([],[],[],color=col[3],label='20 km',linewidth=0.5)
ax.vlines([],[],[],color=col[4],label='50 km',linewidth=0.5)
ax.vlines([],[],[],color=col[5],label='200 km',linewidth=0.5)
ax.vlines([],[],[],color=col[6],label='500 km',linewidth=0.5)
ax.legend(loc='lower right',ncol=3,title='Spatial correlations of GP elevation at $\Delta t$ = 720 days',title_fontsize=6,columnspacing=0.5)
ax.set_yticks([])
ax = fig.add_subplot(grid[2:4,:6])
coefs_list = []
y = None
# arr_res[0:1,4]=25
# arr_res[arr_res>25] = 25.
# arr_res[4,2]=np.nan
# arr_res[3:,3]=np.nan
# arr_res[0,3]=25.
# arr_res[0,3:] = np.nan
for i in [0,1,2,3,4,5,6]:
# i=0
# arr_res[-1,0]=np.nan
coefs , _ = scipy.optimize.curve_fit(lambda t,a,b:a*t+b, np.array(dts)[~np.isnan(arr_res[:,i])], np.sqrt(arr_res[:,i][~np.isnan(arr_res[:,i])]))
coefs_list.append(coefs)
x = np.arange(0, 3000, 1)
if y is not None:
y0 = y
else:
y0 = x*0
y = coefs[0]*x+coefs[1] #- 2*np.sin(x/365.2224*np.pi)**2
# y[y>25]=25.
# y[y<y0]=y0[y<y0]
y = y
ax.plot(x,y**2 -2*np.sin(x/365.2224*2*np.pi)**2,color=col[i])
ax.fill_between(x,y0**2 -2*np.sin(x/365.2224*2*np.pi)**2,y**2 -2*np.sin(x/365.2224*2*np.pi)**2,color = col[i],alpha=0.2)
# ax.fill_between(x,40*np.ones(len(x)),y,color='tab:gray')
# arr_res[0,3:]=25.
for i in [0,1,2,3,4,5,6]:
ax.scatter(dts,arr_res[:,i],color=col[i])
# ax.hlines(25,0,3000,linestyles='dashed',color='tab:gray')
ax.plot([],[],color='black',label='Model fit')
ax.fill_between([],[],color=col[0],label='0.15 km')
ax.fill_between([],[],color=col[1],label='2 km')
ax.fill_between([],[],color=col[2],label='5 km')
ax.fill_between([],[],color=col[3],label='20 km')
ax.scatter([],[],color='black',label='Empirical\nvariance')
ax.fill_between([],[],color=col[4],label='50 km')
ax.fill_between([],[],color=col[5],label='200 km')
ax.fill_between([],[],color=col[6],label='500 km')
ax.set_xlim([0,1370])
ax.set_ylim([0,78])
ax.set_ylabel('Variance of elevation differences (m$^{2}$)')
ax.set_xlabel('Days to closest observation $\Delta t$')
ax.vlines(720,0,100,colors='black',linestyles='dashed')
ax.text(740,5,'$\overline{s_{0}(\Delta t)}$: correlated until 0.15 km',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35),color='tab:orange')
ax.text(800,22,'$s_{1}(\Delta t)$: correlated until 2 km',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35),color='tab:blue')
ax.text(1150,35,'$s_{3}(\Delta t)$',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35),color='tab:red')
ax.text(1250,48,'$s_{5}(\Delta t)$',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35),color='tab:brown')
# ax.text(1000,22,'Fully correlated = Systematic',bbox= dict(boxstyle='round', facecolor='white', alpha=0.5),color='dimgrey')
# plt.xscale('log')
ax.legend(loc='upper left',bbox_to_anchor=(0.0625,0,0.9375,1),title='Spatial correlations of\nGP elevation with\ntime lag to observation',title_fontsize=6,ncol=2,columnspacing=0.5)
ax.text(0.025, 0.975, 'b', transform=ax.transAxes,
fontsize=8, fontweight='bold', va='top', ha='left')
ax.text(740,45,'panel (a)',fontweight='bold',va='bottom',ha='left')
# plt.savefig('/home/atom/ongoing/work_worldwide/figures/Figure_S12.png',dpi=360)
ax.tick_params(width=0.35,length=2.5)
ax = fig.add_subplot(grid[4:6,:6])
corr_ranges = [150, 2000, 5000, 20000, 50000]
coefs = [np.array([1.26694247e-03, 3.03486839e+00]),
np.array([1.35708936e-03, 4.05065698e+00]),
np.array([1.42572733e-03, 4.20851582e+00]),
np.array([1.82537137e-03, 4.28515920e+00]),
np.array([1.87250755e-03, 4.31311254e+00]),
np.array([2.06249620e-03, 4.33582812e+00])]
thresh = [0, 0, 0, 180, 180]
ind = [1, 1, 1, 2, 1]
def sill_frac(t, a, b, c, d):
if t >= c:
return (coefs[-1][0] * t + coefs[-1][1]) ** 2 - (a * t + b) ** 2 - (
(coefs[-1][1] + c * coefs[-1][0]) ** 2 - (coefs[-1 - d][1] + c * coefs[-1 - d][0]) ** 2)
else:
return 0
corr_std_dt = [functools.partial(sill_frac,a=coefs[i][0],b=coefs[i][1],c=thresh[i],d=ind[i]) for i in range(len(corr_ranges))]
list_areas = [100*2**i for i in np.arange(3,31)]
list_df=[]
for area in list_areas:
dt = [180,540,900,1260]
perc_area = [0.5,0.2,0.2,0.1]
dx=100.
nsamp_dt = np.zeros(len(dt)) * np.nan
err_corr = np.zeros((len(dt), len(corr_ranges) + 1)) * np.nan
for j in np.arange(len(dt)):
final_num_err_dt = 10.
nsamp_dt[j] = perc_area[j]*area
sum_var = 0
for k in range(len(corr_ranges)+1):
if k != len(corr_ranges):
err_corr[j,k] = np.sqrt(max(0,corr_std_dt[len(corr_ranges)-1-k](dt[j]) - sum_var))
sum_var += err_corr[j,k] ** 2
else:
err_corr[j, k]=np.sqrt(max(0,final_num_err_dt**2-sum_var))
final_num_err_corr, int_err_corr = (np.zeros( len(corr_ranges) + 1) * np.nan for i in range(2))
for k in range(len(corr_ranges) + 1):
final_num_err_corr[k] = np.sqrt(np.nansum(err_corr[:, k] * nsamp_dt) / np.nansum(nsamp_dt))
if k == 0:
tmp_length = 200000
else:
tmp_length = corr_ranges[len(corr_ranges) - k]
if final_num_err_corr[k] == 0:
int_err_corr[k] = 0
else:
int_err_corr[k] = std_err(final_num_err_corr[k],
neff_circ(area, [(tmp_length, 'Sph', final_num_err_corr[k] ** 2)]))
df_int = pd.DataFrame()
for i in range(len(corr_ranges)):
df_int['err_corr_'+str(corr_ranges[i])] =[int_err_corr[len(corr_ranges)-i]]
df_int['err_corr_200000'] =[int_err_corr[0]]
df_int['area']=area
list_df.append(df_int)
df = pd.concat(list_df)
#First panel: sources for volume change
col = ['tab:orange','tab:blue','tab:olive','tab:red','tab:cyan','tab:brown','tab:gray','tab:pink','tab:purple']
tmp_y = np.zeros(len(list_areas))
tmp_y_next = np.zeros(len(list_areas))
for i in range(6):
tmp_y = tmp_y_next
tmp_y_next = tmp_y + (2*df.iloc[:len(list_areas),i])**2
ax.fill_between(x=np.array(list_areas)/1000000,y1=tmp_y,y2=tmp_y_next,interpolate=True,color=col[i],alpha=0.5,edgecolor=None)
if i == 0:
ax.plot(np.array(list_areas)/1000000,tmp_y_next,color='black',linestyle='--')
ax.fill_between([],[],color=col[0],label='0.15 km',alpha=0.5)
ax.fill_between([],[],color=col[1],label='2 km',alpha=0.5)
ax.fill_between([],[],color=col[2],label='5 km',alpha=0.5)
ax.fill_between([],[],color=col[3],label='20 km',alpha=0.5)
ax.fill_between([],[],color=col[4],label='50 km',alpha=0.5)
ax.fill_between([],[],color=col[5],label='200 km',alpha=0.5)
ax.plot([],[],color='black',linestyle='--',label='Limit GP/spatial\ncorrelation sources')
ax.set_xscale('log')
ax.set_xlabel('Glacier area (km²)')
ax.set_ylabel('Squared uncertainties of\nspecific volume change (m²)')
ax.set_ylim((0,30))
ax.set_xlim((0.005,7.5*10**10/1000000))
handles, labels = ax.get_legend_handles_labels()
# sort both labels and handles by labels
labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0]))
print(labels[0:2])
ax.legend(handles[0:2]+(handles[-1],)+handles[2:-1], labels[0:2]+(labels[-1],)+labels[2:-1],title='Uncertainty sources for specific volume change\n(i.e. mean elevation change)',title_fontsize=6,ncol=3,columnspacing=0.5)
ax.text(0.023,4*1.2,'Uncertainty \nsources from\npixel-wise\nGP regression\n(0.15 km)',color=plt.cm.Greys(0.8),va='center',ha='center')
ax.text(5,4*2,'Uncertainty sources from \nshort- to long-\nrange correlations\n(2 km - 200 km)',color=plt.cm.Greys(0.8),va='center',ha='center')
ax.text(0.025, 0.95, 'c', transform=ax.transAxes, fontsize=8, fontweight='bold', va='top', ha='left')
ax.tick_params(width=0.35,length=2.5)
import os, sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
from pybob.ddem_tools import nmad
df_gp = pd.read_csv('/home/atom/data/other/Hugonnet_2020/dhdt_int_GP.csv')
df_hr = pd.read_csv('/home/atom/data/other/Hugonnet_2020/dhdt_int_HR.csv')
# ind = np.logical_and(df_hr.perc_meas>0.70,df_hr.category.values=='matthias')
# ind = np.logical_and(df_hr.perc_meas>0.70,df_hr.area.values<1000000.)
ind = df_hr.perc_meas>0.70
list_rgiid = list(df_hr[ind].rgiid)
list_area = list(df_hr[df_hr.rgiid.isin(list_rgiid)].area)
list_rgiid = [rgiid for _, rgiid in sorted(zip(list_area,list_rgiid),reverse=True)]
list_area = sorted(list_area,reverse=True)
ax = fig.add_subplot(grid[:2, 7:])
kval = 3.5
# sites=np.unique(data['Site'])
# colors=['b','g','r','c','m','y','k','grey']
colors = ['tab:blue','tab:orange','tab:red','tab:grey']
# sites=sites.tolist()
ax.plot([-3, 0.5], [-3, 0.5], color='k', linestyle='-', linewidth=0.75)
label_list=[]
diff2 = []
list_area2 = []
for rgiid in list_rgiid:
df_gp_rgiid = df_gp[df_gp.rgiid==rgiid]
df_hr_rgiid = df_hr[df_hr.rgiid==rgiid]
if df_hr_rgiid.category.values[0]=='matthias':
col = colors[0]
elif df_hr_rgiid.category.values[0]=='brian':
col = colors[1]
else:
if df_hr_rgiid.site.values[0] in ['Chhota','Gangotri','Abramov','Mera']:
col = colors[2]
elif df_hr_rgiid.site.values[0] == 'Yukon':
col=colors[3]
elif df_hr_rgiid.site.values[0] == 'MontBlanc':
col=colors[0]
ax.errorbar(df_hr_rgiid.dhdt.values[0], df_gp_rgiid.dhdt.values[0],
xerr=df_hr_rgiid.err_dhdt.values[0],
yerr=df_gp_rgiid.err_dhdt.values[0],marker='o',mec='k',
ms=kval*(df_hr_rgiid.area.values[0]/1000000)**0.5/3, mew=0.25,elinewidth=0.25,ecolor=col,mfc=col,alpha=0.9)
#,ecolor=colors[sites.index(data['Site'][value])]mfc=colors[sites.index(data['Site'][value])],alpha=0.5)
diff2.append(df_hr_rgiid.dhdt.values[0]-df_gp_rgiid.dhdt.values[0])
list_area2.append(df_hr_rgiid.area.values[0])
ax.text(-1.9,0,'Mean bias:\n'+str(np.round(np.nanmean(diff2),2))+'$\pm$'+str(np.round(2*nmad(diff2)/np.sqrt(len(diff2)),2))+' m yr$^{-1}$',ha='center',va='center',bbox= dict(boxstyle='round', facecolor='white', alpha=0.7,linewidth=0.35))
print(np.nanmean(diff2))
print(np.nansum(np.array(diff2)*np.array(list_area2))/np.nansum(np.array(list_area2)))
ax.set_ylabel('Specific volume change (m yr$^{-1}$)')
ax.set_xlabel('High-resolution specific volume change (m yr$^{-1}$)')
#plt.legend(loc='upper left')
ax.set_xlim([-2.95, 0.5])
ax.set_ylim([-2.95, 0.5])
#mask = ~np.isnan(b_dot_anomaly) & ~np.isnan(dP)
# slope, intercept, r_value, p_value, std_err = stats.linregress(data['MB GEOD'], data['MB ASTER'])
# print(slope)
# print("r-squared:", r_value**2)
# print('std err:', std_err)
# plt.text(-320, -1250, 'Slope:' + str(np.round(slope, 2)))
# plt.text(-320, -1300, 'r$^{2}$:' + str(np.round(r_value**2, 2)))
## add symbols to show relative size of glaciers
ax.errorbar(-2500/1000,-150/1000,ms = kval*(5.0**0.5)/3, xerr=0.0001, yerr=0.0001, color='k',marker='o')
ax.errorbar(-2500/1000,-500/1000,ms = kval*(50.0**0.5)/3, xerr=0.0001, yerr=0.0001,color='k',marker='o')
ax.errorbar(-2500/1000,-1250/1000,ms = kval*(500.0**0.5)/3, xerr=0.0001, yerr=0.0001, color='k', marker='o')
ax.text(-2500/1000, -220/1000,'5 km$^2$',va='top',ha='center')
ax.text(-2500/1000, -650/1000,'50 km$^2$',va='top',ha='center')
ax.text(-2500/1000, -1730/1000,'500 km$^2$',va='top',ha='center')
ax.text(0.025,0.966,'d',transform=ax.transAxes,
fontsize=8, fontweight='bold', va='top', ha='left')
ax.plot([],[],color=colors[0],label='Alps',lw=1)
ax.plot([],[],color=colors[1],label='Western NA',lw=1)
ax.plot([],[],color=colors[2],label='High Mountain Asia',lw=1)
ax.plot([],[],color=colors[3],label='Alaska',lw=1)
ax.plot([],[],color='k',label='1:1 line',lw=0.5)
ax.legend(loc='lower right',title='Validation of volume changes with high-resolution DEMs',title_fontsize=6,ncol=3)
ax.tick_params(width=0.35,length=2.5)
ax = fig.add_subplot(grid[4:6, 7:])
ax.text(0.025,0.966,'f',transform=ax.transAxes,
fontsize=8, fontweight='bold', va='top', ha='left')
vec_err_dhdt=[0.1,0.2,0.4,0.6,0.8,1,1.5,2]
list_err_emp = []
list_err_the = []
bin_err = []
nb_95ci = []
nb_gla = []
for i in range(len(vec_err_dhdt)-1):
ind = np.logical_and(df_gp.err_dhdt < vec_err_dhdt[i+1],df_gp.err_dhdt>=vec_err_dhdt[i])
list_rgiid = list(df_gp[ind].rgiid)
diff_dhdt = []
err_dhdt = []
ci_size = []
for rgiid in list_rgiid:
diff = df_hr[df_hr.rgiid==rgiid].dhdt.values[0] - df_gp[df_gp.rgiid==rgiid].dhdt.values[0]
err = np.sqrt(df_hr[df_hr.rgiid==rgiid].err_dhdt.values[0]**2+df_gp[df_gp.rgiid==rgiid].err_dhdt.values[0]**2)
err_dhdt.append(err)
diff_dhdt.append(diff)
if np.abs(diff) - 2 * np.abs(df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0]) - 2 * np.abs(
df_gp[df_gp.rgiid == rgiid].err_dhdt.values[0]) > 0:
ci_too_small = 0
elif ~np.isnan(diff):
ci_too_small = 1
else:
ci_too_small = np.nan
ci_size.append(ci_too_small)
list_err_emp.append(nmad(diff_dhdt))
list_err_the.append(np.nanmedian(err_dhdt))
bin_err.append(np.mean((vec_err_dhdt[i+1],vec_err_dhdt[i])))
nb_95ci.append(np.nansum(ci_size)/np.count_nonzero(~np.isnan(ci_size)))
nb_gla.append(np.count_nonzero(~np.isnan(ci_size)))
if i < 2:
va_text = 'bottom'
y_off = 0.1
if i == 0:
x_off = -0.05
else:
x_off = 0
else:
va_text = 'top'
y_off = -0.1
ax.text(bin_err[i]+x_off, list_err_emp[i] + y_off, str(nb_gla[i]) + ' gla.\n' + str(np.round(nb_95ci[i] * 100, 0)) + '%',
va=va_text, ha='center')
ax.plot([0,2],[0,2],color='k',label='1:1 line',lw=0.5)
ax.plot(bin_err,list_err_emp,color='tab:blue',label='Error (1$\sigma$) comparison to HR elevation differences\n(printed: glacier number and $\%$ of intersecting 95% CIs)',linestyle='dashed',marker='x')
ax.set_xlabel('Theoretical specific volume change uncertainty (m yr$^{-1}$)')
ax.set_ylabel('Empirical specific volume\nchange uncertainty (m yr$^{-1}$)')
ax.set_ylim((0,1.4))
ax.legend(loc='upper right',title='Validation of volume change uncertainties\nwith varying uncertainty size',title_fontsize=6)
ax.tick_params(width=0.35,length=2.5)
ax = fig.add_subplot(grid[2:4, 7:])
ax.text(0.025,0.966,'e',transform=ax.transAxes,
fontsize=8, fontweight='bold', va='top', ha='left')
vec_area=[0.01,0.05,0.2,1,5,20,200,1500]
list_err_emp = []
list_err_the = []
bin_err = []
nb_95ci = []
nb_gla = []
for i in range(len(vec_area)-1):
ind = np.logical_and(df_gp.area.values/1000000 < vec_area[i+1],df_gp.area.values/1000000>=vec_area[i])
list_rgiid = list(df_gp[ind].rgiid)
diff_dhdt = []
err_dhdt = []
ci_size = []
for rgiid in list_rgiid:
diff = df_hr[df_hr.rgiid==rgiid].dhdt.values[0] - df_gp[df_gp.rgiid==rgiid].dhdt.values[0]
err = np.sqrt(df_hr[df_hr.rgiid==rgiid].err_dhdt.values[0]**2+df_gp[df_gp.rgiid==rgiid].err_dhdt.values[0]**2)
diff_dhdt.append(diff)
err_dhdt.append(err)
if np.abs(diff) - 2 * np.abs(df_hr[df_hr.rgiid == rgiid].err_dhdt.values[0]) - 2 * np.abs(
df_gp[df_gp.rgiid == rgiid].err_dhdt.values[0]) > 0:
ci_too_small = 0
elif ~np.isnan(diff):
ci_too_small = 1
else:
ci_too_small = np.nan
ci_size.append(ci_too_small)
list_err_emp.append(nmad(diff_dhdt))
list_err_the.append(np.nanmedian(err_dhdt))
bin_err.append(np.mean((vec_area[i+1],vec_area[i])))
nb_95ci.append(np.nansum(ci_size)/np.count_nonzero(~np.isnan(ci_size)))
nb_gla.append(np.count_nonzero(~np.isnan(ci_size)))
if i <2:
va_text = 'top'
y_off = -0.1
else:
va_text = 'bottom'
y_off = 0.1
ax.text(bin_err[i],list_err_emp[i]+y_off,str(nb_gla[i])+' gla.\n'+str(np.round(nb_95ci[i]*100,0))+'%',va=va_text,ha='center')
ax.plot(bin_err,list_err_the,color='black',label='Theoretical uncertainty (1$\sigma$):\nspatially integrated variograms',marker='x')
ax.plot(bin_err,list_err_emp,color='tab:blue',label='Empirical uncertainty (1$\sigma$):\ncomparison to HR elevation differences\n(printed: glacier number and\n$\%$ of intersecting 95% CIs)',linestyle='dashed',marker='x')
ax.set_xscale('log')
ax.set_xlabel('Glacier area (km$^{2}$)')
ax.set_ylabel('Specific volume\nchange uncertainty (m yr$^{-1}$)')
ax.set_ylim([0,1.4])
ax.legend(loc='upper right',title='Validation of volume change uncertainties\nwith varying glacier area',title_fontsize=6)
ax.tick_params(width=0.35,length=2.5)
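# The binned validations above count glaciers whose 95% confidence intervals intersect, i.e. the
# absolute difference is smaller than twice each 1-sigma uncertainty. A minimal standalone sketch
# of that criterion (the helper name is illustrative and not part of the original script):
def ci_intersect(diff, err_a, err_b):
    """Return True if the 95% CIs (2-sigma) of two estimates differing by `diff` overlap."""
    import numpy as np
    if np.isnan(diff):
        return np.nan
    return np.abs(diff) - 2 * np.abs(err_a) - 2 * np.abs(err_b) <= 0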
ax2 = fig.add_subplot(grid[6:,:])
reg_dir = '/home/atom/ongoing/work_worldwide/vol/final'
list_fn_reg = [os.path.join(reg_dir,'dh_'+str(i).zfill(2)+'_rgi60_int_base_reg.csv') for i in np.arange(1,20)]
list_df_out = []
for fn_reg in list_fn_reg:
df = pd.read_csv(fn_reg)
mult_ann = 20
area = df.area.values[0]
dvol = (df[df.time == '2000-01-01'].dvol.values - df[df.time == '2020-01-01'].dvol.values)[0]
dh = dvol / area
err_dh = np.sqrt(
df[df.time == '2000-01-01'].err_dh.values[0] ** 2 +
df[df.time == '2020-01-01'].err_dh.values[0] ** 2)
err_dvol = np.sqrt((err_dh * area) ** 2 + (dh * df.perc_err_cont.values[0] / 100. * area) ** 2)
dvoldt = dvol / mult_ann
err_dvoldt = err_dvol / mult_ann
dmdt = dvol * 0.85 / 10 ** 9 / mult_ann
err_dmdt = np.sqrt((err_dvol * 0.85 / 10 ** 9) ** 2 + (
dvol * 0.06 / 10 ** 9) ** 2) / mult_ann
sq_err_dmdt_fromdh = (err_dh*area)**2 * (0.85 / mult_ann)**2 /area**2
sq_err_dmdt_fromarea = (dh * df.perc_err_cont.values[0] / 100. * area) ** 2 * (0.85 / mult_ann)**2 /area**2
sq_err_dmdt_fromdensity = (dvol * 0.06) ** 2 / mult_ann**2 / area**2
dmdtda = dmdt/area*10**9
df_out = pd.DataFrame()
df_out['region']=[df.reg.values[0]]
df_out['dmdtda'] = [dmdtda]
df_out['sq_err_fromdh'] = [sq_err_dmdt_fromdh]
df_out['sq_err_fromarea'] = [sq_err_dmdt_fromarea]
df_out['sq_err_fromdensity'] = [sq_err_dmdt_fromdensity]
df_out['area'] = [area]
list_df_out.append(df_out)
df_all = pd.concat(list_df_out)
df_g = pd.DataFrame()
df_g['region']=[21]
df_g['dmdtda'] = [np.nansum(df_all.dmdtda.values*df_all.area.values)/np.nansum(df_all.area.values)]
df_g['sq_err_fromdh'] = [np.nansum(df_all.sq_err_fromdh.values * df_all.area.values **2)/np.nansum(df_all.area.values)**2]
df_g['sq_err_fromarea'] = [np.nansum(df_all.sq_err_fromarea.values * df_all.area.values **2)/np.nansum(df_all.area.values)**2]
df_g['sq_err_fromdensity'] = [np.nansum(df_all.sq_err_fromdensity.values * df_all.area.values **2)/np.nansum(df_all.area.values)**2]
df_g['area'] = [np.nansum(df_all.area.values)]
df_noper = | pd.DataFrame() | pandas.DataFrame |
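# The global row above area-weights the regional specific-mass-change rates and propagates the
# squared error terms with squared-area weights. A small sketch of that aggregation (the helper
# name is illustrative, not part of the original script):
def aggregate_regions(dmdtda, sq_err, area):
    import numpy as np
    dmdtda, sq_err, area = map(np.asarray, (dmdtda, sq_err, area))
    mean = np.nansum(dmdtda * area) / np.nansum(area)
    sq_err_agg = np.nansum(sq_err * area ** 2) / np.nansum(area) ** 2
    return mean, sq_err_agg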
import keras
import tensorflow as tf
import math
import logging
import numpy as np
import pandas as pd
from keras.callbacks import EarlyStopping
from sacred import Ingredient
from sklearn import preprocessing
from sklearn.cross_validation import KFold
from pypagai.util.class_loader import ClassLoader
tb_callback = keras.callbacks.TensorBoard(log_dir='.log/', histogram_freq=0, write_graph=True, write_images=True)
LOG = logging.getLogger('pypagai-logger')
model_ingredient = Ingredient('model_default_cfg')
@model_ingredient.config
def default_model_configuration():
"""
Model configuration
"""
model = 'pypagai.models.model_lstm.SimpleLSTM' # Path to the ML model
verbose = False # True to print info about train
class BaseModel:
"""
Base model is the class used by all model classes in the experiment framework
    It encapsulates common functions and properties used in models. In this same file we have implementations
    provided by the framework to support specific implementations for Keras, Scikit-learn and TensorFlow models.
    It receives as parameters configurations of how to test and validate models. The main functions of this class are:
    - evaluate: evaluate models to help in choosing the best parameters
    - train: train model without cross-validation
    - predict: use trained model to predict new instances
"""
def __init__(self, model_cfg):
self._model = None
# Number of k-folds in validation samples
self._splits = model_cfg['kfold_splits'] if 'kfold_splits' in model_cfg else 2
# Verbose mode on models.
self._verbose = model_cfg['verbose'] if 'verbose' in model_cfg else False
        # Skip experiment with k-folds. When it is False, skip k-fold validation and go straight to training on the full dataset
self._experiment = model_cfg['experiment'] if 'experiment' in model_cfg else False
        # TODO: I don't remember why this configuration is used
self._maximum_acc = model_cfg['maximum_acc'] if 'maximum_acc' in model_cfg else 1
self._vocab_size = model_cfg['vocab_size']
self._story_maxlen = model_cfg['story_maxlen']
self._query_maxlen = model_cfg['query_maxlen']
self._sentences_maxlen = model_cfg['sentences_maxlen']
@staticmethod
def default_config():
return {
'maximum_acc': 1,
}
def name(self):
return self.__class__.__name__
def print(self):
LOG.info(self.name())
def train(self, data):
fold = 0
final_report = pd.DataFrame()
fold_list = KFold(len(data.answer), self._splits) if self._experiment else [(range(len(data.answer)), [])]
for train_index, valid_index in fold_list:
report = | pd.DataFrame() | pandas.DataFrame |
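# The train() method above relies on the long-deprecated sklearn.cross_validation.KFold API
# (KFold(n, n_folds)). A sketch of the equivalent fold loop with the current
# sklearn.model_selection API (sizes and variable names are illustrative):
import numpy as np
from sklearn.model_selection import KFold

n_samples, n_splits = 100, 2
indices = np.arange(n_samples)
fold_list = KFold(n_splits=n_splits).split(indices)
for train_index, valid_index in fold_list:
    pass  # train on indices[train_index], validate on indices[valid_index]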
"""
Parallel HTTP transport
IMPORT from multiple independent processes running in parallel
"""
import pyexasol
import _config as config
import multiprocessing
import pyexasol.callback as cb
import pandas
import pprint
printer = pprint.PrettyPrinter(indent=4, width=140)
class ImportProc(multiprocessing.Process):
def __init__(self, node):
self.node = node
self.read_pipe, self.write_pipe = multiprocessing.Pipe(False)
super().__init__()
def start(self):
super().start()
self.write_pipe.close()
@property
def exa_address(self):
return self.read_pipe.recv()
def run(self):
self.read_pipe.close()
# Init HTTP transport connection
http = pyexasol.http_transport(self.node['ipaddr'], self.node['port'])
# Send internal Exasol address to parent process
self.write_pipe.send(http.exa_address)
self.write_pipe.close()
data = [
{'user_id': 1, 'user_name': 'John', 'shard_id': self.node['idx']},
{'user_id': 2, 'user_name': 'Foo', 'shard_id': self.node['idx']},
{'user_id': 3, 'user_name': 'Bar', 'shard_id': self.node['idx']},
]
pd = | pandas.DataFrame(data, columns=['user_id', 'user_name', 'shard_id']) | pandas.DataFrame |
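# Parent-side sketch of the pattern above: start one ImportProc per node, collect the internal
# Exasol addresses the children send back over the pipe, then hand that list to the parallel
# IMPORT call. The node dicts and the final import call are placeholders (the original script is
# truncated here); the sketch only illustrates the pipe handshake:
nodes = [{'ipaddr': '10.0.0.1', 'port': 8563, 'idx': 0}]   # illustrative node description
procs = [ImportProc(node) for node in nodes]
for proc in procs:
    proc.start()
exa_address_list = [proc.exa_address for proc in procs]    # read from the child pipes
# ... run the parallel IMPORT with exa_address_list, then join the children ...
for proc in procs:
    proc.join()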
from utils import mol2fp
import os
import pandas as pd
import numpy as np
from rdkit import Chem
import configparser
import argparse
if __name__ == '__main__':
# argments
parser = argparse.ArgumentParser()
parser.add_argument('conf')
args = parser.parse_args()
# load config file
conf_file = args.conf
section = 'morgan_fp'
config = configparser.ConfigParser()
config.read(conf_file)
csv_path = config.get(section, 'csv_path')
radius = int(config.get(section, 'radius'))
nBits = int(config.get(section, 'nBits'))
col_smiles = config.get(section, 'col_smiles')
col_property = config.get(section, 'col_property')
df = | pd.read_csv(csv_path) | pandas.read_csv |
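# `mol2fp` is imported from a local utils module whose implementation is not shown here. A
# plausible sketch of such a helper using the radius/nBits read from the config (the real
# implementation may differ; list(fp) or np.array(fp) gives the 0/1 bits if an array is needed):
from rdkit import Chem
from rdkit.Chem import AllChem

def mol2fp_sketch(smiles, radius, nBits):
    mol = Chem.MolFromSmiles(smiles)
    if mol is None:
        return None
    return AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=nBits)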
from bittrex import Bittrex
import requests
import pandas as pd
import os
import bittrex_test as btt
import quandl_api_test as qat
from scrape_coinmarketcap import scrape_data
API_K = os.environ.get('bittrex_api')
API_S = os.environ.get('bittrex_sec')
if API_K is None:
API_K = os.environ.get('btx_key')
API_S = os.environ.get('btx_sec')
bt = Bittrex(API_K, API_S)
HOME_DIR = btt.get_home_dir()
MARKETS = btt.get_all_currency_pairs()
def get_balances():
bals = bt.get_balances()
if bals['success'] == True:
return pd.io.json.json_normalize(bals['result'])
else:
print('error!', bals['message'])
return None
def get_total_dollar_balance(bals):
btc_amts = []
dollar_amts = []
for i, r in bals.iterrows():
if 'BTC-' + r['Currency'] in MARKETS:
print('getting price for', r['Currency'])
t = btt.get_ticker('BTC-' + r['Currency'])
btc_amts.append(t['Last'] * r['Balance'])
else:
# have to find which market we have, the convert to BTC
if r['Currency'] == 'BTC':
btc_amts.append(r['Balance'])
else:
print('no BTC market for', r['Currency'])
bals_copy = bals.copy()
bals_copy['BTC_equivalent'] = btc_amts
usdt = btt.get_ticker('USDT-BTC')['Last']
bals_copy['USD_equivalent'] = bals_copy['BTC_equivalent'] * usdt
return bals_copy
def get_deposit_history():
dh = bt.get_deposit_history()
if dh['success'] == True:
df = pd.io.json.json_normalize(dh['result'])
df['LastUpdated'] = pd.to_datetime(df['LastUpdated'])
return df
else:
print('error!', dh['message'])
return None
def get_deposit_amts(df):
# market_data = qat.load_save_data()
# bt_df = qat.get_bitstamp_full_df(market_data)
eth = scrape_data()
btc = scrape_data('bitcoin')
aeon = scrape_data('aeon')
xmr = scrape_data('monero')
dep_dollars = []
for i, r in df.iterrows():
date = r['LastUpdated']
d = str(date.day).zfill(2)
m = str(date.month).zfill(2)
y = str(date.year)
if r['Currency'] == 'BTC':
price = btc.loc[y + m + d, 'usd_price'][0]
dep_dollars.append(price * r['Amount'])
elif r['Currency'] == 'ETH':
price = eth.loc[y + m + d, 'usd_price'][0]
dep_dollars.append(price * r['Amount'])
elif r['Currency'] == 'AEON':
price = aeon.loc[y + m + d, 'usd_price'][0]
dep_dollars.append(price * r['Amount'])
elif r['Currency'] == 'XMR':
price = xmr.loc[y + m + d, 'usd_price'][0]
dep_dollars.append(price * r['Amount'])
df['usd'] = dep_dollars
return df
def get_order_history():
hist = bt.get_order_history()
if hist['success']:
df = | pd.io.json.json_normalize(hist['result']) | pandas.io.json.json_normalize |
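# The functions above flatten Bittrex JSON responses with pd.io.json.json_normalize; newer pandas
# versions expose the same helper as the top-level pd.json_normalize. A tiny sketch with toy data:
import pandas as pd

records = [{'Currency': 'BTC', 'Balance': 0.5}, {'Currency': 'ETH', 'Balance': 2.0}]
df = pd.json_normalize(records)   # equivalent to the older pd.io.json.json_normalize spelling used above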
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = | tm.box_expected(exp, xbox) | pandas.util.testing.box_expected |
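# The parametrized test above asserts element-wise comparison of a Period scalar against
# period-dtype data. A small concrete example of the semantics being checked (standalone,
# not part of the test module):
import numpy as np
import pandas as pd

base = pd.PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq="M")
per = pd.Period("2011-02", freq="M")
assert np.array_equal(base == per, np.array([False, True, False, False]))
assert np.array_equal(base > per, np.array([False, False, True, True]))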
import wf_core_data
import pandas as pd
from collections import OrderedDict
import datetime
import logging
logger = logging.getLogger(__name__)
class FamilySurveyAirtableClient(wf_core_data.AirtableClient):
def fetch_school_inputs(
self,
pull_datetime=None,
params=None,
base_id=wf_core_data.SCHOOLS_BASE_ID,
format='dataframe',
delay=wf_core_data.DEFAULT_DELAY,
max_requests=wf_core_data.DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching family survey school inputs from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Family survey - school inputs',
params=params
)
school_inputs=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('school_input_id_at', record.get('id')),
('school_input_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('school_id_at', fields.get('Schools')),
('include_school_in_data', fields.get('Include in data')),
('include_school_in_reporting', fields.get('Include in reporting')),
('school_data_pending', fields.get('Data pending')),
('school_report_language', fields.get('Report language')),
('num_students', fields.get('Number of students')),
('num_forms_sent', fields.get('Number of forms sent'))
])
school_inputs.append(datum)
if format == 'dataframe':
school_inputs = convert_school_inputs_to_df(school_inputs)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return school_inputs
def fetch_hub_inputs(
self,
pull_datetime=None,
params=None,
base_id=wf_core_data.SCHOOLS_BASE_ID,
format='dataframe',
delay=wf_core_data.DEFAULT_DELAY,
max_requests=wf_core_data.DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching family survey hub inputs from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Family survey - hub inputs',
params=params
)
hub_inputs=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('hub_input_id_at', record.get('id')),
('hub_input_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('hub_id_at', fields.get('Hubs')),
('include_hub_in_reporting', fields.get('Include in reporting')),
('hub_data_pending', fields.get('Data pending'))
])
hub_inputs.append(datum)
if format == 'dataframe':
hub_inputs = convert_hub_inputs_to_df(hub_inputs)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return hub_inputs
def fetch_excluded_classroom_inputs(
self,
pull_datetime=None,
params=None,
base_id=wf_core_data.SCHOOLS_BASE_ID,
format='dataframe',
delay=wf_core_data.DEFAULT_DELAY,
max_requests=wf_core_data.DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching family survey excluded classroom inputs from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Family survey - excluded classroom inputs',
params=params
)
excluded_classroom_inputs = list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('excluded_classroom_input_id_at', record.get('id')),
('excluded_classroom_input_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('school_id_tc', fields.get('TC school ID')),
('classroom_id_tc', fields.get('TC classroom ID'))
])
excluded_classroom_inputs.append(datum)
if format == 'dataframe':
excluded_classroom_inputs = convert_excluded_classroom_inputs_to_df(excluded_classroom_inputs)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return excluded_classroom_inputs
def fetch_excluded_student_inputs(
self,
pull_datetime=None,
params=None,
base_id=wf_core_data.SCHOOLS_BASE_ID,
format='dataframe',
delay=wf_core_data.DEFAULT_DELAY,
max_requests=wf_core_data.DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching family survey excluded student inputs from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Family survey - excluded student inputs',
params=params
)
excluded_student_inputs = list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('excluded_student_input_id_at', record.get('id')),
('excluded_student_input_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('school_id_tc', fields.get('TC school ID')),
('student_id_tc', fields.get('TC student ID'))
])
excluded_student_inputs.append(datum)
if format == 'dataframe':
excluded_student_inputs = convert_excluded_student_inputs_to_df(excluded_student_inputs)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return excluded_student_inputs
def fetch_field_name_inputs(
self,
pull_datetime=None,
params=None,
base_id=wf_core_data.SCHOOLS_BASE_ID,
format='dataframe',
delay=wf_core_data.DEFAULT_DELAY,
max_requests=wf_core_data.DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching family survey field name inputs from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Family survey - field name inputs',
params=params
)
field_name_inputs = list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('field_name_input_id_at', record.get('id')),
('field_name_input_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('source_field_name', fields.get('Source field name')),
('target_field_name', fields.get('Target field name'))
])
field_name_inputs.append(datum)
if format == 'dataframe':
field_name_inputs = convert_field_name_inputs_to_df(field_name_inputs)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return field_name_inputs
def fetch_non_tc_form_data(
self,
pull_datetime=None,
params=None,
base_id=wf_core_data.SCHOOLS_BASE_ID,
format='dataframe',
delay=wf_core_data.DEFAULT_DELAY,
max_requests=wf_core_data.DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching family survey paper form data from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Family survey non-TC data 2021-22',
params=params
)
non_tc_form_data=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('non_tc_form_id_at', record.get('id')),
('non_tc_form_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('school_id_at', fields.get('school_id_at')),
('student_id_at_auto', wf_core_data.utils.extract_int(fields.get('student_id_at_auto'))),
('student_first_name_at', fields.get('student_first_name_at')),
('student_last_name_at', fields.get('student_last_name_at')),
('ethnicity_category_id_at_list', fields.get('ethnicity_list')),
('language_response', fields.get('language_response')),
('household_income_category_id_at', fields.get('household_income')),
('frl_boolean_category_id_at', fields.get('frl')),
('nps_response', wf_core_data.utils.extract_int(fields.get('nps_response'))),
('marketing_opt_out_boolean_category_id_at', fields.get('marketing_opt_out')),
('feedback_response', fields.get('feedback_response'))
])
non_tc_form_data.append(datum)
if format == 'dataframe':
non_tc_form_data = convert_non_tc_form_data_to_df(non_tc_form_data)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return non_tc_form_data
def convert_school_inputs_to_df(school_inputs):
if len(school_inputs) == 0:
return pd.DataFrame()
school_inputs_df = pd.DataFrame(
school_inputs,
dtype='object'
)
school_inputs_df['pull_datetime'] = pd.to_datetime(school_inputs_df['pull_datetime'])
school_inputs_df['school_input_created_datetime_at'] = pd.to_datetime(school_inputs_df['school_input_created_datetime_at'])
school_inputs_df['school_id_at'] = school_inputs_df['school_id_at'].apply(wf_core_data.utils.to_singleton)
school_inputs_df = school_inputs_df.astype({
'school_input_id_at': 'string',
'school_id_at': 'string',
'include_school_in_data': 'bool',
'include_school_in_reporting': 'bool',
'school_data_pending': 'bool',
'num_students': 'Int64',
'num_forms_sent': 'Int64'
})
school_inputs_df.set_index('school_input_id_at', inplace=True)
return school_inputs_df
def convert_hub_inputs_to_df(hub_inputs):
if len(hub_inputs) == 0:
return pd.DataFrame()
hub_inputs_df = pd.DataFrame(
hub_inputs,
dtype='object'
)
hub_inputs_df['pull_datetime'] = pd.to_datetime(hub_inputs_df['pull_datetime'])
hub_inputs_df['hub_input_created_datetime_at'] = pd.to_datetime(hub_inputs_df['hub_input_created_datetime_at'])
hub_inputs_df['hub_id_at'] = hub_inputs_df['hub_id_at'].apply(wf_core_data.utils.to_singleton)
hub_inputs_df = hub_inputs_df.astype({
'hub_input_id_at': 'string',
'hub_id_at': 'string',
'include_hub_in_reporting': 'bool',
'hub_data_pending': 'bool'
})
hub_inputs_df.set_index('hub_input_id_at', inplace=True)
return hub_inputs_df
def convert_excluded_classroom_inputs_to_df(excluded_classroom_inputs):
if len(excluded_classroom_inputs) == 0:
return pd.DataFrame()
excluded_classroom_inputs_df = pd.DataFrame(
excluded_classroom_inputs,
dtype='object'
)
excluded_classroom_inputs_df['pull_datetime'] = pd.to_datetime(excluded_classroom_inputs_df['pull_datetime'])
excluded_classroom_inputs_df['excluded_classroom_input_created_datetime_at'] = pd.to_datetime(excluded_classroom_inputs_df['excluded_classroom_input_created_datetime_at'])
excluded_classroom_inputs_df = excluded_classroom_inputs_df.astype({
'excluded_classroom_input_id_at': 'string',
'school_id_tc': 'int',
'classroom_id_tc': 'int'
})
excluded_classroom_inputs_df.set_index('excluded_classroom_input_id_at', inplace=True)
return excluded_classroom_inputs_df
def convert_excluded_student_inputs_to_df(excluded_student_inputs):
if len(excluded_student_inputs) == 0:
return pd.DataFrame()
excluded_student_inputs_df = pd.DataFrame(
excluded_student_inputs,
dtype='object'
)
excluded_student_inputs_df['pull_datetime'] = pd.to_datetime(excluded_student_inputs_df['pull_datetime'])
excluded_student_inputs_df['excluded_student_input_created_datetime_at'] = pd.to_datetime(excluded_student_inputs_df['excluded_student_input_created_datetime_at'])
excluded_student_inputs_df = excluded_student_inputs_df.astype({
'excluded_student_input_id_at': 'string',
'school_id_tc': 'int',
'student_id_tc': 'int'
})
excluded_student_inputs_df.set_index('excluded_student_input_id_at', inplace=True)
return excluded_student_inputs_df
def convert_field_name_inputs_to_df(field_name_inputs):
if len(field_name_inputs) == 0:
return pd.DataFrame()
field_name_inputs_df = pd.DataFrame(
field_name_inputs,
dtype='object'
)
field_name_inputs_df['pull_datetime'] = pd.to_datetime(field_name_inputs_df['pull_datetime'])
field_name_inputs_df['field_name_input_created_datetime_at'] = | pd.to_datetime(field_name_inputs_df['field_name_input_created_datetime_at']) | pandas.to_datetime |
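# The convert_*_to_df helpers above all follow the same four-step pattern: build a DataFrame from
# the list of OrderedDicts, parse the datetime columns, cast the remaining dtypes, and index by
# the Airtable record id. A generic sketch of that pattern (argument names are illustrative, not
# part of the original module):
import pandas as pd

def convert_records_to_df(records, datetime_columns, dtypes, index_column):
    if len(records) == 0:
        return pd.DataFrame()
    df = pd.DataFrame(records, dtype='object')
    for column in datetime_columns:
        df[column] = pd.to_datetime(df[column])
    df = df.astype(dtypes)
    df.set_index(index_column, inplace=True)
    return df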
# (C) Copyright 2021 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import pandas as pd
import numpy as np
from itertools import permutations, combinations
from collections import namedtuple, Counter
from sklearn.covariance import EmpiricalCovariance
from sklearn.neighbors import NearestNeighbors
from sklearn.exceptions import NotFittedError
from sklearn.base import clone as sk_clone
from .base_estimator import IndividualOutcomeEstimator
from scipy.optimize import linear_sum_assignment
from scipy.spatial import distance
KNN = namedtuple("KNN", "learner index")
# scipy distance routine requires matrix of valid numerical distances
# we use `VERY_LARGE_NUMBER` to represent an infinite distance
VERY_LARGE_NUMBER = np.finfo('d').max
def majority_rule(x):
return Counter(x).most_common(1)[0][0]
class Matching(IndividualOutcomeEstimator):
def __init__(
self,
propensity_transform=None,
caliper=None,
with_replacement=True,
n_neighbors=1,
matching_mode="both",
metric="mahalanobis",
knn_backend="sklearn",
estimate_observed_outcome=False,
):
"""Match treatment and control samples with similar covariates.
Args:
propensity_transform (causallib.transformers.PropensityTransformer):
an object for data preprocessing which adds the propensity
score as a feature (default: None)
caliper (float) : maximal distance for a match to be accepted. If
not defined, all matches will be accepted. If defined, some
samples may not be matched and their outcomes will not be
estimated. (default: None)
with_replacement (bool): whether samples can be used multiple times
for matching. If set to False, the matching process will optimize
the linear sum of distances between pairs of treatment and
control samples and only `min(N_treatment, N_control)` samples
will be estimated. Matching with no replacement does not make
use of the `fit` data and is therefore not implemented for
out-of-sample data (default: True)
n_neighbors (int) : number of nearest neighbors to include in match.
Must be 1 if `with_replacement` is `False.` If larger than 1, the
estimate is calculated using the `regress_agg_function` or
`classify_agg_function` across the `n_neighbors`. Note that when
the `caliper` variable is set, some samples will have fewer than
`n_neighbors` matches. (default: 1).
matching_mode (str) : Direction of matching: `treatment_to_control`,
`control_to_treatment` or `both` to indicate which set should
be matched to which. All sets are cross-matched in `match`
and when `with_replacement` is `False` all matching modes
coincide. With replacement there is a difference.
metric (str) : Distance metric string for calculating distance
between samples. Note: if an external built `knn_backend`
object with a different metric is supplied, `metric` needs to
be changed to reflect that, because `Matching` will set its
inverse covariance matrix if "mahalanobis" is set. (default:
"mahalanobis", also supported: "euclidean")
knn_backend (str or callable) : Backend to use for nearest neighbor
search. Options are "sklearn" or a callable which returns an
object implementing `fit`, `kneighbors` and `set_params`
like the sklearn `NearestNeighbors` object. (default: "sklearn").
estimate_observed_outcome (bool) : Whether to allow a match of a
sample to a sample other than itself when looking within its own
treatment value. If True, the estimated potential outcome for the
observed outcome may differ from the true observed outcome.
(default: False)
Attributes:
classify_agg_function (callable) : Aggregating function for outcome
estimation when classifying. (default: majority_rule)
Usage is determined by type of `y` during `fit`
regress_agg_function (callable) : Aggregating function for outcome
estimation when regressing or predicting prob_a. (default: np.mean)
Usage is determined by type of `y` during `fit`
treatments_ (pd.DataFrame) : DataFrame of treatments (created after `fit`)
outcomes_ (pd.DataFrame) : DataFrame of outcomes (created after `fit`)
match_df_ (pd.DataFrame) : Dataframe of most recently calculated
matches. For details, see `match`. (created after `match`)
samples_used_ (pd.Series) : Series with count of samples used
during most recent match. Series includes a count for each
treatment value. (created after `match`)
"""
self.propensity_transform = propensity_transform
self.covariance_conditioner = EmpiricalCovariance()
self.caliper = caliper
self.with_replacement = with_replacement
self.n_neighbors = n_neighbors
self.matching_mode = matching_mode
self.metric = metric
# if classify task, default aggregation function is majority
self.classify_agg_function = majority_rule
# if regress task, default aggregation function is mean
self.regress_agg_function = np.mean
self.knn_backend = knn_backend
self.estimate_observed_outcome = estimate_observed_outcome
def fit(self, X, a, y, sample_weight=None):
"""Load the treatments and outcomes and fit search trees.
Applies transform to covariates X, initializes search trees for each
treatment value for performing nearest neighbor searches.
Note: Running `fit` a second time overwrites any information from
previous `fit or `match` and re-fits the propensity_transform object.
Args:
X (pd.DataFrame): DataFrame of shape (n,m) containing m covariates
for n samples.
a (pd.Series): Series of shape (n,) containing discrete treatment
values for the n samples.
y (pd.Series): Series of shape (n,) containing outcomes for
the n samples.
sample_weight: IGNORED In signature for compatibility with other
estimators.
Note: `X`, `a` and `y` must share the same index.
Returns:
self (Matching) the fitted object
"""
self._clear_post_fit_variables()
self.outcome_ = y.copy()
self.treatments_ = a.copy()
if self.propensity_transform:
self.propensity_transform.fit(X, a)
X = self.propensity_transform.transform(X)
self.conditioned_covariance_ = self._calculate_covariance(X)
self.treatment_knns_ = {}
for a in self.treatments_.unique():
haystack = X[self.treatments_ == a]
self.treatment_knns_[a] = self._fit_sknn(haystack)
return self
def _execute_matching(self, X, a):
"""Execute matching of samples in X according to the treatment values in a.
Returns a DataFrame including all the results, which is also set as
the attribute `self.match_df_`. The arguments `X` and `a` define the
"needle" where the "haystack" is the data that was previously passed
to fit, for matching with replacement. As such, treatment and control
samples from within `X` will not be matched with each other, unless
the same `X` and `a` were passed to `fit`. For matching without
replacement, the `X` and `a` passed to `match` provide the "needle" and
the "haystack". If the attribute `caliper` is set, the matches are
limited to those with a distance less than `caliper`.
This function ignores the existing `match_df_` and will overwrite it.
It is thus useful for if you have changed the settings and need to
rematch the samples. For most applications, the `match` function is
more convenient.
Args:
X (pd.DataFrame): DataFrame of shape (n,m) containing m covariates
for n samples.
a (pd.Series): Series of shape (n,) containing discrete treatment
values for the n samples.
Note: The args are assumed to share the same index.
Returns:
match_df: The resulting matches DataFrame is indexed so that
` match_df.loc[treatment_value, sample_id]` has columns `matches`
and `distances` containing lists of indices to samples and the
respective distances for the matches discovered for `sample_id`
from within the fitted samples with the given `treatment_value`.
The indices in the `matches` column are from the fitted data,
not the X argument in `match`. If `sample_id` had no match,
`match_df.loc[treatment_value, sample_id].matches = []`.
The DataFrame has shape (n* len(a.unique()), 2 ).
Raises:
NotImplementedError: Raised when with_replacement is False and
n_neighbors is not 1.
"""
if self.n_neighbors != 1 and not self.with_replacement:
raise NotImplementedError(
"Matching more than one neighbor is only implemented for"
"no-replacement"
)
if self.propensity_transform:
X = self.propensity_transform.transform(X)
if self.with_replacement:
self.match_df_ = self._withreplacement_match(X, a)
else:
self.match_df_ = self._noreplacement_match(X, a)
sample_id_name = X.index.name if X.index.name is not None else "sample_id"
self.match_df_.index.set_names(
["match_to_treatment", sample_id_name], inplace=True
)
# we record the number of samples that were successfully matched of
# each treatment value
self.samples_used_ = self._count_samples_used_by_treatment_value(a)
return self.match_df_
def estimate_individual_outcome(
self, X, a, y=None, treatment_values=None, predict_proba=True, dropna=True
):
"""
Calculate the potential outcome for each sample and treatment value.
Execute match and calculate, for each treatment value and each sample,
the expected outcome.
Note: Out of sample estimation for matching without replacement requires
passing a `y` vector here. If no 'y' is passed here, the values received
by `fit` are used, and if the estimation indices are not a subset of the
fitted indices, the estimation will fail.
If the attribute `estimate_observed_outcome` is
`True`, estimates will be calculated for the observed outcomes as well.
If not, then the observed outcome will be passed through from the
corresponding element of `y` passed to `fit`.
Args:
X (pd.DataFrame): DataFrame of shape (n,m) containing m covariates
for n samples.
a (pd.Series): Series of shape (n,) containing discrete treatment
values for the n samples.
y (pd.Series): Series of shape (n,) containing outcome values for
n samples. This is only used when `with_replacemnt=False`.
Otherwise, the outcome values passed to `fit` are used.
predict_proba (bool) : whether to output classifications or
probabilties for a classification task. If set to False and
data is non-integer, a warning is issued. (default True)
dropna (bool) : For samples that were unmatched due to caliper
restrictions, drop from outcome_df leading to a potentially
smaller sized output, or include them as NaN. (default: True)
treatment_values : IGNORED
Note: The args are assumed to share the same index.
Returns:
outcome_df (pd.DataFrame)
"""
match_df = self.match(X, a, use_cached_result=True)
outcome_df = self._aggregate_match_df_to_generate_outcome_df(
match_df, a, predict_proba)
outcome_df = self._filter_outcome_df_by_matching_mode(outcome_df, a)
if outcome_df.isna().all(axis=None):
raise ValueError("Matching was not successful and no outcomes can"
"be estimated. Check caliper value.")
if dropna:
outcome_df = outcome_df.dropna()
return outcome_df
def match(self, X, a, use_cached_result=True, successful_matches_only=False):
"""Matching the samples in X according to the treatment values in a.
Returns a DataFrame including all the results, which is also set as
the attribute `self.match_df_`. The arguments `X` and `a` define the
"needle" where the "haystack" is the data that was previously passed
to fit, for matching with replacement. As such, treatment and control
        samples from within `X` will not be matched with each other, unless
the same `X` and `a` were passed to `fit`. For matching without
replacement, the `X` and `a` passed to `match` provide the "needle" and
the "haystack". If the attribute `caliper` is set, the matches are
limited to those with a distance less than `caliper`.
Args:
X (pd.DataFrame): DataFrame of shape (n,m) containing m covariates
for n samples.
a (pd.Series): Series of shape (n,) containing discrete treatment
values for the n samples.
use_cached_result (bool): Whether or not to return the `match_df`
from the most recent matching operation. The cached result will
only be used if the sample indices of `X` and those of `match_df`
are identical, otherwise it will rematch.
successful_matches_only (bool): Whether or not to filter the matches
to those which matched successfully. If set to `False`, the
resulting DataFrame will have shape (n* len(a.unique()), 2 ),
otherwise it may have a smaller shape due to unsuccessful matches.
Note: The args are assumed to share the same index.
Returns:
match_df: The resulting matches DataFrame is indexed so that
` match_df.loc[treatment_value, sample_id]` has columns `matches`
and `distances` containing lists of indices to samples and the
respective distances for the matches discovered for `sample_id`
from within the fitted samples with the given `treatment_value`.
The indices in the `matches` column are from the fitted data,
not the X argument in `match`. If `sample_id` had no match,
`match_df.loc[treatment_value, sample_id].matches = []`.
The DataFrame has shape (n* len(a.unique()), 2 ), if
`successful_matches_only` is set to `False.
Raises:
NotImplementedError: Raised when with_replacement is False and
n_neighbors is not 1.
"""
cached_result_available = (hasattr(self, "match_df_")
and X.index.equals(self.match_df_.loc[0].index))
if not (use_cached_result and cached_result_available):
self._execute_matching(X, a)
return self._get_match_df(successful_matches_only=successful_matches_only)
def matches_to_weights(self, match_df=None):
"""Calculate weights based on a given set of matches.
For each matching from one treatment value to another, a weight vector
is generated. The weights are calculated as the number of times a
sample was selected in a matching, with each occurrence weighted
according to the number of other samples in that matching. The weights
can be used to estimate outcomes or to check covariate balancing. The
function can only be called after `match` has been run.
Args:
match_df (pd.DataFrame) : a DataFrame of matches returned from
`match`. If not supplied, will use the `match_df_` attribute if
available, else raises ValueError. Will not execute `match` to
generate a `match_df`.
Returns:
weights_df (pd.DataFrame): DataFrame of shape (n,M) where M is the
number of permutations of `a.unique()`.
"""
if match_df is None:
match_df = self._get_match_df(successful_matches_only=False)
match_permutations = sorted(permutations(self.treatments_.unique()))
weights_df = pd.DataFrame([
self._matches_to_weights_single_matching(s, t, match_df)
for s, t in match_permutations],).T
return weights_df
def get_covariates_of_matches(self, s, t, covariates):
"""
Look up covariates of closest matches for a given matching.
Using `self.match_df_` and the supplied `covariates`, look up
the covariates of the last match. The function can only be called after
`match` has been run.
Args:
s (int) : source treatment value
t (int) : target treatment value
covariates (pd.DataFrame) : The same covariates which were
passed to `fit`.
Returns:
covariate_df (pd.DataFrame) : a DataFrame of size
(n_matched_samples, n_covariates * 3 + 2) with the covariate
values of the sample, covariates of its match, calculated
distance and number of neighbors found within the given
caliper (with no caliper this will equal self.n_neighbors )
"""
match_df = self._get_match_df()
subdf = match_df.loc[s][self.treatments_ == t]
sample_id_name = subdf.index.name
def get_covariate_difference_from_nearest_match(source_row_index):
j = subdf.loc[source_row_index].matches[0]
delta_series = pd.Series(
covariates.loc[source_row_index] - covariates.loc[j])
source_row = covariates.loc[j].copy()
source_row.at[sample_id_name] = j
target_row = covariates.loc[source_row_index].copy()
target_row = target_row
covariate_differences = pd.concat(
{
t: target_row,
s: source_row,
"delta": delta_series,
"outcomes": pd.Series(
{t: self.outcome_.loc[source_row_index],
s: self.outcome_.loc[j]}
),
"match": pd.Series(
dict(
n_neighbors=len(
subdf.loc[source_row_index].matches),
distance=subdf.loc[source_row_index].distances[0],
)
),
}
)
return covariate_differences
covdf = pd.DataFrame(
data=[get_covariate_difference_from_nearest_match(i)
for i in subdf.index], index=subdf.index
)
covdf = covdf.reset_index()
cols = covdf.columns
covdf.columns = pd.MultiIndex.from_tuples(
[(t, sample_id_name)] + list(cols[1:]))
return covdf
def _clear_post_fit_variables(self):
for var in list(vars(self)):
if var[-1] == "_":
self.__delattr__(var)
def _calculate_covariance(self, X):
if len(X.shape) > 1 and X.shape[1] > 1:
V_list = []
for a in self.treatments_.unique():
X_at_a = X[self.treatments_ == a].copy()
current_V = self.covariance_conditioner.fit(X_at_a).covariance_
V_list.append(current_V)
# following Imbens&Rubin, we average across treatment groups
V = np.mean(V_list, axis=0)
else:
# for 1d data revert to euclidean metric
V = np.array(1).reshape(1, 1)
return V
def _aggregate_match_df_to_generate_outcome_df(self, match_df, a, predict_proba):
agg_function = self._get_agg_function(predict_proba)
def outcome_from_matches_by_idx(x):
return agg_function(self.outcome_.loc[x])
outcomes = {}
for i in sorted(a.unique()):
outcomes[i] = match_df.loc[i].matches.apply(
outcome_from_matches_by_idx)
outcome_df = pd.DataFrame(outcomes)
return outcome_df
def _get_match_df(self, successful_matches_only=True):
if not hasattr(self, "match_df_") or self.match_df_ is None:
raise NotFittedError("You need to run `match` first")
match_df = self.match_df_.copy()
if successful_matches_only:
match_df = match_df[match_df.matches.apply(bool)]
if match_df.empty:
raise ValueError(
"Matching was not successful and no outcomes can be "
"estimated. Check caliper value."
)
return match_df
def _filter_outcome_df_by_matching_mode(self, outcome_df, a):
if self.matching_mode == "treatment_to_control":
outcome_df = outcome_df[a == 1]
elif self.matching_mode == "control_to_treatment":
outcome_df = outcome_df[a == 0]
elif self.matching_mode == "both":
pass
else:
raise NotImplementedError(
"Matching mode {} is not implemented. Please select one of "
"'treatment_to_control', 'control_to_treatment, "
"or 'both'.".format(self.matching_mode)
)
return outcome_df
def _get_agg_function(self, predict_proba):
if predict_proba:
agg_function = self.regress_agg_function
else:
agg_function = self.classify_agg_function
try:
isoutputinteger = np.allclose(
self.outcome_.apply(int), self.outcome_)
if not isoutputinteger:
warnings.warn(
"Classifying non-categorical outcomes. "
"This is probably a mistake."
)
except:
warnings.warn(
"Unable to detect whether outcome is integer-like. ")
return agg_function
def _instantiate_nearest_neighbors_object(self):
backend = self.knn_backend
if backend == "sklearn":
backend_instance = NearestNeighbors(algorithm="auto")
elif callable(backend):
backend_instance = backend()
self.metric = backend_instance.metric
elif hasattr(backend, "fit") and hasattr(backend, "kneighbors"):
backend_instance = sk_clone(backend)
self.metric = backend_instance.metric
else:
raise NotImplementedError(
"`knn_backend` must be either an NearestNeighbors-like object,"
" a callable returning such an object, or the string \"sklearn\"")
backend_instance.set_params(**self._get_metric_dict())
return backend_instance
def _fit_sknn(self, target_df):
"""
Fit scikit-learn NearestNeighbors object with samples in target_df.
Fits object, adds metric parameters and returns namedtuple which
also includes DataFrame indices so that identities can looked up.
Args:
target_df (pd.DataFrame) : DataFrame of covariates to fit
Returns:
KNN (namedtuple) : Namedtuple with members `learner` and `index`
containing the fitted sklearn object and an index lookup vector,
respectively.
"""
target_array = target_df.values
sknn = self._instantiate_nearest_neighbors_object()
target_array = self._ensure_array_columnlike(target_array)
sknn.fit(target_array)
return KNN(sknn, target_df.index)
@staticmethod
def _ensure_array_columnlike(target_array):
if len(target_array.shape) < 2 or target_array.shape[1] == 1:
target_array = target_array.reshape(-1, 1)
return target_array
def _get_metric_dict(
self,
VI_in_metric_params=True,
):
metric_dict = dict(metric=self.metric)
if self.metric == "mahalanobis":
VI = np.linalg.inv(self.conditioned_covariance_)
if VI_in_metric_params:
metric_dict["metric_params"] = {"VI": VI}
else:
metric_dict["VI"] = VI
return metric_dict
def _kneighbors(self, knn, source_df):
"""Lookup neighbors in knn object.
Args:
knn (namedtuple) : knn named tuple to look for neighbors in. The
object has `learner` and `index` attributes to reference the
original df index.
source_df (pd.DataFrame) : a DataFrame of source data points to use
as "needles" for the knn "haystack."
Returns:
match_df (pd.DataFrame) : a DataFrame of matches
"""
source_array = source_df.values
# 1d data must be in shape (-1, 1) for sklearn.knn
source_array = self._ensure_array_columnlike(source_array)
distances, neighbor_array_indices = knn.learner.kneighbors(
source_array, n_neighbors=self.n_neighbors
)
return self._generate_match_df(
source_df, knn.index, distances, neighbor_array_indices
)
def _generate_match_df(
self, source_df, target_df_index, distances, neighbor_array_indices
):
"""
Take results of matching and build into match_df DataFrame.
For clarity we'll call the samples that are being matched "needles" and
the set of samples that they looked for matches in the "haystack".
Args:
source_df (pd.DataFrame) : Covariate dataframe of N "needles"
target_df_index (np.array) : An array of M indices of the haystack
samples in their original dataframe.
distances (np.array) : An array of N arrays of floats of length K
where K is `self.n_neighbors`.
neighbor_array_indices (np.array) : An array of N arrays of ints of
length K where K is `self.n_neighbors`.
"""
# target is the haystack, source is the needle(s)
# translate array indices back to original indices
matches_dict = {}
for source_idx, distance_row, neighbor_array_index_row in zip(
source_df.index, distances, neighbor_array_indices
):
neighbor_df_indices = \
target_df_index[neighbor_array_index_row.flatten()]
if self.caliper is not None:
neighbor_df_indices = [
n
for i, n in enumerate(neighbor_df_indices)
if distance_row[i] < self.caliper
]
distance_row = [d for d in distance_row if d < self.caliper]
matches_dict[source_idx] = dict(
matches=list(neighbor_df_indices), distances=list(distance_row)
)
# convert dict of dicts like { 1: {'matches':[], 'distances':[]}} to df
return | pd.DataFrame(matches_dict) | pandas.DataFrame |
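# A minimal usage sketch of the Matching estimator defined above, based only on the
# fit / match / estimate_individual_outcome / matches_to_weights signatures documented in its
# docstrings (the toy data below is illustrative):
import numpy as np
import pandas as pd

X = pd.DataFrame(np.random.normal(size=(100, 3)), columns=["x1", "x2", "x3"])
a = pd.Series(np.random.binomial(1, 0.5, size=100))
y = pd.Series(X["x1"] + a + np.random.normal(size=100))

matcher = Matching(metric="mahalanobis", n_neighbors=1, caliper=None)
matcher.fit(X, a, y)
match_df = matcher.match(X, a)                            # matches per treatment value
potential_outcomes = matcher.estimate_individual_outcome(X, a)
weights = matcher.matches_to_weights(match_df)            # weights for covariate balance checks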
from clean2 import*
import pandas as pd
import matplotlib.pyplot as plt
import math
import datetime
import time
def main():
loop_set=[3,5]
set3=[] #labels scaled at different window sizes
set4=[] #labels without scaling
for i in range(0,len(loop_set)):
set3.extend(['totalmovavg_predictclose'+str(loop_set[i])+'_'+str(15*loop_set[i])+'0'])
set3.extend(['totalmovavg_predictclose'+str(loop_set[i])+'_'+str(15*loop_set[i])+'1'])
set3.extend(['totalmovavg_predictclose'+str(loop_set[i])+'_'+str(15*loop_set[i])+'2'])
set4.extend(['totalmovavg_predictclose'+str(loop_set[i])+'_'+str(15*loop_set[i])])
data_window=pd.DataFrame()
data_window_labels=pd.DataFrame()
final_data=pd.DataFrame()
predictors_1=pd.DataFrame()
predictors=pd.DataFrame()
predictors_final=pd.DataFrame()
data_copy_labels=pd.DataFrame()
data_predict=pd.DataFrame()
close_win=pd.DataFrame()
data=pd.DataFrame()
data_copy=pd.DataFrame()
labe_train=pd.DataFrame()
labe_test=pd.DataFrame()
data_la=pd.DataFrame()
data_confr=pd.DataFrame()
final_data.loc[0,'predicted_close']=0
final_data.loc[0,'predicted_close_high']=0
final_data.loc[0,'predicted_close_low']=0
now=datetime.datetime.now()
day=now.strftime('%d')
hour=now.strftime('%H')
now=now.strftime('%M')
    size0=1999 # a size0 that is too small can lead to insufficient data to be processed
now1=int(day)*1440+int(hour)*60+int(now)+size0
now=int(day)*1440+int(hour)*60+int(now)
set_windows=[7,8,8.5]
starters=[]
size_=size0-15*loop_set[len(loop_set)-1]
for i in set_windows:
starters.extend([size_-int(i*size_/9)])
delay_max_window=20
count=0
count1=0
lab_tra=0
x=[]
y=[]
yy=[]
ya=[]
yb=[]
yc=[]
x.extend([count1])
plt.ion()
fig=plt.figure()
ax1=fig.add_subplot(1,1,1)
from sklearn.externals import joblib
gbrt_fin=joblib.load('gbrt_final_close')
from sklearn.preprocessing import StandardScaler
scaler=StandardScaler()
while now1-now>0:
size=now1-int(now)
now=datetime.datetime.now()
d=now.strftime('%d')
h=now.strftime('%H')
now=now.strftime('%M')
now=int(d)*1440+int(h)*60+int(now)
data_cycle=data_creator('BTC','EUR',size,1)
data=data.shift(-(size))
data.drop(data.tail(size+1).index,inplace=True)
frame_cycle=[data,data_cycle]
data=pd.concat(frame_cycle)
data=data.reset_index()
data=data.drop(['index'],axis=1)
data_feat=pd.DataFrame()
data_feat=data.copy()
data_feat=data_feat.iloc[len(data_feat)-size0-1:,:]
data_feat=data_feat.reset_index()
data_feat=data_feat.drop(['index'],axis=1)
last_data=size+1
seconds=datetime.datetime.now()
seconds=seconds.strftime('%S')
for i in range(0,len(loop_set)):
short_window=loop_set[i]
window1=2*loop_set[i]
window2=4*loop_set[i]
window3=6*loop_set[i]
window4=10*loop_set[i]
window5=15*loop_set[i]
local_variance_window=int(round(1+short_window/2))
slope_window=int(round(1+(short_window/2)))
#Labels
if last_data == len(data_feat):
movavfun=total_movavg_predict(data_feat,'close',short_window,window1,window2,window3,window4,window5)
avg_close1=total_movavg(data_feat,'close',short_window,window1,window2,window3,window4,window5)
#Features
short_window=int(round(1+short_window/2))
window1=int(round(1+window1/2))
window2=int(round(1+window2/2))
window3=int(round(1+window3/2))
window4=int(round(1+window4/2))
window5=int(round(1+window5/2))
local_variance_window=int(round(1+local_variance_window/2))
slope_window=int(round(1+slope_window/2))
avg_close=total_movavg(data_feat,'close',short_window,window1,window2,window3,window4,window5)
avg_close_root=movavg(data_feat,'close',short_window)
local_variance_close=local_msq(data_feat,'close',avg_close,local_variance_window)
msroot_close=msroot(data_feat,'close',avg_close_root,short_window)
entropy_close=entropy(data_feat,'close',msroot_close,short_window,size)
local_entropy_close=entropy(data_feat,'close',local_variance_close,short_window,size)
avg_entropy_close=movavg(data_feat,entropy_close[1],short_window)
slope_close=slope(data_feat,'close',slope_window)
avg_slope=total_movavg(data_feat,slope_close,short_window,window1,window2,window3,window4,window5)
avg_slope_root=movavg(data_feat,slope_close,short_window)
local_variance_slope=local_msq(data_feat,slope_close,avg_slope,local_variance_window)
msroot_slope=msroot(data_feat,slope_close,avg_slope_root,short_window)
entropy_slope=entropy(data_feat,slope_close,msroot_slope,short_window,size)
local_entropy_slope=entropy(data_feat,slope_close,local_variance_slope,short_window,size)
avg_entropy_slope=movavg(data_feat,entropy_slope[1],short_window)
data_feat['high_close'+str(loop_set[i])]=data_feat[avg_close]+data_feat[local_variance_close]
avg_high=total_movavg(data_feat,'high_close'+str(loop_set[i]),short_window,window1,window2,window3,window4,window5)
avg_high_root=movavg(data_feat,'high_close'+str(loop_set[i]),short_window)
local_variance_high=local_msq(data_feat,'high_close'+str(loop_set[i]),avg_high,local_variance_window)
msroot_high=msroot(data_feat,'high_close'+str(loop_set[i]),avg_high_root,short_window)
entropy_high=entropy(data_feat,'high_close'+str(loop_set[i]),msroot_high,short_window,size)
local_entropy_high=entropy(data_feat,'high_close'+str(loop_set[i]),local_variance_high,short_window,size)
avg_entropy_high=movavg(data_feat,entropy_high[1],short_window)
data_feat['low_close'+str(loop_set[i])]=data_feat[avg_close]-data_feat[local_variance_close]
avg_low=total_movavg(data_feat,'low_close'+str(loop_set[i]),short_window,window1,window2,window3,window4,window5)
avg_low_root=movavg(data_feat,'low_close'+str(loop_set[i]),short_window)
local_variance_low=local_msq(data_feat,'low_close'+str(loop_set[i]),avg_high,local_variance_window)
msroot_low=msroot(data_feat,'low_close'+str(loop_set[i]),avg_low_root,short_window)
entropy_low=entropy(data_feat,'low_close'+str(loop_set[i]),msroot_low,short_window,size)
local_entropy_low=entropy(data_feat,'low_close'+str(loop_set[i]),local_variance_low,short_window,size)
avg_entropy_low=movavg(data_feat,entropy_low[1],short_window)
else:
#Labels
movavfun=total_movavg_predict_cycle(data_feat,'close',last_data,short_window,window1,window2,window3,window4,window5)
avg_close1=total_movavg_cycle(data_feat,'close',last_data,short_window,window1,window2,window3,window4,window5)
#Features
short_window=int(round(1+short_window/2))
window1=int(round(1+window1/2))
window2=int(round(1+window2/2))
window3=int(round(1+window3/2))
window4=int(round(1+window4/2))
window5=int(round(1+window5/2))
local_variance_window=int(round(1+local_variance_window/2))
slope_window=int(round(1+slope_window/2))
avg_close=total_movavg_cycle(data_feat,'close',last_data,short_window,window1,window2,window3,window4,window5)
avg_close_root=movavg_cycle(data_feat,'close',short_window,last_data)
local_variance_close=local_msq_cycle(data_feat,'close',avg_close,local_variance_window,last_data)
msroot_close=msroot(data_feat,'close',avg_close_root,short_window)
entropy_close=entropy(data_feat,'close',msroot_close,short_window,size)
local_entropy_close=entropy(data_feat,'close',local_variance_close,short_window,size)
avg_entropy_close=movavg_cycle(data_feat,entropy_close[1],short_window,last_data)
slope_close=slope_cycle(data_feat,'close',slope_window,last_data)
avg_slope=total_movavg_cycle(data_feat,slope_close,last_data,short_window,window1,window2,window3,window4,window5)
avg_slope_root=movavg_cycle(data_feat,slope_close,short_window,last_data)
local_variance_slope=local_msq_cycle(data_feat,slope_close,avg_slope,local_variance_window,last_data)
msroot_slope=msroot(data_feat,slope_close,avg_slope_root,short_window)
entropy_slope=entropy(data_feat,slope_close,msroot_slope,short_window,size)
local_entropy_slope=entropy(data_feat,slope_close,local_variance_slope,short_window,size)
avg_entropy_slope=movavg_cycle(data_feat,entropy_slope[1],short_window,last_data)
data_feat['high_close'+str(loop_set[i])]=data_feat[avg_close]+data_feat[local_variance_close]
avg_high=total_movavg_cycle(data_feat,'high_close'+str(loop_set[i]),last_data,short_window,window1,window2,window3,window4,window5)
avg_high_root=movavg_cycle(data_feat,'high_close'+str(loop_set[i]),short_window,last_data)
local_variance_high=local_msq_cycle(data_feat,'high_close'+str(loop_set[i]),avg_high,local_variance_window,last_data)
msroot_high=msroot(data_feat,'high_close'+str(loop_set[i]),avg_high_root,short_window)
entropy_high=entropy(data_feat,'high_close'+str(loop_set[i]),msroot_high,short_window,size)
local_entropy_high=entropy(data_feat,'high_close'+str(loop_set[i]),local_variance_high,short_window,size)
avg_entropy_high=movavg_cycle(data_feat,entropy_high[1],short_window,last_data)
data_feat['low_close'+str(loop_set[i])]=data_feat[avg_close]-data_feat[local_variance_close]
avg_low=total_movavg_cycle(data_feat,'low_close'+str(loop_set[i]),last_data,short_window,window1,window2,window3,window4,window5)
avg_low_root=movavg_cycle(data_feat,'low_close'+str(loop_set[i]),short_window,last_data)
local_variance_low=local_msq_cycle(data_feat,'low_close'+str(loop_set[i]),avg_high,local_variance_window,last_data)
msroot_low=msroot(data_feat,'low_close'+str(loop_set[i]),avg_low_root,short_window)
entropy_low=entropy(data_feat,'low_close'+str(loop_set[i]),msroot_low,short_window,size)
local_entropy_low=entropy(data_feat,'low_close'+str(loop_set[i]),local_variance_low,short_window,size)
avg_entropy_low=movavg_cycle(data_feat,entropy_low[1],short_window,last_data)
if last_data == len(data_feat):
data_labels=pd.DataFrame()
labels(data_labels,data_feat,'close',loop_set)
lista=list(data_labels.columns.values)
quantity=int(round((loop_set[len(loop_set)-1]/2)+1))
if last_data != len(data_feat):
data_final=data_feat.iloc[len(data_feat)-(size+quantity+1):,:]
data.drop(data.tail(quantity+size+1).index,inplace=True)
else:
data_final=data_feat.iloc[len(data_feat)-(size+1):,:]
data.drop(data.tail(size+1).index,inplace=True)
frame0=[data,data_final]
data=pd.concat(frame0)
now1=datetime.datetime.now()
d1=now1.strftime('%d')
h1=now1.strftime('%H')
m1=now1.strftime('%M')
seconds1=now1.strftime('%S')
now1=int(d1)*1440+int(h1)*60+int(m1)
size1=now1-int(now)
difsec=int(seconds1)+60*size1-int(seconds)
if size1==1 and 60-int(seconds1)<int(difsec/size):
time.sleep(60-int(seconds1)+1)
now1=datetime.datetime.now()
d1=now1.strftime('%d')
h1=now1.strftime('%H')
m1=now1.strftime('%M')
now1=int(d1)*1440+int(h1)*60+int(m1)
print(now1)
print('I waited a little')
print(int(difsec/size))
data_work=data.copy()
data_copy_labels=pd.DataFrame()
labels(data_copy_labels,data_work,'close',loop_set)
clean_labels(data_work,'close',loop_set)
lista=list(data_labels.columns.values)
data_work=data_work.dropna()
data_work=data_work.reset_index()
data_work=data_work.drop(['index'],axis=1)
len1=starters[0]+21+150
data_work=data_work.iloc[len(data_work)-starters[0]-21-150:,:]
data_copy_labels=data_copy_labels.iloc[len(data_copy_labels)-starters[0]-21-150:,:]
data_work=data_work.reset_index()
data_work=data_work.drop(['index'],axis=1)
data_copy_labels=data_copy_labels.reset_index()
data_copy_labels=data_copy_labels.drop(['index'],axis=1)
len2=len(data_work)
if len1 != len2:
print('Warning, data_work length is varying!')
data_confr['totalmovavgclose'+str(loop_set[0])+'_'+str(15*loop_set[0])]=data_work['totalmovavgclose'+str(loop_set[0])+'_'+str(15*loop_set[0])]
data_work=data_work.drop(['totalmovavgclose'+str(loop_set[0])+'_'+str(15*loop_set[0])],axis=1)
data_work=data_work.drop(['totalmovavgclose'+str(loop_set[1])+'_'+str(15*loop_set[1])],axis=1)
data_work=data_work.drop(['date'],axis=1)
data_work=data_work.drop(['time'],axis=1)
data_iterator=pd.DataFrame()
for q in starters:
for h in range(0,len(lista)):
name=lista[h]
data_iterator.loc[0,'variance_pred'+str(starters.index(q))+name]=0
data_iterator.loc[0,'variance_gbrt'+str(starters.index(q))+name]=0
data_iterator.loc[0,'variance_ada'+str(starters.index(q))+name]=0
data_iterator.loc[0,'counter'+str(starters.index(q))+name]=0
if close_win.empty:
true_features = pd.read_csv('folder address'+'true_features.txt')
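# Hedged sketch (added): the features assembled above are presumably scaled and
# fed to the joblib-loaded model; the exact columns and call order are assumptions.
#   X = scaler.fit_transform(data_work.values)        # scale engineered features
#   y_hat = gbrt_fin.predict(X)                        # gradient-boosted close prediction
#   final_data.loc[0, 'predicted_close'] = y_hat[-1]   # keep the latest prediction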
from pathlib import Path
from typing import Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from omegaconf.dictconfig import DictConfig
from sklearn.model_selection import train_test_split
from torch.nn import CrossEntropyLoss, Module
from torch.optim import AdamW, Optimizer
from torch.optim.lr_scheduler import ReduceLROnPlateau, OneCycleLR
from torch import Tensor  # Tensor is exported at the package top level; torch.tensor is the factory function in current PyTorch
from torch.utils.data import DataLoader, Dataset
from torchsampler import ImbalancedDatasetSampler
from zindi_keyword_spotter.dataset import ZindiAudioDataset
from zindi_keyword_spotter.focal_loss import FocalLoss
class PLClassifier(pl.LightningModule):
def __init__(
self,
model: Module,
loss_name: str,
lr: float,
wd: float,
scheduler: Optional[str],
total_steps: int,
weights: Optional[Tensor] = None,
val_weights: Optional[Tensor] = None,
loss_params: Optional[Dict[str, float]] = None,
) -> None:
super().__init__()
self.model = model
self.lr = lr
self.wd = wd
self.scheduler = scheduler
self.total_steps = total_steps
self.probs: Optional[np.ndarray] = None
# lb metric is log loss
self.val_criterion = CrossEntropyLoss(weight=val_weights)
if loss_name == 'ce':
self.criterion = CrossEntropyLoss(weight=weights)
elif loss_name == 'focal':
gamma = loss_params['focal_gamma']
self.criterion = FocalLoss(alpha=weights, gamma=gamma)
def forward(self, x: Tensor) -> Tensor:
return self.model(x)
def training_step(self, batch, batch_idx: int) -> Tensor:
x, y = batch
y_hat = self(x)
loss = self.criterion(y_hat, y)
self.log('train_loss', loss)
return loss
def validation_step(self, batch, batch_idx: int) -> Tensor:
x, y = batch
y_hat = self(x)
loss = self.val_criterion(y_hat, y)
self.log('val_loss', loss, prog_bar=True)
return loss
def test_step(self, batch, batch_idx: int) -> Tensor:
return self(batch).cpu()
def test_epoch_end(self, outputs: List[np.ndarray]) -> None:
logits = torch.vstack(outputs)
self.probs = F.softmax(logits, dim=1).numpy()
def configure_optimizers(self) -> Optional[Union[Optimizer, Sequence[Optimizer], Dict, Sequence[Dict], Tuple[List, List]]]:
optimizer = AdamW(self.parameters(), lr=self.lr, weight_decay=self.wd)
if self.scheduler is None:
return optimizer
elif self.scheduler == 'plateau':
return {
'optimizer': optimizer,
'lr_scheduler': ReduceLROnPlateau(optimizer, factor=0.1, patience=25, eps=1e-4, cooldown=0, min_lr=2e-7, verbose=True),
'monitor': 'val_loss',
}
elif self.scheduler == '1cycle':
return {
'optimizer': optimizer,
'lr_scheduler': OneCycleLR(optimizer, max_lr=10**2 * self.lr, total_steps=self.total_steps)
}
def callback_get_label(dataset: ZindiAudioDataset, idx: int) -> int:
label2idx = dataset.label2idx
return label2idx[dataset.labels[idx]]
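# Hedged usage sketch (added, not from the original project): how PLClassifier is
# typically driven by a Lightning Trainer. The tiny Linear backbone and the
# hyperparameter values below are placeholders.
if __name__ == "__main__":
    import torch.nn as nn

    _demo_model = nn.Sequential(nn.Flatten(), nn.Linear(128, 10))
    _demo_clf = PLClassifier(
        model=_demo_model,
        loss_name="ce",
        lr=3e-4,
        wd=1e-2,
        scheduler=None,
        total_steps=100,
    )
    print(_demo_clf)  # a real run would call pl.Trainer(...).fit(_demo_clf, datamodule=...)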
class ZindiDataModule(pl.LightningDataModule):
def __init__(
self,
cfg: DictConfig,
data_dir: Path,
log_dir: Path,
label2idx: Optional[Dict[str, int]] = None,
train_df: Optional[pd.DataFrame] = None,
test_df: Optional[pd.DataFrame] = None,
) -> None:
super().__init__()
self.pad_length = cfg.pad_length
self.batch_size = cfg.batch_size
self.data_dir = data_dir
self.log_dir = log_dir
self.val_type = cfg.val_type
self.train_utts = cfg.train_utts
self.val_utts = cfg.val_utts
self.transforms_config = {
'time_shift': cfg.time_shift,
'speed_tune': cfg.speed_tune,
'volume_tune': cfg.volume_tune,
'noise_vol': cfg.noise_vol,
'standartize_peaks': cfg.standartize_peaks,
}
self.label2idx = label2idx
self.train_df = train_df
self.test_df = test_df
self.val_df = None
self.balance_sampler = cfg.balance_sampler
self.n_workers = cfg.n_workers
self.train: Optional[Dataset] = None
self.val: Optional[Dataset] = None
self.test: Optional[Dataset] = None
def create_sized_split(self) -> Tuple[pd.DataFrame, pd.DataFrame]:
utt_counts = self.train_df['utt_id'].value_counts()
# get utt_ids that occur only once in the dataset
unique_utts = utt_counts[utt_counts == 1].index.values
# the unique part is split with a regular train_test_split
# the non-unique part is split by utt_id
unique_utt_samples = self.train_df[self.train_df['utt_id'].isin(unique_utts)]
nonunique_utt_sample = self.train_df[~self.train_df['utt_id'].isin(unique_utts)]
train_df1, val_df1 = train_test_split(unique_utt_samples, stratify=unique_utt_samples['label'], test_size=self.val_type)
train_df2 = nonunique_utt_sample[nonunique_utt_sample['utt_id'].isin(self.train_utts)]
val_df2 = nonunique_utt_sample[nonunique_utt_sample['utt_id'].isin(self.val_utts)]
train_df = pd.concat((train_df1, train_df2), axis=0)
val_df = pd.concat((val_df1, val_df2), axis=0)
return train_df, val_df
def create_chess_split(self) -> Tuple[pd.DataFrame, pd.DataFrame]:
# split across utts in chess order
all_labels = sorted(self.train_df['label'].unique())
train_parts = []
val_parts = []
for label in all_labels:
label_samples = self.train_df[self.train_df['label'] == label]
label_utts = label_samples['utt_id'].value_counts().index.values
for idx, utt_id in enumerate(label_utts):
utt_samples = label_samples[label_samples['utt_id'] == utt_id].copy()
if idx % 2 == 0:
train_parts.append(utt_samples)
else:
val_parts.append(utt_samples)
train_df = pd.concat(train_parts, axis=0, ignore_index=True)
val_df = pd.concat(val_parts, axis=0, ignore_index=True)
return train_df, val_df
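# Illustrative note (added): the "chess order" split above alternates each
# label's utterances between train and validation, so roughly half of every
# utterance group ends up on each side. With made-up data:
#   utts ordered by frequency: ['u1', 'u2', 'u3', 'u4']
#   idx % 2 == 0 -> train: u1, u3        idx % 2 == 1 -> val: u2, u4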
import copy
from datetime import datetime
import warnings
import numpy as np
from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, Series, isna, notna
import pandas._testing as tm
from pandas.core.window.common import _flex_binary_moment
from pandas.tests.window.common import (
Base,
check_pairwise_moment,
moments_consistency_cov_data,
moments_consistency_is_constant,
moments_consistency_mock_mean,
moments_consistency_series_data,
moments_consistency_std_data,
moments_consistency_var_data,
moments_consistency_var_debiasing_factors,
)
import pandas.tseries.offsets as offsets
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestMoments(Base):
def setup_method(self, method):
self._create_data()
def test_centered_axis_validation(self):
# ok
Series(np.ones(10)).rolling(window=3, center=True, axis=0).mean()
# bad axis
with pytest.raises(ValueError):
Series(np.ones(10)).rolling(window=3, center=True, axis=1).mean()
# ok ok
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=0).mean()
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=1).mean()
# bad axis
with pytest.raises(ValueError):
(DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=2).mean())
def test_rolling_sum(self, raw):
self._check_moment_func(
np.nansum, name="sum", zero_min_periods_equal=False, raw=raw
)
def test_rolling_count(self, raw):
counter = lambda x: np.isfinite(x).astype(float).sum()
self._check_moment_func(
counter, name="count", has_min_periods=False, fill_value=0, raw=raw
)
def test_rolling_mean(self, raw):
self._check_moment_func(np.mean, name="mean", raw=raw)
@td.skip_if_no_scipy
def test_cmov_mean(self):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
result = Series(vals).rolling(5, center=True).mean()
expected = Series(
[
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
)
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window(self):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
result = Series(vals).rolling(5, win_type="boxcar", center=True).mean()
expected = Series(
[
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
)
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window_corner(self):
# GH 8238
# all nan
vals = pd.Series([np.nan] * 10)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert np.isnan(result).all()
# empty
vals = pd.Series([], dtype=object)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert len(result) == 0
# shorter than window
vals = pd.Series(np.random.randn(5))
result = vals.rolling(10, win_type="boxcar").mean()
assert np.isnan(result).all()
assert len(result) == 5
@td.skip_if_no_scipy
@pytest.mark.parametrize(
"f,xp",
[
(
"mean",
[
[np.nan, np.nan],
[np.nan, np.nan],
[9.252, 9.392],
[8.644, 9.906],
[8.87, 10.208],
[6.81, 8.588],
[7.792, 8.644],
[9.05, 7.824],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"std",
[
[np.nan, np.nan],
[np.nan, np.nan],
[3.789706, 4.068313],
[3.429232, 3.237411],
[3.589269, 3.220810],
[3.405195, 2.380655],
[3.281839, 2.369869],
[3.676846, 1.801799],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"var",
[
[np.nan, np.nan],
[np.nan, np.nan],
[14.36187, 16.55117],
[11.75963, 10.48083],
[12.88285, 10.37362],
[11.59535, 5.66752],
[10.77047, 5.61628],
[13.51920, 3.24648],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"sum",
[
[np.nan, np.nan],
[np.nan, np.nan],
[46.26, 46.96],
[43.22, 49.53],
[44.35, 51.04],
[34.05, 42.94],
[38.96, 43.22],
[45.25, 39.12],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
],
)
def test_cmov_window_frame(self, f, xp):
# Gh 8238
df = DataFrame(
np.array(
[
[12.18, 3.64],
[10.18, 9.16],
[13.24, 14.61],
[4.51, 8.11],
[6.15, 11.44],
[9.14, 6.21],
[11.31, 10.67],
[2.94, 6.51],
[9.42, 8.39],
[12.44, 7.34],
]
)
)
xp = DataFrame(np.array(xp))
roll = df.rolling(5, win_type="boxcar", center=True)
rs = getattr(roll, f)()
tm.assert_frame_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_na_min_periods(self):
# min_periods
vals = Series(np.random.randn(10))
vals[4] = np.nan
vals[8] = np.nan
xp = vals.rolling(5, min_periods=4, center=True).mean()
rs = vals.rolling(5, win_type="boxcar", min_periods=4, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular(self, win_types):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
xps = {
"hamming": [
np.nan,
np.nan,
8.71384,
9.56348,
12.38009,
14.03687,
13.8567,
11.81473,
np.nan,
np.nan,
],
"triang": [
np.nan,
np.nan,
9.28667,
10.34667,
12.00556,
13.33889,
13.38,
12.33667,
np.nan,
np.nan,
],
"barthann": [
np.nan,
np.nan,
8.4425,
9.1925,
12.5575,
14.3675,
14.0825,
11.5675,
np.nan,
np.nan,
],
"bohman": [
np.nan,
np.nan,
7.61599,
9.1764,
12.83559,
14.17267,
14.65923,
11.10401,
np.nan,
np.nan,
],
"blackmanharris": [
np.nan,
np.nan,
6.97691,
9.16438,
13.05052,
14.02156,
15.10512,
10.74574,
np.nan,
np.nan,
],
"nuttall": [
np.nan,
np.nan,
7.04618,
9.16786,
13.02671,
14.03559,
15.05657,
10.78514,
np.nan,
np.nan,
],
"blackman": [
np.nan,
np.nan,
7.73345,
9.17869,
12.79607,
14.20036,
14.57726,
11.16988,
np.nan,
np.nan,
],
"bartlett": [
np.nan,
np.nan,
8.4425,
9.1925,
12.5575,
14.3675,
14.0825,
11.5675,
np.nan,
np.nan,
],
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_linear_range(self, win_types):
# GH 8238
vals = np.array(range(10), dtype=np.float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_missing_data(self, win_types):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan, 10.63, 14.48]
)
xps = {
"bartlett": [
np.nan,
np.nan,
9.70333,
10.5225,
8.4425,
9.1925,
12.5575,
14.3675,
15.61667,
13.655,
],
"blackman": [
np.nan,
np.nan,
9.04582,
11.41536,
7.73345,
9.17869,
12.79607,
14.20036,
15.8706,
13.655,
],
"barthann": [
np.nan,
np.nan,
9.70333,
10.5225,
8.4425,
9.1925,
12.5575,
14.3675,
15.61667,
13.655,
],
"bohman": [
np.nan,
np.nan,
8.9444,
11.56327,
7.61599,
9.1764,
12.83559,
14.17267,
15.90976,
13.655,
],
"hamming": [
np.nan,
np.nan,
9.59321,
10.29694,
8.71384,
9.56348,
12.38009,
14.20565,
15.24694,
13.69758,
],
"nuttall": [
np.nan,
np.nan,
8.47693,
12.2821,
7.04618,
9.16786,
13.02671,
14.03673,
16.08759,
13.65553,
],
"triang": [
np.nan,
np.nan,
9.33167,
9.76125,
9.28667,
10.34667,
12.00556,
13.82125,
14.49429,
13.765,
],
"blackmanharris": [
np.nan,
np.nan,
8.42526,
12.36824,
6.97691,
9.16438,
13.05052,
14.02175,
16.1098,
13.65509,
],
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, min_periods=3).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special(self, win_types_special):
# GH 8238
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
"general_gaussian": {"power": 2.0, "width": 2.0},
"exponential": {"tau": 10},
}
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
xps = {
"gaussian": [
np.nan,
np.nan,
8.97297,
9.76077,
12.24763,
13.89053,
13.65671,
12.01002,
np.nan,
np.nan,
],
"general_gaussian": [
np.nan,
np.nan,
9.85011,
10.71589,
11.73161,
13.08516,
12.95111,
12.74577,
np.nan,
np.nan,
],
"kaiser": [
np.nan,
np.nan,
9.86851,
11.02969,
11.65161,
12.75129,
12.90702,
12.83757,
np.nan,
np.nan,
],
"exponential": [
np.nan,
np.nan,
9.83364,
11.10472,
11.64551,
12.66138,
12.92379,
12.83770,
np.nan,
np.nan,
],
}
xp = Series(xps[win_types_special])
rs = (
Series(vals)
.rolling(5, win_type=win_types_special, center=True)
.mean(**kwds[win_types_special])
)
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special_linear_range(self, win_types_special):
# GH 8238
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
"general_gaussian": {"power": 2.0, "width": 2.0},
"slepian": {"width": 0.5},
"exponential": {"tau": 10},
}
vals = np.array(range(10), dtype=np.float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = (
Series(vals)
.rolling(5, win_type=win_types_special, center=True)
.mean(**kwds[win_types_special])
)
tm.assert_series_equal(xp, rs)
def test_rolling_median(self, raw):
self._check_moment_func(np.median, name="median", raw=raw)
def test_rolling_min(self, raw):
self._check_moment_func(np.min, name="min", raw=raw)
a = pd.Series([1, 2, 3, 4, 5])
result = a.rolling(window=100, min_periods=1).min()
expected = pd.Series(np.ones(len(a)))
tm.assert_series_equal(result, expected)
with pytest.raises(ValueError):
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).min()
def test_rolling_max(self, raw):
self._check_moment_func(np.max, name="max", raw=raw)
a = pd.Series([1, 2, 3, 4, 5], dtype=np.float64)
b = a.rolling(window=100, min_periods=1).max()
tm.assert_almost_equal(a, b)
with pytest.raises(ValueError):
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).max()
@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_rolling_quantile(self, q, raw):
def scoreatpercentile(a, per):
values = np.sort(a, axis=0)
idx = int(per / 1.0 * (values.shape[0] - 1))
if idx == values.shape[0] - 1:
retval = values[-1]
else:
qlow = float(idx) / float(values.shape[0] - 1)
qhig = float(idx + 1) / float(values.shape[0] - 1)
vlow = values[idx]
vhig = values[idx + 1]
retval = vlow + (vhig - vlow) * (per - qlow) / (qhig - qlow)
return retval
def quantile_func(x):
return scoreatpercentile(x, q)
self._check_moment_func(quantile_func, name="quantile", quantile=q, raw=raw)
def test_rolling_quantile_np_percentile(self):
# #9413: Tests that rolling window's quantile default behavior
# is analogous to Numpy's percentile
row = 10
col = 5
idx = pd.date_range("20100101", periods=row, freq="B")
df = DataFrame(np.random.rand(row * col).reshape((row, -1)), index=idx)
df_quantile = df.quantile([0.25, 0.5, 0.75], axis=0)
np_percentile = np.percentile(df, [25, 50, 75], axis=0)
tm.assert_almost_equal(df_quantile.values, np.array(np_percentile))
@pytest.mark.parametrize("quantile", [0.0, 0.1, 0.45, 0.5, 1])
@pytest.mark.parametrize(
"interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]
)
@pytest.mark.parametrize(
"data",
[
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[8.0, 1.0, 3.0, 4.0, 5.0, 2.0, 6.0, 7.0],
[0.0, np.nan, 0.2, np.nan, 0.4],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 0.1, np.nan, 0.3, 0.4, 0.5],
[0.5],
[np.nan, 0.7, 0.6],
],
)
def test_rolling_quantile_interpolation_options(
self, quantile, interpolation, data
):
# Tests that rolling window's quantile behavior is analogous to
# Series' quantile for each interpolation option
s = Series(data)
q1 = s.quantile(quantile, interpolation)
q2 = s.expanding(min_periods=1).quantile(quantile, interpolation).iloc[-1]
if np.isnan(q1):
assert np.isnan(q2)
else:
assert q1 == q2
def test_invalid_quantile_value(self):
data = np.arange(5)
s = Series(data)
msg = "Interpolation 'invalid' is not supported"
with pytest.raises(ValueError, match=msg):
s.rolling(len(data), min_periods=1).quantile(0.5, interpolation="invalid")
def test_rolling_quantile_param(self):
ser = Series([0.0, 0.1, 0.5, 0.9, 1.0])
with pytest.raises(ValueError):
ser.rolling(3).quantile(-0.1)
with pytest.raises(ValueError):
ser.rolling(3).quantile(10.0)
with pytest.raises(TypeError):
ser.rolling(3).quantile("foo")
def test_rolling_apply(self, raw):
# suppress warnings about empty slices, as we are deliberately testing
# with a 0-length Series
def f(x):
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=".*(empty slice|0 for slice).*",
category=RuntimeWarning,
)
return x[np.isfinite(x)].mean()
self._check_moment_func(np.mean, name="apply", func=f, raw=raw)
def test_rolling_std(self, raw):
self._check_moment_func(lambda x: np.std(x, ddof=1), name="std", raw=raw)
self._check_moment_func(
lambda x: np.std(x, ddof=0), name="std", ddof=0, raw=raw
)
def test_rolling_std_1obs(self):
vals = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
result = vals.rolling(1, min_periods=1).std()
expected = pd.Series([np.nan] * 5)
tm.assert_series_equal(result, expected)
result = vals.rolling(1, min_periods=1).std(ddof=0)
expected = pd.Series([0.0] * 5)
tm.assert_series_equal(result, expected)
result = pd.Series([np.nan, np.nan, 3, 4, 5]).rolling(3, min_periods=2).std()
assert np.isnan(result[2])
def test_rolling_std_neg_sqrt(self):
# unit test from Bottleneck
# Test move_nanstd for neg sqrt.
a = pd.Series(
[
0.0011448196318903589,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767,
]
)
b = a.rolling(window=3).std()
assert np.isfinite(b[2:]).all()
b = a.ewm(span=3).std()
assert np.isfinite(b[2:]).all()
def test_rolling_var(self, raw):
self._check_moment_func(lambda x: np.var(x, ddof=1), name="var", raw=raw)
self._check_moment_func(
lambda x: np.var(x, ddof=0), name="var", ddof=0, raw=raw
)
@td.skip_if_no_scipy
def test_rolling_skew(self, raw):
from scipy.stats import skew
self._check_moment_func(lambda x: skew(x, bias=False), name="skew", raw=raw)
@td.skip_if_no_scipy
def test_rolling_kurt(self, raw):
from scipy.stats import kurtosis
self._check_moment_func(lambda x: kurtosis(x, bias=False), name="kurt", raw=raw)
def _check_moment_func(
self,
static_comp,
name,
raw,
has_min_periods=True,
has_center=True,
has_time_rule=True,
fill_value=None,
zero_min_periods_equal=True,
**kwargs,
):
# inject raw
if name == "apply":
kwargs = copy.copy(kwargs)
kwargs["raw"] = raw
def get_result(obj, window, min_periods=None, center=False):
r = obj.rolling(window=window, min_periods=min_periods, center=center)
return getattr(r, name)(**kwargs)
series_result = get_result(self.series, window=50)
assert isinstance(series_result, Series)
tm.assert_almost_equal(series_result.iloc[-1], static_comp(self.series[-50:]))
frame_result = get_result(self.frame, window=50)
assert isinstance(frame_result, DataFrame)
tm.assert_series_equal(
frame_result.iloc[-1, :],
self.frame.iloc[-50:, :].apply(static_comp, axis=0, raw=raw),
check_names=False,
)
# check time_rule works
if has_time_rule:
win = 25
minp = 10
series = self.series[::2].resample("B").mean()
frame = self.frame[::2].resample("B").mean()
if has_min_periods:
series_result = get_result(series, window=win, min_periods=minp)
frame_result = get_result(frame, window=win, min_periods=minp)
else:
series_result = get_result(series, window=win, min_periods=0)
frame_result = get_result(frame, window=win, min_periods=0)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = self.series[::2].truncate(prev_date, last_date)
trunc_frame = self.frame[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1], static_comp(trunc_series))
tm.assert_series_equal(
frame_result.xs(last_date),
trunc_frame.apply(static_comp, raw=raw),
check_names=False,
)
# excluding NaNs correctly
obj = Series(randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
if has_min_periods:
result = get_result(obj, 50, min_periods=30)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# min_periods is working correctly
result = get_result(obj, 20, min_periods=15)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert isna(result.iloc[-5])
obj2 = Series(randn(20))
result = get_result(obj2, 10, min_periods=5)
assert isna(result.iloc[3])
assert notna(result.iloc[4])
if zero_min_periods_equal:
# min_periods=0 may be equivalent to min_periods=1
result0 = get_result(obj, 20, min_periods=0)
result1 = get_result(obj, 20, min_periods=1)
tm.assert_almost_equal(result0, result1)
else:
result = get_result(obj, 50)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# window larger than series length (#7297)
if has_min_periods:
for minp in (0, len(self.series) - 1, len(self.series)):
result = get_result(self.series, len(self.series) + 1, min_periods=minp)
expected = get_result(self.series, len(self.series), min_periods=minp)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
else:
result = get_result(self.series, len(self.series) + 1, min_periods=0)
expected = get_result(self.series, len(self.series), min_periods=0)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
# check center=True
if has_center:
if has_min_periods:
result = get_result(obj, 20, min_periods=15, center=True)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=15
)[9:].reset_index(drop=True)
else:
result = get_result(obj, 20, min_periods=0, center=True)
print(result)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=0
)[9:].reset_index(drop=True)
tm.assert_series_equal(result, expected)
# shifter index
s = [f"x{x:d}" for x in range(12)]
if has_min_periods:
minp = 10
series_xp = (
get_result(
self.series.reindex(list(self.series.index) + s),
window=25,
min_periods=minp,
)
.shift(-12)
.reindex(self.series.index)
)
frame_xp = (
get_result(
self.frame.reindex(list(self.frame.index) + s),
window=25,
min_periods=minp,
)
.shift(-12)
.reindex(self.frame.index)
)
series_rs = get_result(
self.series, window=25, min_periods=minp, center=True
)
frame_rs = get_result(
self.frame, window=25, min_periods=minp, center=True
)
else:
series_xp = (
get_result(
self.series.reindex(list(self.series.index) + s),
window=25,
min_periods=0,
)
.shift(-12)
.reindex(self.series.index)
)
frame_xp = (
get_result(
self.frame.reindex(list(self.frame.index) + s),
window=25,
min_periods=0,
)
.shift(-12)
.reindex(self.frame.index)
)
series_rs = get_result(
self.series, window=25, min_periods=0, center=True
)
frame_rs = get_result(self.frame, window=25, min_periods=0, center=True)
if fill_value is not None:
series_xp = series_xp.fillna(fill_value)
frame_xp = frame_xp.fillna(fill_value)
tm.assert_series_equal(series_xp, series_rs)
tm.assert_frame_equal(frame_xp, frame_rs)
def _rolling_consistency_cases():
for window in [1, 2, 3, 10, 20]:
for min_periods in {0, 1, 2, 3, 4, window}:
if min_periods and (min_periods > window):
continue
for center in [False, True]:
yield window, min_periods, center
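# Quick illustration (added): the generator above yields (window, min_periods,
# center) combinations such as (1, 0, False), (1, 0, True), (1, 1, False),
# (1, 1, True), (2, 0, False), ...; min_periods values larger than the window
# are skipped by the guard.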
class TestRollingMomentsConsistency(Base):
def setup_method(self, method):
self._create_data()
# binary moments
def test_rolling_cov(self):
A = self.series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_corr(self):
A = self.series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = a.rolling(window=len(a), min_periods=1).corr(b)
tm.assert_almost_equal(result[-1], a.corr(b))
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_rolling_pairwise_cov_corr(self, func):
check_pairwise_moment(self.frame, "rolling", func, window=10, min_periods=5)
@pytest.mark.parametrize("method", ["corr", "cov"])
def test_flex_binary_frame(self, method):
series = self.frame[1]
res = getattr(series.rolling(window=10), method)(self.frame)
res2 = getattr(self.frame.rolling(window=10), method)(series)
exp = self.frame.apply(lambda x: getattr(series.rolling(window=10), method)(x))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = self.frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = getattr(self.frame.rolling(window=10), method)(frame2)
exp = DataFrame(
{
k: getattr(self.frame[k].rolling(window=10), method)(frame2[k])
for k in self.frame
}
)
tm.assert_frame_equal(res3, exp)
@pytest.mark.slow
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
def test_rolling_apply_consistency(
consistency_data, base_functions, no_nan_functions, window, min_periods, center
):
x, is_constant, no_nans = consistency_data
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning,
)
# test consistency between rolling_xyz() and either (a)
# rolling_apply of Series.xyz(), or (b) rolling_apply of
# np.nanxyz()
functions = base_functions
# GH 8269
if no_nans:
functions = no_nan_functions + base_functions
for (f, require_min_periods, name) in functions:
rolling_f = getattr(
x.rolling(window=window, center=center, min_periods=min_periods), name,
)
if (
require_min_periods
and (min_periods is not None)
and (min_periods < require_min_periods)
):
continue
if name == "count":
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
else:
if name in ["cov", "corr"]:
rolling_f_result = rolling_f(pairwise=False)
else:
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
# GH 9422
if name in ["sum", "prod"]:
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
@pytest.mark.parametrize("window", range(7))
def test_rolling_corr_with_zero_variance(window):
# GH 18430
s = pd.Series(np.zeros(20))
other = pd.Series(np.arange(20))
assert s.rolling(window=window).corr(other=other).isna().all()
def test_flex_binary_moment():
# GH3155
# don't blow the stack
msg = "arguments to moment function must be of type np.ndarray/Series/DataFrame"
with pytest.raises(TypeError, match=msg):
_flex_binary_moment(5, 6, None)
def test_corr_sanity():
# GH 3155
df = DataFrame(
np.array(
[
[0.87024726, 0.18505595],
[0.64355431, 0.3091617],
[0.92372966, 0.50552513],
[0.00203756, 0.04520709],
[0.84780328, 0.33394331],
[0.78369152, 0.63919667],
]
)
)
res = df[0].rolling(5, center=True).corr(df[1])
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
# and some fuzzing
for _ in range(10):
df = DataFrame(np.random.rand(30, 2))
res = df[0].rolling(5, center=True).corr(df[1])
try:
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
except AssertionError:
print(res)
def test_rolling_cov_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2a)
tm.assert_series_equal(result, expected)
def test_rolling_corr_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2a)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"f",
[
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(quantile=0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
],
)
@td.skip_if_no_scipy
def test_rolling_functions_window_non_shrinkage(f):
# GH 7764
s = Series(range(4))
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=["A", "B"])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
def test_rolling_functions_window_non_shrinkage_binary():
# corr/cov return a MI DataFrame
df = DataFrame(
[[1, 5], [3, 2], [3, 9], [-1, 0]],
columns=Index(["A", "B"], name="foo"),
index=Index(range(4), name="bar"),
)
df_expected = DataFrame(
columns=Index(["A", "B"], name="foo"),
index=pd.MultiIndex.from_product([df.index, df.columns], names=["bar", "foo"]),
dtype="float64",
)
functions = [
lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)),
]
for f in functions:
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
def test_rolling_skew_edge_cases():
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).skew()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=2).skew()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 0.177994, 1.548824]
d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN, 0.177994, 1.548824])
x = d.rolling(window=4).skew()
tm.assert_series_equal(expected, x)
def test_rolling_kurt_edge_cases():
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).kurt()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=3).kurt()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 1.224307, 2.671499]
d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN, 1.224307, 2.671499])
x = d.rolling(window=4).kurt()
tm.assert_series_equal(expected, x)
def test_rolling_skew_eq_value_fperr():
# #18804 all rolling skew for all equal values should return Nan
a = Series([1.1] * 15).rolling(window=10).skew()
assert np.isnan(a).all()
def test_rolling_kurt_eq_value_fperr():
# #18804 all rolling kurt for all equal values should return Nan
a = Series([1.1] * 15).rolling(window=10).kurt()
assert np.isnan(a).all()
def test_rolling_max_gh6297():
"""Replicate result expected in GH #6297"""
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 2 datapoints on one of the days
indices.append(datetime(1975, 1, 3, 6, 0))
series = Series(range(1, 7), index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
expected = Series(
[1.0, 2.0, 6.0, 4.0, 5.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_max_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be max
expected = Series(
[0.0, 1.0, 2.0, 3.0, 20.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify median (10.0)
expected = Series(
[0.0, 1.0, 2.0, 3.0, 10.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").median().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# import libraries and datasets
import streamlit as st
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import RendererAgg
import seaborn as sns
import os
# import and merge the datasets
ptbdb_normal = pd.read_csv("data/ptbdb_normal.csv", header=None)
ptbdb_abnormal = pd.read_csv("data/ptbdb_abnormal.csv", header=None)
ptbdb = pd.concat([ptbdb_normal, ptbdb_abnormal])
mitbih_train = pd.read_csv("data/mitbih_train.csv", header=None)  # was reading the test file twice
mitbih_test = pd.read_csv("data/mitbih_test.csv", header=None)
mitbih = pd.concat([mitbih_test, mitbih_train])
"""
Utilities for time series preprocessing
"""
import numpy as np
import pandas as pd
def get_timeseries_at_node(node_ind, node2feature, ts_code):
"""
Return ts_code time series at node_ind
output shape : (T, )
"""
return node2feature[node_ind][ts_code]
def merge_timeseries(node_indices, node2feature, ts_code):
"""
Return merged ts_code time series of node_indices
Input:
node_indices : a list of N nodes we want to consider.
node2feature : node to features dictionary
ts_code : a code of time series
Output:
ret : a merged time series (pd.DataFrame). shape=(T,N)
"""
ret, cols = [], []
for nid in node_indices:
cols.append(node2feature[nid]['ij_loc'])
ret.append(get_timeseries_at_node(nid, node2feature, ts_code))
ret = pd.DataFrame(np.array(ret).transpose()) # (T,N)
ret.columns = cols
return ret
def add_timestamp(df, start_time="2012-06-28 21:00:00", timedelta=None):
"""
Return a dataframe having time stamps as indices.
Input:
df : (T,N) dataframe. It is an output of merge_timeseries() function.
start_time : str, or pd.datetime
timedelta : a list of time delta. Read 'XTIME' in the dataset.
Output:
ret : a dataframe with time stamps as an index column
"""
df['Time'] = pd.to_datetime(start_time)
df['Time'] = df['Time'] + pd.to_timedelta(timedelta, unit='m')
# the two lines below are an assumed completion, based on the docstring above
df = df.set_index('Time')
return df
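# Usage sketch (added, not from the original module): merge two nodes and stamp
# the result with timestamps. The node2feature layout and values are assumptions
# based on the accessors above.
if __name__ == "__main__":
    _node2feature = {
        0: {'ij_loc': '10_20', 'T2': np.array([280.0, 281.5, 282.1])},
        1: {'ij_loc': '11_20', 'T2': np.array([279.2, 280.7, 281.9])},
    }
    _df = merge_timeseries([0, 1], _node2feature, 'T2')   # shape (T, N) = (3, 2)
    _df = add_timestamp(_df, timedelta=[0, 60, 120])      # offsets in minutes
    print(_df)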
import numpy as np
import pandas as pd
import neurokit2 as nk
# =============================================================================
# Example 1
# =============================================================================
# Generate synthetic signals
ecg = nk.ecg_simulate(duration=10, heart_rate=70)
emg = nk.emg_simulate(duration=10, n_bursts=3)
# Visualise biosignals
| pd.DataFrame({"ECG": ecg, "EMG": emg}) | pandas.DataFrame |
#!/usr/bin/env python
# inst: university of bristol
# auth: <NAME>
# mail: <EMAIL> / <EMAIL>
import os
import shutil
from glob import glob
import zipfile
import numpy as np
import pandas as pd
import gdalutils
from osgeo import osr
def _secs_to_time(df, date1):
df = df.copy()
conversion = 86400 # 86400s = 1day
df['time'] = pd.to_datetime(
df['Time']/conversion, unit='D', origin=pd.Timestamp(date1))
df.set_index(df['time'], inplace=True)
del df['Time']
del df['time']
return df
def _hours_to_time(df, date1):
df = df.copy()
conversion = 24 # 24h = 1day
df['time'] = pd.to_datetime(
df['Time']/conversion, unit='D', origin=pd.Timestamp(date1))
df.set_index(df['time'], inplace=True)
del df['Time']
del df['time']
return df
def _get_lineno(filename, phrase):
with open(filename, 'r') as f:
for num, line in enumerate(f):
if phrase in line:
return num
def read_mass(filename, date1='1990-01-01'):
df = pd.read_csv(filename, delim_whitespace=True)
# assumed completion: the .mass 'Time' column is in seconds, so reuse the helper above
df = _secs_to_time(df, date1)
return df
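# Usage sketch (added): read a LISFLOOD-FP .mass file into a time-indexed frame.
# The path and the 'Vol' column name are placeholders/assumptions.
#   mass = read_mass('results/run.mass', date1='1990-01-01')
#   mass['Vol'].plot()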
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
            172: pd.Timestamp("2012-10-21 00:00:00"),
import numpy as np
import pandas as pd
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
from scipy import stats
import warnings
import os
from itertools import combinations
import multiprocessing
from multiprocessing import Pool
from tqdm import tqdm
from IPython.display import Markdown, display
from scipy.spatial import distance
from sklearn.metrics.pairwise import cosine_similarity
def printmd(string):
display(Markdown(string))
def pandas_df_to_markdown_table(df):
fmt = ['---' for i in range(len(df.columns))]
df_fmt = pd.DataFrame([fmt], columns=df.columns)
df_formatted = pd.concat([df_fmt, df])
display(Markdown(df_formatted.to_csv(sep="|", index=False)))
#use:df.style.applymap(color_code(1), subset=['col1','col2'])
def color_code(thresh):
def color_code_by_val(val):
color = None
if val <= thresh:
color = 'red'
return 'background-color: %s' % color
return color_code_by_val
def doInitialSettings(figsize=(5,3)):
try:
warnings.simplefilter("always")
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
plt.rcParams["figure.figsize"] = figsize
plt.rc('axes', labelsize=14)
plt.rc('xtick', labelsize=12)
plt.rc('ytick', labelsize=12)
pd.set_option('display.max_rows',20)
pd.set_option("io.excel.xlsx.reader", "openpyxl")
pd.set_option("io.excel.xlsm.reader", "openpyxl")
pd.set_option("io.excel.xlsb.reader", "openpyxl")
pd.set_option("io.excel.xlsx.writer", "openpyxl")
pd.set_option("io.excel.xlsm.writer", "openpyxl")
except:
pass
def printUniques(datafr,i=10):
"""
prints unique values in a dataframe whose nunique value <= 10
"""
try:
dict_=dict(datafr.nunique())
for k,v in dict_.items():
if int(v)<=i: #we don't want to see the unique items that are greater than i
print("Unique items in column",k)
print(datafr[k].unique(),end="\n\n")
print("You may want to convert the numerics with low cardinality to categorical")
except Exception as e:
print(e)
def printValueCount(datafr,i=10):
"""
prints value counts for columns whose # of unique value is less than i
"""
try:
dict_=dict(datafr.nunique())
for k,v in dict_.items():
if int(v)<=i:
print("Unique items in column",k)
print(datafr[k].value_counts(dropna=False),end="\n\n")
except Exception as e:
print(e)
def getColumnsInLowCardinality(df,i=10):
    # Is this really needed? How does it differ from printUniques?
try:
dict_=dict(df.nunique())
list_=[]
for k,v in dict_.items():
if int(v)<=i:
list_.append(k)
return list_
except Exception as e:
print(e)
def multicountplot(datafr,i=5,fig=(4,5),r=45, colsize=2,hue=None):
"""countplots for columns whose # of unique value is less than i """
try:
dict_=dict(datafr.nunique())
target=[k for k,v in dict_.items() if v<=i]
lng=0
if len(target)<=2:
print("plot manually due to <=2 target feature")
return
if len(target)//colsize==len(target)/colsize:
lng=len(target)//colsize
else:
lng=len(target)//colsize+1
fig, axes= plt.subplots(lng,colsize,figsize=fig)
k=0
for i in range(lng):
for j in range(colsize):
if k==len(target):
break
elif target[k]==hue:
pass
else:
sns.countplot(x=datafr[target[k]].fillna("Null"), ax=axes[i,j], data=datafr, hue=hue)
plt.tight_layout()
axes[i,j].set_xticklabels(axes[i,j].get_xticklabels(), rotation=r,ha='right')
k=k+1
except Exception as e:
print(e)
print("You may want to increase the size of i")
def ShowTopN(df,n=5):
"""
Works for numeric features. Even if you pass categorical features they will be disregarded
"""
try:
for d in df.select_dtypes("number").columns:
print(f"Top {n} in {d}:")
print(df[d].sort_values(ascending=False).head(n))
print("---------------------------")
except Exception as e:
print(e)
def sortAndPrintMaxMinNValues(df,columns,n=1,removeNull=True):
#if n=1 returns some unusual values, we can increase n
try:
for c in columns:
sorted_=df[c].sort_values()
if removeNull==True:
sorted_=sorted_.dropna()
print((c,sorted_[:n].values,sorted_[-n:].values))
except Exception as e:
print(e)
def addStdMeanMedian(df):
warnings.warn("Warning...addStdMeanMedian is depreciated. Use addCoefOfVarianceToDescribe")
def addCoefOfVarianceToDescribe(df):
df=df.describe().T
df["mean/median"]=df["mean"]/df["50%"]
df["std/mean"]=df["std"]/df["mean"]
return df
def outlierinfo(df,featurelist,imputestrategy="None",thresh=0.25):
"""
Gives Q1,Q3,IQR, outlier beginning points, mean in the boxplot, total mean.
Args:
imputestrategy:median, mean, mode, None
"""
for f in featurelist:
if imputestrategy=='None':
Q1 = df[f].quantile(thresh)
Q3 = df[f].quantile(1-thresh)
IQR = Q3-Q1
top=(Q3 + 1.5 * IQR)
bottom=(Q1 - 1.5 * IQR)
mbox=df[(df[f] > top) | (df[f] < bottom)][f].mean()
m=df[f].mean()
outliers=len(df[(df[f]>top) | (df[f]<bottom)])
else:
temp=df[f].fillna(df[f].agg(imputestrategy))
Q1 = temp.quantile(thresh)
Q3 = temp.quantile(1-thresh)
IQR = Q3-Q1
top=(Q3 + 1.5 * IQR)
bottom=(Q1 - 1.5 * IQR)
mbox=temp[(temp > top) | (temp < bottom)].mean()
m=temp.mean()
outliers=len(temp[(temp >top) | (temp<bottom)])
print(f"{f}, Min:{df[f].min()}, Max:{df[f].max()}, Q1:{Q1:9.2f}, Q3:{Q3:9.2f}, IQR:{IQR:9.2f}, Q3+1,5*IQR:{top:9.2f}, Q1-1,5*IQR:{bottom:9.2f}, Mean within the box:{mbox:9.2f}, Total Mean:{m:9.2f}, Outliers:{outliers}",end="\n\n")
def outliers_IQR(df,featurelist,imputestrategy="None",thresh=0.25,printorreturn='print'):
"""
This is the approach that boxplot uses, which is IQR approach.
sensitive to null. the more null, the narrower box from both end. boxplot just shrinks, thus number of outliers increases.
so it would be sensible to impute the nulls first. we, here, impute them temporarily just in case.
Args:
imputestrategy:median, mean, mode, None
printorreturn:(print,return,both). if print, it prints the results, if return, it returns the list of results as a list of tuple,if both, it prints an returns
"""
retlist=[]
for f in featurelist:
if imputestrategy=='None':
Q1 = df[f].quantile(thresh)
Q3 = df[f].quantile(1-thresh)
else:
Q1 = df[f].fillna(df[f].agg(imputestrategy)).quantile(thresh)
Q3 = df[f].fillna(df[f].agg(imputestrategy)).quantile(1-thresh)
IQR = Q3-Q1
top=(Q3 + 1.5 * IQR)
bottom=(Q1 - 1.5 * IQR)
adet=len(df[(df[f] > top) | (df[f] < bottom)])
if adet>0:
if printorreturn=='print':
print(f"{adet} outliers exists in feature '{f}'")
elif printorreturn=='return':
retlist.append((f,adet))
elif printorreturn=='both':
retlist.append((f,adet))
print(f"{adet} outliers exists in feature '{f}'")
else:
print("wrong value for printorreturn")
raise
if printorreturn=='return':
return retlist
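# Illustrative usage sketch (the column names and values below are made up):
# outliers_IQR flags values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR], the same rule a boxplot uses.
def _demo_outliers_IQR():
    demo = pd.DataFrame({"price": [10, 11, 12, 11, 10, 95], "qty": [1, 2, 2, 3, 2, 2]})
    outliers_IQR(demo, ["price", "qty"], imputestrategy="median", printorreturn="print")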
def outliers_std(df,featurelist,n=3,imputestrategy="None",printorreturn='print'):
"""
if the std is higher than mean it may go negative at the bottom edge, so you cannot catch bottom outliers
Args:
imputestrategy:median, mean, mode, None
printorreturn:(print,return,both). if print, it prints the results, if return, it returns the list of results as a list of tuple,if both, it prints an returns
"""
    retlist = []
    for f in featurelist:
if imputestrategy=='None':
top=df[f].mean()+n*df[f].std()
bottom=df[f].mean()-n*df[f].std()
else:
top=df[f].fillna(df[f].agg(imputestrategy)).mean()+n*df[f].fillna(df[f].agg(imputestrategy)).std()
bottom=df[f].fillna(df[f].agg(imputestrategy)).mean()-n*df[f].fillna(df[f].agg(imputestrategy)).std()
adet=len(df[(df[f] > top) | (df[f] < bottom)])
if adet>0:
if printorreturn=='print':
print(f"{adet} outliers exists in feature '{f}'")
elif printorreturn=='return':
retlist.append((f,adet))
elif printorreturn=='both':
retlist.append((f,adet))
print(f"{adet} outliers exists in feature '{f}'")
else:
print("wrong value for printorreturn")
raise
if printorreturn=='return':
return retlist
def outliers_zs(df,featurelist,thresh_z=3,imputestrategy="None",printorreturn='print'):
"""
finds the outliers to the z score.
Args:
imputestrategy:median, mean, mode, None
printorreturn:(print,return,both). if print, it prints the results, if return, it returns the list of results as a list of tuple,if both, it prints an returns
"""
    retlist = []
    for f in featurelist:
if imputestrategy=='None':
z= np.abs(stats.zscore(df[f]))
else:
z= np.abs(stats.zscore(df[f].fillna(df[f].agg(imputestrategy))))
adet=len(df[np.abs(df[f])>df.iloc[np.where(z>thresh_z)][f].min()])
if adet>0:
if printorreturn=='print':
print(f"{adet} outliers exists in feature '{f}'")
elif printorreturn=='return':
retlist.append((f,adet))
elif printorreturn=='both':
retlist.append((f,adet))
print(f"{adet} outliers exists in feature '{f}'")
else:
print("wrong value for printorreturn")
raise
if printorreturn=='return':
return retlist
def plotHistWithoutOutliers(df,fig=(12,8),thresh=0.25,imputestrategy="median",outliertreat="remove"):
"""this function does not change the dataframe permanently
args:
outliertreat: remove or cap
"""
df=df.select_dtypes("number")
col=4
row=int(len(df.columns)/col)+1
_, axes = plt.subplots(row,col,figsize=fig)
delete=row*col-len(df.columns)
for d in range(delete):
plt.delaxes(axes[row-1,col-d-1])
plt.suptitle("Histograms without outliers")
r=0;c=0;fc=0;
for f in sorted(df.columns):
Q1 = df[f].fillna(df[f].agg(imputestrategy)).quantile(thresh)
Q3 = df[f].fillna(df[f].agg(imputestrategy)).quantile(1-thresh)
IQR = Q3-Q1
t1=(Q3 + 1.5 * IQR)
t2=(Q1 - 1.5 * IQR)
cond=((df[f] > t1) | (df[f] < t2))
r=int(fc/4)
c=fc % 4
if outliertreat=="remove":
df[~cond][f].hist(ax=axes[r,c])
elif outliertreat=="cap":
s=df[f].copy()
s.where(s>t2,t2,inplace=True)
s.where(s<t1,t1,inplace=True)
s.hist(ax=axes[r,c])
else:
print("wrong value for outliertreat")
raise
#axes[r,c].set_xticklabels(axes[r,c].get_xticklabels(), rotation=r,ha='right')
axes[r,c].set_title(f)
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
fc=fc+1
def numpyValuecounts(dizi):
unique, counts = np.unique(dizi, return_counts=True)
return np.asarray((unique, counts)).T
def findNullLikeValues(df,listofvalues=[[-1,-999],["na","yok","tanımsız","bilinmiyor","?"]]):
"""
df:dataframe,
listofvalues: turkish words that might mean null. put your own language equivalents.
first item in this list are the numeric ones, second one contains strings,
default values:[[-1,-999],["na","yok","tanımsız","bilinmiyor","?"]
"""
t=0
for f in df.select_dtypes("number").columns:
x=0
for i in listofvalues[0]:
x+=len(df[df[f]==i])
t+=1
if x>0:
print("{} null-like values in {}".format(x,f))
for f in df.select_dtypes("object"):
x=0
for i in listofvalues[1]:
try: #in case of nulls
x+=len(df[df[f].str.lower()==i])
t+=1
except:
pass
if x>0:
print("{} null-like values in {}".format(x,f))
if t==0:
print("There are no null-like values")
def parse_col_json(column, key):
"""
Args:
column: string
name of the column to be processed.
key: string
name of the dictionary key which needs to be extracted
"""
for index,i in zip(movies_df.index,movies_df[column].apply(json.loads)):
list1=[]
for j in range(len(i)):
list1.append((i[j][key]))# the key 'name' contains the name of the genre
movies_df.loc[index,column]=str(list1)
def plotNumericsBasedOnCategorical(df,cats,nums,fig=(15,15),r=45,aggf='mean',sort=False,hueCol=None):
"""
    NOTE: seaborn's catplot already covers this use case; cross-check against HW1.
    - cats and nums must be array-like.
    - plots are laid out so that each numeric feature occupies a row and each categorical feature a column.
"""
cols=len(cats)
rows=len(nums)
c=0
f, axes = plt.subplots(rows,cols,figsize=fig)
for cat in cats:
        row = 0
        for num in nums:
            ix = axes[row, c] if rows > 1 else axes[c]
if hueCol is None or hueCol==cat:
if sort==True:
gruplu=df.groupby(cat)[num].agg(aggf).sort_values(ascending=False)
else:
gruplu=df.groupby(cat)[num].agg(aggf)
sns.barplot(x=gruplu.index, y=gruplu.values,ax=ix)
else:
if sort==True:
gruplu=df.groupby([cat,hueCol])[num].agg(aggf).sort_values(ascending=False)
else:
gruplu=df.groupby([cat,hueCol])[num].agg(aggf)
temp=gruplu.to_frame()
grupludf=temp.swaplevel(0,1).reset_index()
sns.barplot(x=cat, y=num,ax=ix, data=grupludf, hue=hueCol)
            # plt.xticks(rotation=45)  # if the labels are long, also add horizontalalignment='right'
ix.set_xticklabels(ix.get_xticklabels(), rotation=r,ha='right')
ix.set_title(f"{aggf.upper()} for {num}")
plt.tight_layout()
            row = row + 1
c=c+1
def countifwithConditon(df,feature,condition):
print(df[df[feature].isin(df[condition][feature])].groupby(feature).size().value_counts())
def nullPlot(df):
sns.heatmap(df.isnull(),yticklabels=False,cbar=False,cmap='viridis')
def SuperInfo(df, dropna=False):
"""
Returns a dataframe consisting of datatypes, nuniques, #s of nulls head(1), most frequent item and its frequncy,
where the column names are indices.
"""
dt=pd.DataFrame(df.dtypes, columns=["Type"])
dn=pd.DataFrame(df.nunique(), columns=["Nunique"])
nonnull=pd.DataFrame(df.isnull().sum(), columns=["#of Missing"])
firstT=df.head(1).T.rename(columns={0:"First"})
MostFreqI=pd.DataFrame([df[x].value_counts().head(1).index[0] for x in df.columns], columns=["MostFreqItem"],index=df.columns)
MostFreqC=pd.DataFrame([df[x].value_counts().head(1).values[0] for x in df.columns], columns=["MostFreqCount"],index=df.columns)
return pd.concat([dt,dn,nonnull,MostFreqI,MostFreqC,firstT],axis=1)
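# Minimal usage sketch (hypothetical data): each row of the output describes one
# column of the input frame (dtype, cardinality, missing count, most frequent
# item and its count, and the first value).
def _demo_SuperInfo():
    demo = pd.DataFrame({"a": [1, 1, 2], "b": ["x", None, "x"]})
    print(SuperInfo(demo))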
def prepareListOfCombinationsForRelationFinder(df,i=5):
dict_=dict(df.nunique())
target=[k for k,v in dict_.items() if v<=i]
if len(target)>50:
c=3
elif len(target)>20:
c=4
else:
c=5
comb=[list(combinations(target,x)) for x in range(2,c)]
flat_list = [item for sublist in comb for item in sublist]
return flat_list
def findRelationsAmongFeatures(tpl):
"""
Must be used with multiprocessing module.
args
tpl:tuple consisting of a dataframe and a inner tuple of features of some combinations returning from 'prepareListOfCombinationsForRelationFinder' method. These tuples must be provieded as parallel in a multiprocess-based procedure.
"""
df,item=tpl
list_=list(item)
dist=df.drop_duplicates(list_)[list_]
for i in list_:
uns = dist[i].unique()
for u in uns:
if len(dist[dist[i]==u])==1:
return (list_,i,uns,u)
def getListOfRelationsParallel(df):
if __name__ == "__main__":#windows-jupyter olayı nedeniyle if main
cpu=multiprocessing.cpu_count()
flat_list=prepareListOfCombinationsForRelationFinder(df)
tpl=[(df,i) for i in flat_list]
with Pool(cpu) as p:
list_= p.map(findRelationsAmongFeatures, tqdm(tpl))
return list_
from collections import deque
from datetime import datetime
import operator
import numpy as np
import pytest
import pytz
import pandas as pd
import pandas._testing as tm
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
with pytest.raises(TypeError):
x >= y
with pytest.raises(TypeError):
x > y
with pytest.raises(TypeError):
x < y
with pytest.raises(TypeError):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
with pytest.raises(TypeError):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.slow
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(10 ** 6).reshape(100, -1)
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = pd.DataFrame({"A": dti, "B": ser})
other = pd.DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{
"A": pd.Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
}
)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(
self, all_arithmetic_operators, float_frame, mixed_float_frame
):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith("__r"):
return getattr(operator, op.replace("__r", "__"))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
@pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
def test_arith_flex_frame_mixed(
self, op, int_frame, mixed_int_frame, mixed_float_frame
):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ["__sub__"]:
dtype = dict(B="uint64", C=None)
elif op in ["__add__", "__mul__"]:
dtype = dict(C=None)
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs("a")
col = df["two"]
# after arithmetic refactor, add truediv here
ops = ["add", "sub", "mul", "mod"]
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([], dtype=object)
df_len0 = pd.DataFrame(columns=["A", "B"])
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
with pytest.raises(NotImplementedError, match="fill_value"):
df.add(ser_len0, fill_value="E")
with pytest.raises(NotImplementedError, match="fill_value"):
df_len0.sub(df["A"], axis=None, fill_value=3)
class TestFrameArithmetic:
def test_td64_op_nat_casting(self):
# Make sure we don't accidentally treat timedelta64(NaT) as datetime64
# when calling dispatch_to_series in DataFrame arithmetic
ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]")
df = pd.DataFrame([[1, 2], [3, 4]])
result = df * ser
expected = pd.DataFrame({0: ser, 1: ser})
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
expected = pd.DataFrame(
[[2, 4], [4, 6], [6, 8]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + rowlike
tm.assert_frame_equal(result, expected)
result = rowlike + df
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_collike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
expected = pd.DataFrame(
[[1, 2], [5, 6], [9, 10]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + collike
tm.assert_frame_equal(result, expected)
result = collike + df
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
exvals = [
getattr(df.loc["A"], opname)(rowlike.squeeze()),
getattr(df.loc["B"], opname)(rowlike.squeeze()),
getattr(df.loc["C"], opname)(rowlike.squeeze()),
]
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
if opname in ["__rmod__", "__rfloordiv__"]:
# exvals will have dtypes [f8, i8, i8] so expected will be
# all-f8, but the DataFrame operation will return mixed dtypes
# use exvals[-1].dtype instead of "i8" for compat with 32-bit
# systems/pythons
expected[False] = expected[False].astype(exvals[-1].dtype)
result = getattr(df, opname)(rowlike)
        tm.assert_frame_equal(result, expected)
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/3/24 15:00
Desc: Scrape spot prices of bulk commodities and the corresponding basis data from the SunSirs (100ppi.com) website; data are available from 20110104 to the present.
Note: basis = spot price - futures price (the futures price used here is the settlement price).
Units: gold in CNY/gram, silver in CNY/kg, spot glass in CNY/square meter, spot eggs in CNY/kg, egg futures in CNY/500 kg, everything else in CNY/ton.
The spot coke specification is grade-1 metallurgical coke, while the coke futures specification sits between grade 1 and grade 2, so the coke basis is for reference only.
The iron ore spot price is quoted per wet ton, whereas the iron ore futures price is quoted per dry ton.
Web page: http://www.100ppi.com/sf/
Historical data can be fetched by editing the URL, e.g.: http://www.100ppi.com/sf/day-2017-09-12.html
Known issues with the SunSirs source data:
1. Data for Wednesday 2018-09-12 are missing because the source data for that trading day are missing: http://www.100ppi.com/sf/day-2018-09-12.html
"""
import datetime
import re
import time
import warnings
import pandas as pd
from akshare.futures import cons
from akshare.futures.requests_fun import pandas_read_html_link
from akshare.futures.symbol_var import chinese_to_english
calendar = cons.get_calendar()
def futures_spot_price_daily(start_day=None, end_day=None, vars_list=cons.contract_symbols):
"""
获取某段时间大宗商品现货价格及相应基差
:param start_day: str 开始日期 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象; 默认为当天
:param end_day: str 结束数据 format:YYYY-MM-DD 或 YYYYMMDD 或 datetime.date对象; 默认为当天
:param vars_list: list 合约品种如 [RB, AL]; 默认参数为所有商品
:return: pandas.DataFrame
展期收益率数据:
var 商品品种 string
sp 现货价格 float
near_symbol 临近交割合约 string
near_price 临近交割合约结算价 float
dom_symbol 主力合约 string
dom_price 主力合约结算价 float
near_basis 临近交割合约相对现货的基差 float
dom_basis 主力合约相对现货的基差 float
near_basis_rate 临近交割合约相对现货的基差率 float
dom_basis_rate 主力合约相对现货的基差率 float
date 日期 string YYYYMMDD
"""
start_day = (
cons.convert_date(start_day) if start_day is not None else datetime.date.today()
)
end_day = (
cons.convert_date(end_day)
if end_day is not None
else cons.convert_date(cons.get_latest_data_date(datetime.datetime.now()))
)
df_list = []
while start_day <= end_day:
print(start_day)
temp_df = futures_spot_price(start_day, vars_list)
if temp_df is False:
return pd.concat(df_list).reset_index(drop=True)
elif temp_df is not None:
df_list.append(temp_df)
start_day += datetime.timedelta(days=1)
if len(df_list) > 0:
temp_df = pd.concat(df_list)
temp_df.reset_index(drop=True, inplace=True)
return temp_df
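# Usage sketch (requires network access to 100ppi.com, hence kept as comments;
# the symbols below are only examples):
#   df_range = futures_spot_price_daily(start_day="20200401", end_day="20200403", vars_list=["RB", "CU"])
#   df_one_day = futures_spot_price(date="20200401", vars_list=["RB", "CU"])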
def futures_spot_price(date="20200401", vars_list=cons.contract_symbols):
"""
    Fetch bulk-commodity spot prices and the corresponding basis for a single trading day.
    :param date: trading day, format YYYY-MM-DD or YYYYMMDD, or a datetime.date object; defaults to today when empty
    :param vars_list: list of contract symbols such as RB, AL; defaults to all commodities when empty
    :return: pandas.DataFrame
        basis data:
        var              commodity symbol                                    string
        sp               spot price                                          float
        near_symbol      near-delivery contract                              string
        near_price       settlement price of the near-delivery contract      float
        dom_symbol       dominant contract                                   string
        dom_price        settlement price of the dominant contract           float
        near_basis       basis of the near-delivery contract vs. spot        float
        dom_basis        basis of the dominant contract vs. spot             float
        near_basis_rate  basis rate of the near-delivery contract vs. spot   float
        dom_basis_rate   basis rate of the dominant contract vs. spot        float
        date             date                                                string YYYYMMDD
"""
date = cons.convert_date(date) if date is not None else datetime.date.today()
if date < datetime.date(2011, 1, 4):
raise Exception("数据源开始日期为 20110104, 请将获取数据时间点设置在 20110104 后")
if date.strftime("%Y%m%d") not in calendar:
warnings.warn(f"{date.strftime('%Y%m%d')}非交易日")
return None
u1 = cons.SYS_SPOT_PRICE_LATEST_URL
u2 = cons.SYS_SPOT_PRICE_URL.format(date.strftime("%Y-%m-%d"))
i = 1
while True:
for url in [u2, u1]:
try:
# url = u2
r = pandas_read_html_link(url)
string = r[0].loc[1, 1]
news = "".join(re.findall(r"[0-9]", string))
if news[3:11] == date.strftime("%Y%m%d"):
records = _check_information(r[1], date)
records.index = records["symbol"]
var_list_in_market = [i for i in vars_list if i in records.index]
temp_df = records.loc[var_list_in_market, :]
temp_df.reset_index(drop=True, inplace=True)
return temp_df
else:
time.sleep(3)
except:
print(f"{date.strftime('%Y-%m-%d')}日生意社数据连接失败,第{str(i)}次尝试,最多5次")
i += 1
if i > 5:
print(
f"{date.strftime('%Y-%m-%d')}日生意社数据连接失败, 如果当前交易日是 2018-09-12, 由于生意社源数据缺失, 无法访问, 否则为重复访问已超过5次,您的地址被网站墙了,请保存好返回数据,稍后从该日期起重试"
)
return False
def _check_information(df_data, date):
"""
    Validate the scraped data and compute the basis columns.
    :param df_data: pandas.DataFrame the raw scraped data
    :param date: datetime.date the specific trading day, YYYYMMDD
    :return: pandas.DataFrame
        intermediate data, for example:
symbol spot_price near_contract ... near_basis_rate dom_basis_rate date
CU 49620.00 cu1811 ... -0.002418 -0.003426 20181108
RB 4551.54 rb1811 ... -0.013521 -0.134359 20181108
ZN 22420.00 zn1811 ... -0.032114 -0.076271 20181108
AL 13900.00 al1812 ... 0.005396 0.003957 20181108
AU 274.10 au1811 ... 0.005655 0.020430 20181108
WR 4806.25 wr1903 ... -0.180026 -0.237035 20181108
RU 10438.89 ru1811 ... -0.020969 0.084406 20181108
PB 18600.00 pb1811 ... -0.001344 -0.010215 20181108
AG 3542.67 ag1811 ... -0.000754 0.009408 20181108
BU 4045.53 bu1811 ... -0.129904 -0.149679 20181108
HC 4043.33 hc1811 ... -0.035449 -0.088128 20...
"""
df_data = df_data.loc[:, [0, 1, 2, 3, 5, 6]]
df_data.columns = [
"symbol",
"spot_price",
"near_contract",
"near_contract_price",
"dominant_contract",
"dominant_contract_price",
]
records = pd.DataFrame()
for string in df_data["symbol"].tolist():
if string == "PTA":
news = "PTA"
else:
news = "".join(re.findall(r"[\u4e00-\u9fa5]", string))
if news != "" and news not in ["商品", "价格", "上海期货交易所", "郑州商品交易所", "大连商品交易所"]:
symbol = chinese_to_english(news)
            record = pd.DataFrame(df_data[df_data["symbol"] == string])
from scipy.optimize import leastsq, curve_fit, minimize, OptimizeResult
import matplotlib
from matplotlib import axes
import matplotlib.pyplot as plt
import numpy as np
import math
from typing import Callable
import datetime
import pandas as pd
from io import StringIO
from numpy import mean, std, median
def f_logistic(x:np.ndarray, A, B, C, D) -> np.ndarray:
return (A - D)/(1 + (x/C)**B) + D
def loss_logistic(p, y, x):
A, B, C, D = p
return np.sum((y - f_logistic(x, A, B, C, D))**2)
def f_gompertz(x:np.ndarray, A, B, C, D) -> np.ndarray:
# return D + A * (np.exp(np.exp(B * (C * x))))
return D + C * np.exp(-B * np.exp(-x / A))
def loss_gompertz(p, y, x):
A, B, C, D = p
return np.sum((y - f_gompertz(x, A, B, C, D))**2)
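# The two growth models above, for reference:
#   f_logistic is a 4-parameter logistic, (A - D) / (1 + (x / C)**B) + D, where A and D
#   are the asymptotes at small and large x, C is the midpoint and B the slope.
#   f_gompertz is a shifted Gompertz curve, D + C * exp(-B * exp(-x / A)), where D is the
#   baseline, D + C the upper asymptote, A a time constant and B sets the lag.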
def fit(x:np.ndarray, y:np.ndarray, lossFunc:Callable) -> OptimizeResult:
"""Tries to fit x and y data to using given loss function.
loss function itself contains the function to be fit
Args:
x (np.ndarray): x data
y (np.ndarray): y data
lossFunc (function): loss function
Returns:
OptimizeResult: scipy OptimizeResult object. Member x is numpy.ndarray
which contains the optimization solution.
"""
idx = (~np.isnan(x+y)) # eliminate missing data points
x = x[idx]
y = y[idx]
A0 = y.max() * 2
D0 = y.min() / 2
C0 = x.mean() * 2
B0 = 1
p0 = [A0, B0, C0, D0] # starting values to begin optimization.
r = minimize(lossFunc, x0=p0, args=(y, x), method='CG')
return r
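def _demo_fit_logistic():
    # Minimal sketch on synthetic data (all numbers below are made up): simulate a
    # noisy logistic growth curve and recover its parameters with fit().
    x = np.linspace(0.5, 48, 50)
    y = f_logistic(x, 0.05, 5.0, 24.0, 1.2) + np.random.normal(0, 0.01, x.size)
    r = fit(x, y, loss_logistic)
    print(r.x)  # optimized [A, B, C, D]; convergence of CG is not guaranteed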
def plotfunc(xrange:tuple, f:Callable, r:OptimizeResult, axs:matplotlib.axes) -> tuple:
xp = np.linspace(xrange[0], xrange[1], 100)
yp = f(xp, *r.x)
axs.plot(xp, yp)
return xp, yp
def plotdata(x:np.ndarray, y:np.ndarray, axs:matplotlib.axes, xrange=(-1,-1)) -> tuple:
idx = (~np.isnan(x+y)) # eliminate missing data points
x = x[idx]
y = y[idx]
xmin = xrange[0]
xmax = xrange[1]
if xmin == -1:
xmin = x.min()
if xmax == -1:
xmax = x.max()
x, y = x[x >= xmin], y[x >= xmin]
x, y = x[x <= xmax], y[x <= xmax]
axs.scatter(x, y)
return np.array(x), np.array(y)
def doublingt(xrange:tuple, f:Callable, r:OptimizeResult, axs:matplotlib.axes) -> tuple:
"""Plots doubling time chart in semi-log scale (log y - linear x axis).
Returns x and y lists in a tuple.
Time point for minimum doubling time can be retrieved by:
dx[dy.argmin()]
Args:
xrange (tuple): (xmin, xmax)
f (Callable): fit function
r (OptimizeResult): optimization results
axs (matplotlib.axes): the axis for the plot
Returns:
tuple: x and y as lists
"""
xp = np.linspace(xrange[0], xrange[1], 100)
yp = f(xp, *r.x)
dy = []
for i in range(0, len(xp)-1):
dx = xp[i+1] - xp[i]
_dy = math.log(2) * dx / (math.log(yp[i+1]) - math.log(yp[i]))
dy.append(_dy)
axs.set_yscale('log')
axs.minorticks_on()
axs.yaxis.set_minor_locator(plt.MaxNLocator(4))
axs.grid(b=True, which='major', color='#666666', linestyle='-')
axs.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)
axs.plot(xp[:-1], dy, c='g')
return np.array(xp[:-1]), np.array(dy)
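def _demo_doubling_time():
    # Sketch on synthetic data: known Gompertz parameters are wrapped in an
    # OptimizeResult (instead of running fit) to keep the example deterministic.
    x = np.linspace(0.5, 48, 50)
    params = OptimizeResult(x=np.array([6.0, 8.0, 1.1, 0.05]))  # A, B, C, D
    y = f_gompertz(x, *params.x) + np.random.normal(0, 0.005, x.size)
    fig, (ax_fit, ax_dt) = plt.subplots(1, 2)
    plotdata(x, y, ax_fit)
    plotfunc((x.min(), x.max()), f_gompertz, params, ax_fit)
    dx, dy = doublingt((x.min(), x.max()), f_gompertz, params, ax_dt)
    print("fastest doubling at t =", dx[dy.argmin()])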
def timestr(s:str, century='20') -> datetime:
"""Converts YYMMDD-hhmm to datetime.datetime object.
century is set to 20 as default
Args:
        s (str): date signature string in YYMMDD-hhmm format
century (str): 2-digit century string. 20 as default
Returns:
datetime: datetime.datetime object
"""
return datetime.datetime(
int('{0}{1}'.format(century, s[0:2])), # year
int(s[2:4]), # month
int(s[4:6]), # day
int(s[7:9]), # hr
int(s[9:11]) # min
)
def dt(t0:str, t1:str) -> int:
"""Delta t as minutes between t0 and t1 date-time strings
Args:
t0 (str): date-time string in YYMMDD-hhmm format
t1 (str): date-time string in YYMMDD-hhmm format
Returns:
int: delta t in minutes
"""
return (timestr(t1).timestamp() - timestr(t0).timestamp()) / 60
def readPlates(fn:str) -> (pd.DataFrame, dict):
f = open(fn, 'r')
layout = []
plates = []
plateNames = []
tempFile = StringIO('')
line = f.readline()
header = ''
while line:
if '#' in line:
if header != '':
tempFile.flush()
tempFile.seek(0)
#print(header)
if header == 'layout':
df = pd.read_csv(tempFile, sep='\t', header=None)
layout = df.values.flatten().tolist()
else:
df = pd.read_csv(tempFile, sep='\t', header=None)
plates.append(df.values.flatten().tolist())
plateNames.append(header)
tempFile = StringIO('')
header = line[1:].strip()
else:
tempFile.write(line)
line = f.readline()
if header != '':
tempFile.flush()
tempFile.seek(0)
#print(header)
if header == 'layout':
            df = pd.read_csv(tempFile, sep='\t', header=None)
"""
Methods used by Block.replace and related methods.
"""
import operator
import re
from typing import Optional, Pattern, Union
import numpy as np
from pandas._typing import ArrayLike, Scalar
from pandas.core.dtypes.common import (
is_datetimelike_v_numeric,
is_numeric_v_string_like,
is_re,
is_scalar,
)
from pandas.core.dtypes.missing import isna
def compare_or_regex_search(
a: ArrayLike, b: Union[Scalar, Pattern], regex: bool, mask: ArrayLike
) -> Union[ArrayLike, bool]:
"""
Compare two array_like inputs of the same shape or two scalar values
Calls operator.eq or re.search, depending on regex argument. If regex is
True, perform an element-wise regex matching.
Parameters
----------
a : array_like
b : scalar or regex pattern
regex : bool
mask : array_like
Returns
-------
mask : array_like of bool
"""
def _check_comparison_types(
result: Union[ArrayLike, bool], a: ArrayLike, b: Union[Scalar, Pattern]
):
"""
Raises an error if the two arrays (a,b) cannot be compared.
Otherwise, returns the comparison result as expected.
"""
if is_scalar(result) and isinstance(a, np.ndarray):
type_names = [type(a).__name__, type(b).__name__]
if isinstance(a, np.ndarray):
type_names[0] = f"ndarray(dtype={a.dtype})"
raise TypeError(
f"Cannot compare types {repr(type_names[0])} and {repr(type_names[1])}"
)
if not regex:
op = lambda x: operator.eq(x, b)
else:
op = np.vectorize(
lambda x: bool(re.search(b, x))
if isinstance(x, str) and isinstance(b, (str, Pattern))
else False
)
# GH#32621 use mask to avoid comparing to NAs
if isinstance(a, np.ndarray):
a = a[mask]
if is_numeric_v_string_like(a, b):
# GH#29553 avoid deprecation warnings from numpy
return np.zeros(a.shape, dtype=bool)
elif is_datetimelike_v_numeric(a, b):
# GH#29553 avoid deprecation warnings from numpy
_check_comparison_types(False, a, b)
return False
result = op(a)
if isinstance(result, np.ndarray) and mask is not None:
# The shape of the mask can differ to that of the result
# since we may compare only a subset of a's or b's elements
tmp = np.zeros(mask.shape, dtype=np.bool_)
tmp[mask] = result
result = tmp
_check_comparison_types(result, a, b)
return result
def replace_regex(values: ArrayLike, rx: re.Pattern, value, mask: Optional[np.ndarray]):
"""
Parameters
----------
values : ArrayLike
Object dtype.
rx : re.Pattern
value : Any
mask : np.ndarray[bool], optional
Notes
-----
Alters values in-place.
"""
# deal with replacing values with objects (strings) that match but
# whose replacement is not a string (numeric, nan, object)
    if isna(value):
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
class BaseStrategy:
def __init__(self, df, mv_type):
self.df = df
self.mvType = mv_type
# calculates profit for the specific algorithm
def calculate_profit(self):
# daily profit
self.df["daily_profit"] = np.log(
self.df['close'] / self.df['close'].shift(1)).round(3)
        # calculate strategy profit:
        # signal == 1 means the strategy is long (a buy or a continued hold), and
        # yesterday's signal is applied to today's daily return via shift(1)
self.df['strategy_profit'] = self.df['signal'].shift(
1) * self.df['daily_profit']
# We need to get rid of the NaN generated in the first row:
self.df.dropna(inplace=True)
return self.df
# function to generate buy and sell signals
def _generate_signal_position(self, long_period, short_period):
mva_short = f'{self.mvType}_{short_period}'.lower()
mva_long = f'{self.mvType}_{long_period}'.lower()
        df = pd.DataFrame(index=self.df.index)
import numpy as np
import pytest
from pandas._libs import join as _join
from pandas import Categorical, DataFrame, Index, merge
import pandas._testing as tm
class TestIndexer:
@pytest.mark.parametrize(
"dtype", ["int32", "int64", "float32", "float64", "object"]
)
def test_outer_join_indexer(self, dtype):
indexer = _join.outer_join_indexer
left = np.arange(3, dtype=dtype)
right = np.arange(2, 5, dtype=dtype)
empty = np.array([], dtype=dtype)
result, lindexer, rindexer = indexer(left, right)
assert isinstance(result, np.ndarray)
assert isinstance(lindexer, np.ndarray)
assert isinstance(rindexer, np.ndarray)
tm.assert_numpy_array_equal(result, np.arange(5, dtype=dtype))
exp = np.array([0, 1, 2, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([-1, -1, 0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
result, lindexer, rindexer = indexer(empty, right)
tm.assert_numpy_array_equal(result, right)
exp = np.array([-1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
result, lindexer, rindexer = indexer(left, empty)
tm.assert_numpy_array_equal(result, left)
exp = np.array([0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([-1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
def test_left_join_indexer_unique():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([2, 2, 3, 4, 4], dtype=np.int64)
result = _join.left_join_indexer_unique(b, a)
expected = np.array([1, 1, 2, 3, 3], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_left_outer_join_bug():
left = np.array(
[
0,
1,
0,
1,
1,
2,
3,
1,
0,
2,
1,
2,
0,
1,
1,
2,
3,
2,
3,
2,
1,
1,
3,
0,
3,
2,
3,
0,
0,
2,
3,
2,
0,
3,
1,
3,
0,
1,
3,
0,
0,
1,
0,
3,
1,
0,
1,
0,
1,
1,
0,
2,
2,
2,
2,
2,
0,
3,
1,
2,
0,
0,
3,
1,
3,
2,
2,
0,
1,
3,
0,
2,
3,
2,
3,
3,
2,
3,
3,
1,
3,
2,
0,
0,
3,
1,
1,
1,
0,
2,
3,
3,
1,
2,
0,
3,
1,
2,
0,
2,
],
dtype=np.int64,
)
right = np.array([3, 1], dtype=np.int64)
max_groups = 4
lidx, ridx = _join.left_outer_join(left, right, max_groups, sort=False)
exp_lidx = np.arange(len(left), dtype=np.int64)
exp_ridx = -np.ones(len(left), dtype=np.int64)
exp_ridx[left == 1] = 1
exp_ridx[left == 3] = 0
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_inner_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = _join.inner_join_indexer(a, b)
index_exp = np.array([3, 5], dtype=np.int64)
tm.assert_almost_equal(index, index_exp)
aexp = np.array([2, 4], dtype=np.int64)
bexp = np.array([1, 2], dtype=np.int64)
tm.assert_almost_equal(ares, aexp)
tm.assert_almost_equal(bres, bexp)
a = np.array([5], dtype=np.int64)
b = np.array([5], dtype=np.int64)
index, ares, bres = _join.inner_join_indexer(a, b)
tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_outer_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = _join.outer_join_indexer(a, b)
index_exp = np.array([0, 1, 2, 3, 4, 5, 7, 9], dtype=np.int64)
tm.assert_almost_equal(index, index_exp)
aexp = np.array([-1, 0, 1, 2, 3, 4, -1, -1], dtype=np.int64)
bexp = np.array([0, -1, -1, 1, -1, 2, 3, 4], dtype=np.int64)
tm.assert_almost_equal(ares, aexp)
tm.assert_almost_equal(bres, bexp)
a = np.array([5], dtype=np.int64)
b = np.array([5], dtype=np.int64)
index, ares, bres = _join.outer_join_indexer(a, b)
tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_left_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = _join.left_join_indexer(a, b)
tm.assert_almost_equal(index, a)
aexp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
bexp = np.array([-1, -1, 1, -1, 2], dtype=np.int64)
tm.assert_almost_equal(ares, aexp)
tm.assert_almost_equal(bres, bexp)
a = np.array([5], dtype=np.int64)
b = np.array([5], dtype=np.int64)
index, ares, bres = _join.left_join_indexer(a, b)
tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_left_join_indexer2():
idx = Index([1, 1, 2, 5])
idx2 = Index([1, 2, 5, 7, 9])
res, lidx, ridx = _join.left_join_indexer(idx2.values, idx.values)
exp_res = np.array([1, 1, 2, 5, 7, 9], dtype=np.int64)
tm.assert_almost_equal(res, exp_res)
exp_lidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_almost_equal(lidx, exp_lidx)
exp_ridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.int64)
tm.assert_almost_equal(ridx, exp_ridx)
def test_outer_join_indexer2():
idx = Index([1, 1, 2, 5])
idx2 = Index([1, 2, 5, 7, 9])
res, lidx, ridx = _join.outer_join_indexer(idx2.values, idx.values)
exp_res = np.array([1, 1, 2, 5, 7, 9], dtype=np.int64)
tm.assert_almost_equal(res, exp_res)
exp_lidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_almost_equal(lidx, exp_lidx)
exp_ridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.int64)
tm.assert_almost_equal(ridx, exp_ridx)
def test_inner_join_indexer2():
idx = | Index([1, 1, 2, 5]) | pandas.Index |
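# Hedged sketch (not part of the pandas test suite): how the (index, lindexer, rindexer)
# triple returned by the join indexers above can be used to align two value arrays,
# with -1 marking positions that have no match on that side.
import numpy as np
from pandas._libs import join as _join

def take_with_nan(values, indexer):
    # Gather values by position, inserting NaN wherever the indexer is -1.
    out = values.astype(np.float64)[indexer]
    out[indexer == -1] = np.nan
    return out

left = np.array([1, 2, 3, 4, 5], dtype=np.int64)
right = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, lidx, ridx = _join.outer_join_indexer(left, right)
left_aligned = take_with_nan(left, lidx)    # [nan, 1, 2, 3, 4, 5, nan, nan]
right_aligned = take_with_nan(right, ridx)  # [0, nan, nan, 3, nan, 5, 7, 9]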
"""
Various processing utility functions
Usage:
import only
"""
import os
import pandas as pd
from pycytominer.cyto_utils import infer_cp_features
def get_recode_cols():
return_dict = {}
return_dict["recode_cols"] = {
"Metadata_CellLine": "Metadata_clone_number",
"Metadata_Dosage": "Metadata_treatment",
}
return_dict["recode_sample"] = {
"Clone A": "CloneA",
"Clone E": "CloneE",
"Clone E ": "CloneE",
"WT": "WT_parental",
"WT parental": "WT_parental",
}
return_dict["recode_treatment"] = {
"0.0": "0.1% DMSO",
"DMSO": "0.1% DMSO",
"0.7": "2.1 nM bortezomib",
"7.0": "21 nM bortezomib",
"70.0": "210 nM bortezomib",
"bortezomib": "21 nM bortezomib",
}
return return_dict
def load_data(
batch,
plates="all",
profile_dir="profiles",
suffix="normalized_feature_selected.csv.gz",
combine_dfs=False,
add_cell_count=False,
harmonize_cols=False,
cell_count_dir="cell_counts",
):
batch_dir = os.path.join(profile_dir, batch)
plate_folders = [x for x in os.listdir(batch_dir) if ".DS_Store" not in x]
plate_files = [
os.path.join(batch_dir, x, f"{x}_{suffix}")
for x in plate_folders
if ".DS_Store" not in x
]
plate_data = {}
for plate_idx in range(0, len(plate_files)):
plate = plate_folders[plate_idx]
if plates != "all":
if plate not in plates:
continue
df = pd.read_csv(plate_files[plate_idx]).assign(Metadata_batch=batch)
if add_cell_count:
df = merge_cell_count(df, batch, cell_count_dir=cell_count_dir)
if harmonize_cols:
recode_cols = get_recode_cols()
# Update columns and specific entries
if batch == "2019_06_25_Batch3":
df = df.assign(Metadata_treatment="Untreated")
df = df.rename(recode_cols["recode_cols"], axis="columns")
df.Metadata_clone_number = df.Metadata_clone_number.astype(str)
df.Metadata_treatment = df.Metadata_treatment.astype(str)
df.Metadata_clone_number = df.Metadata_clone_number.replace(
recode_cols["recode_sample"]
)
df.Metadata_treatment = df.Metadata_treatment.replace(
recode_cols["recode_treatment"]
)
plate_data[plate] = df
if combine_dfs:
plate_data = convert_data(plate_data)
return plate_data
def merge_cell_count(df, batch, cell_count_dir="cell_counts"):
# Load cell counts for the specific plates
count_files = [
os.path.join(cell_count_dir, x)
for x in os.listdir(cell_count_dir)
if batch in x
]
all_plate_dfs = []
for count_file in count_files:
plate = os.path.basename(count_file)
plate = plate.replace(batch, "").replace("cell_count.tsv", "").strip("_")
plate_df = | pd.read_csv(count_file, sep="\t") | pandas.read_csv |
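# Hedged sketch (not from the original module): how per-plate cell counts are typically
# merged back into a profile dataframe. The join keys (Metadata_Plate, Metadata_Well)
# are assumptions for illustration only - the real count files may use different columns.
import pandas as pd

def merge_cell_count_sketch(profile_df, count_df, plate_name):
    # Tag the count table with its plate and left-join it onto the profiles.
    count_df = count_df.assign(Metadata_Plate=plate_name)
    return profile_df.merge(count_df, on=["Metadata_Plate", "Metadata_Well"], how="left")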
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Modifications copyright (C) 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import sys
import random
import torch
import numpy as np
import pandas as pd
from tqdm import tqdm, trange
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.nn import CrossEntropyLoss
from tensorboardX import SummaryWriter
from pytorch_pretrained_bert.file_utils import WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling import BertForSequenceClassification
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam
from run_classifier_dataset_utils import processors, convert_examples_to_features, compute_metrics
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
logger = logging.getLogger(__name__)
def main():
"""Fine-tune BERT for a given task with given parameters."""
# Define all parameters, using argparse/Command Line Interface.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
def add_args():
"""Add all possible options and defaults to the parser."""
# Hyperparameters of BERT
# Parameters often changed
parser.add_argument("--bert_model",
default="bert-base-uncased",
type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, "
"bert-base-multilingual-uncased, bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--train_batch_size",
default=16,
type=int,
help="Total batch size for training.")
parser.add_argument("--learning_rate",
default=2e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
# Parameters usually unchanged
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
# Parameters of the task
parser.add_argument("--task_name",
default="node",
type=str,
help="The name of the task to train. One of node, political-as, "
"political-ru, political-asu, agreement, node-ext, political-as-topics,"
"political-ru-topics, political-asu-topics, agreement-topics")
parser.add_argument("--input_to_use",
type=str,
default="both",
help="Which input to use. One of both, org, response, response-org.")
# Parameters for reproduction
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
# Parameters for where to save/load data
parser.add_argument("--data_dir",
default="../data",
type=str,
help="The input data dir. Should contain the .tsv file (or other data files) for the task.")
parser.add_argument("--output_dir",
default="run",
type=str,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument('--overwrite_output_dir',
action='store_true',
help="Overwrite the content of the output directory")
# Parameters to decide what to do (train, test, crossval, save the model)
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_train_eval",
action='store_true',
help="Whether to run training and eval.")
parser.add_argument('--n_times',
type=int,
default=10,
help="Number of restarts for every parameter setting in train&eval mode")
parser.add_argument("--do_cross_val",
action='store_true',
help="Whether to run cross-validation.")
parser.add_argument("--do_save",
action='store_true',
help="Whether to save the resulting model.")
parser.add_argument("--do_visualization",
action='store_true',
help="Whether to run visualization.")
# Additional parameters
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument('--log_level',
type=str,
default="info",
help="Verbosity of logging output. One of info or warn.")
# Add all parameters to the parser and parse them.
add_args()
args = parser.parse_args()
# Set up all parameters given the CLI arguments.
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
args.device = device
task_name = args.task_name.lower()
processor = processors[task_name](args.input_to_use)
label_list = processor.get_labels()
num_labels = len(label_list)
global_step = 0
tr_loss = 0
tb_writer = SummaryWriter()
# Prepare the logging.
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.log_level == "info" else logging.WARN)
logger.info("device: {} n_gpu: {}".format(
device, n_gpu))
# Check the arguments and fail if the arguments are invalid.
if not args.do_train and not args.do_eval and not args.do_cross_val and not args.do_visualization \
and not args.do_train_eval:
raise ValueError("At least one of `do_train`, `do_eval` `do_cross_val` "
"or `do_visualization` or 'do_train_eval` must be True.")
if os.path.exists(args.output_dir) and os.listdir(
args.output_dir) and not args.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty. "
"Use the --overwrite_output_dir option.".format(args.output_dir))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
# Calculate the train_batch_size if gradient accumulation is used
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
# Set all seeds for reproducibility
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def get_features_examples(mode):
"""Returns the features and examples of train or test mode."""
def convert(split, modus, exs):
"""Converts the examples or load them from cache."""
cached_features_file = os.path.join(args.data_dir, 'cache', '{0}_{1}_{2}_{3}_{4}_{5}'.format(modus,
list(filter(None, args.bert_model.split('/'))).pop(),
str(args.max_seq_length),
str(task_name), str(args.input_to_use), split))
# Try to load the cached features.
try:
with open(cached_features_file, "rb") as reader:
fs = pickle.load(reader)
            # Create and cache the features.
except FileNotFoundError:
if not os.path.exists(os.path.join(args.data_dir, 'cache')):
os.makedirs(os.path.join(args.data_dir, 'cache'))
fs = convert_examples_to_features(
exs, label_list, args.max_seq_length, tokenizer)
logger.info('Saving {0} features into cached file {1}'.format(mode, cached_features_file))
with open(cached_features_file, "wb") as writer:
pickle.dump(fs, writer)
return fs
# Return the features, examples and dataframes depending on the mode.
if mode == "train":
train_ex, df = processor.get_train_examples(args.data_dir)
return convert("X", mode, train_ex), train_ex, df
elif mode == "dev":
dev_ex, df = processor.get_dev_examples(args.data_dir)
return convert("X", mode, dev_ex), dev_ex, df
elif mode == "cross_val":
data = processor.get_splits(args.data_dir)
train_f_list, train_e_list, train_df_list, test_f_list, test_e_list, test_df_list = ([] for _ in range(6))
for i, (train_ex, train_df, test_ex, test_df) in enumerate(data):
train_e_list.append(train_ex)
train_df_list.append(train_df)
test_e_list.append(test_ex)
test_df_list.append(test_df)
# Create features from the examples
train_f_list.append(convert(i, "train", train_ex))
test_f_list.append(convert(i, "dev", test_ex))
return train_f_list, train_e_list, train_df_list, test_f_list, test_e_list, test_df_list
else:
raise ValueError("Invalid feature mode.")
def create_tensor_dataset(exfeatures):
"""Creates a TensoDataset out of the features."""
all_input_ids = torch.tensor([f.input_ids for f in exfeatures], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in exfeatures], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in exfeatures], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in exfeatures], dtype=torch.long)
return TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
def do_training(train_fs, train_exs):
"""Runs BERT fine-tuning."""
        # Allow writing to the enclosing-scope variable global_step
nonlocal global_step
# Create the batched training data out of the features.
train_data = create_tensor_dataset(train_fs)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
# Calculate the number of optimization steps.
num_train_optimization_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer.
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
# Log some information about the training.
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_exs))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
        # Set the model to training mode and train for the configured number of epochs.
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
# Iterate over all batches.
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
# Get the Logits and calculate the loss.
logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
loss = CrossEntropyLoss()(logits.view(-1, num_labels), label_ids.view(-1))
# Scale the loss in gradient accumulation mode.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
# Calculate the gradients.
loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
# Update the weights every gradient_accumulation_steps steps.
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
global_step += 1
tb_writer.add_scalar('lr', optimizer.get_lr()[0], global_step)
tb_writer.add_scalar('loss', loss.item(), global_step)
def do_save():
"""Saves the current model, tokenizer and arguments."""
nonlocal model
nonlocal tokenizer
model_to_save = model.module if hasattr(model, 'module') else model
# Using the predefined names, we can load using `from_pretrained`.
output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
# Save the trained model, configuration and tokenizer
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(args.output_dir)
# Save the training arguments together with the trained model.
output_args_file = os.path.join(args.output_dir, 'training_args.bin')
torch.save(args, output_args_file)
def do_eval(eval_features, eval_examples):
"""Do evaluation on the current model."""
        # Log some information.
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
# Get the eval data and create a sequential dataloader.
eval_data = create_tensor_dataset(eval_features)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Set the model to eval mode (disable dropout)
model.eval()
eval_loss = 0
nb_eval_steps = 0
preds = []
out_label_ids = None
# Iterate over the evaluation data.
for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
# Forward pass with deactivated autograd engine.
with torch.no_grad():
logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
# Calculate eval loss.
tmp_eval_loss = CrossEntropyLoss()(logits.view(-1, num_labels), label_ids.view(-1))
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if len(preds) == 0:
preds.append(logits.detach().cpu().numpy())
out_label_ids = label_ids.detach().cpu().numpy()
else:
preds[0] = np.append(
preds[0], logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(
out_label_ids, label_ids.detach().cpu().numpy(), axis=0)
# Calculate the mean loss and get all predictions.
eval_loss = eval_loss / nb_eval_steps
loss = tr_loss/global_step if args.do_train else None
preds = preds[0]
preds = np.argmax(preds, axis=1)
# Compute the metrics for the given task
result = compute_metrics(task_name, preds, out_label_ids)
# Save additional information in the result dict.
result['eval_loss'] = eval_loss
result['global_step'] = global_step
result['loss'] = loss
# Save all settings for external evaluation
result['_task'] = task_name
result['_input_mode'] = args.input_to_use
result['_learning_rate'] = args.learning_rate
result['_bert-model'] = args.bert_model
result['_batch_size'] = args.train_batch_size
result['_warmup'] = args.warmup_proportion
result['_num_epochs'] = args.num_train_epochs
result['_seq_len'] = args.max_seq_length
result['_seed'] = args.seed
result['_gradient_acc'] = args.gradient_accumulation_steps
return result, preds
def save_results(result_list, pred_list):
"""Saves the results and the predictions."""
# Save the results in a text file.
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "a") as writer:
logger.info("***** Eval results *****")
for i, result_dict in enumerate(result_list):
logger.info("Run %i", i)
writer.write("Run %i\n" % i)
for key in sorted(result_dict.keys()):
if not key.startswith("_"):
logger.info(" %s = %s", key, str(result_dict[key]))
writer.write("%s = %s\n" % (key, str(result_dict[key])))
# Save the results and predictions in csv and tsv files.
output_csv_file = os.path.join(args.output_dir, "../eval_results.tsv")
output_preds_file = os.path.join(args.output_dir, "../eval_preds.csv")
df_res = | pd.DataFrame(result_list) | pandas.DataFrame |
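# Hedged sketch of a likely continuation of save_results(): turning the result dicts and
# predictions into dataframes and appending them to the shared eval_results.tsv /
# eval_preds.csv files. Appending with mode="a" and writing the header only when the file
# does not yet exist are assumptions, not necessarily the original behaviour.
import os
import pandas as pd

def append_results_sketch(result_list, pred_list, output_csv_file, output_preds_file):
    df_res = pd.DataFrame(result_list)
    df_res.to_csv(output_csv_file, sep="\t", mode="a",
                  header=not os.path.exists(output_csv_file), index=False)
    df_preds = pd.DataFrame(pred_list)
    df_preds.to_csv(output_preds_file, mode="a",
                    header=not os.path.exists(output_preds_file), index=False)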
#%%
from pymaid_creds import url, name, password, token
import pymaid
rm = pymaid.CatmaidInstance(url, token, name, password)
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import numpy.random as random
import gzip
import csv
import connectome_tools.celltype as ct
import connectome_tools.process_matrix as pm
import connectome_tools.process_graph as pg
from tqdm import tqdm
from joblib import Parallel, delayed
import networkx as nx
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
# font settings
plt.rcParams['font.size'] = 5
plt.rcParams['font.family'] = 'arial'
# load pairs
pairs = pm.Promat.get_pairs()
ipsi_pair_ids = pm.Promat.load_pairs_from_annotation('mw ipsilateral axon', pairs, return_type='all_pair_ids')
bilateral_pair_ids = pm.Promat.load_pairs_from_annotation('mw bilateral axon', pairs, return_type='all_pair_ids')
contra_pair_ids = pm.Promat.load_pairs_from_annotation('mw contralateral axon', pairs, return_type='all_pair_ids')
dVNC_pair_ids = pm.Promat.load_pairs_from_annotation('mw dVNC', pairs, return_type='all_pair_ids')
dSEZ_pair_ids = pm.Promat.load_pairs_from_annotation('mw dSEZ', pairs, return_type='all_pair_ids')
RGN_pair_ids = pm.Promat.load_pairs_from_annotation('mw RGN', pairs, return_type='all_pair_ids')
sensories_pair_ids = [pm.Promat.load_pairs_from_annotation(x, pairs, return_type='all_pair_ids') for x in pymaid.get_annotated('mw brain inputs').name]
all_sensories = [x for sublist in sensories_pair_ids for x in sublist]
# %%
# EXPERIMENT 1: removing edges from contralateral and bilateral neurons -> effect on path length?
# load previously generated paths
all_edges_combined = pd.read_csv('data/edges_threshold/ad_all-paired-edges.csv', index_col=0)
# iterations for random edge removal as control
n_init = 40
# excise edges and generate graphs
e_contra_contra, e_contra_contra_control = pg.Prograph.excise_edge_experiment(all_edges_combined, np.setdiff1d(contra_pair_ids, all_sensories), 'contralateral', n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
e_bi_contra, e_bi_contra_control = pg.Prograph.excise_edge_experiment(all_edges_combined, np.setdiff1d(bilateral_pair_ids, all_sensories), 'contralateral', n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
e_bi_ipsi, e_bi_ipsi_control = pg.Prograph.excise_edge_experiment(all_edges_combined, np.setdiff1d(bilateral_pair_ids, all_sensories), 'ipsilateral', n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
e_all_contra, e_all_contra_control = pg.Prograph.excise_edge_experiment(all_edges_combined, np.setdiff1d(bilateral_pair_ids + contra_pair_ids, all_sensories), 'contralateral', n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
# %%
# this chunk is incomplete
# write all graphs to graphml
# read all graph from graphml
graph = pg.Analyze_Nx_G(all_edges_combined, graph_type='directed')
shuffled_graphs = Parallel(n_jobs=-1)(delayed(nx.readwrite.graphml.read_graphml)(f'interhemisphere/csv/shuffled_graphs/iteration-{i}.graphml', node_type=int, edge_key_type=str) for i in tqdm(range(n_init)))
shuffled_graphs = [pg.Analyze_Nx_G(edges=x.edges, graph=x) for x in shuffled_graphs]
# %%
# generate and save paths
cutoff=5
# generate and save paths for experimental
save_path = [f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-contra',
f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-contra',
f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-ipsi',
f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-all-contra']
experimental = [e_contra_contra, e_bi_contra, e_bi_ipsi, e_all_contra]
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(experimental[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=save_path[i]) for i in tqdm((range(len(experimental)))))
# generate and save paths for controls
save_path = f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-contra_CONTROL-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(e_contra_contra_control[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-contra_CONTROL-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(e_bi_contra_control[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-ipsi_CONTROL-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(e_bi_ipsi_control[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-all-contra_CONTROL-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(e_all_contra_control[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
# %%
# analyze paths: total and count per # hops
def process_paths(excise_paths, control_paths, edges_removed):
excise_count = len(excise_paths)
control_counts = [len(x) for x in control_paths]
path_counts_data = []
for row in zip(control_counts, [f'control-{edges_removed}']*len(control_counts)):
path_counts_data.append(row)
path_counts_data.append([excise_count, f'excised-{edges_removed}'])
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/processed/excised_graph_{edges_removed}.csv')
# count per # hops
excise_path_counts = [len(x) for x in excise_paths]
control_path_counts = [[len(x) for x in path] for path in control_paths]
path_counts_length_data = []
for i, path_length in enumerate(control_path_counts):
for row in zip(path_length, [f'control-{edges_removed}']*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
for row in zip(excise_path_counts, [f'excised-{edges_removed}']*len(excise_path_counts), [0]*len(excise_path_counts)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/processed/excised_graph_{edges_removed}_path_lengths.csv')
cutoff=5
n_init = 40
excise_Cc_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-contra.csv.gz')
control_Cc_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-contra_CONTROL-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths(excise_Cc_paths, control_Cc_paths, edges_removed='Contra-contra')
excise_Bc_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-contra.csv.gz')
control_Bc_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-contra_CONTROL-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths(excise_Bc_paths, control_Bc_paths, edges_removed='Bilateral-contra')
excise_Bi_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-ipsi.csv.gz')
control_Bi_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-ipsi_CONTROL-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths(excise_Bi_paths, control_Bi_paths, edges_removed='Bilateral-ipsi')
excise_Ac_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-all-contra.csv.gz')
control_Ac_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-all-contra_CONTROL-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths(excise_Ac_paths, control_Ac_paths, edges_removed='All-contra')
# wildtype paths
graph_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/all_paths_sens-to-dVNC_cutoff{cutoff}.csv.gz')
excise_count = len(graph_paths)
path_counts_data = []
path_counts_data.append([excise_count, f'wildtype'])
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/processed/wildtype.csv')
path_counts_length_data = []
excise_path_counts = [len(x) for x in graph_paths]
for row in zip(excise_path_counts, [f'wildtype']*len(excise_path_counts), [0]*len(excise_path_counts)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/processed/wildtype_path_lengths.csv')
# %%
##########
# EXPERIMENT 2: removing random number of ipsi vs contra edges, effect on paths
#
# load previously generated paths
all_edges_combined = pd.read_csv('data/edges_threshold/ad_all-paired-edges.csv', index_col=0)
# iterations for random edge removal as control
n_init = 8
# excise edges and generate graphs
random_ipsi500, random_contra500 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined, 500, n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
random_ipsi1000, random_contra1000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined, 1000, n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
random_ipsi2000, random_contra2000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined, 2000, n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
random_ipsi4000, random_contra4000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined, 4000, n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
# %%
# generate and save paths
cutoff=5
# generate and save paths for controls
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-500-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi500[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-500-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra500[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-1000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi1000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-1000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra1000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-2000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi2000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-2000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra2000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-4000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi4000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-4000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra4000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
# %%
# analyze paths: total and count per # hops
def process_paths_ipsi_contra(ipsi_paths, contra_paths, count_removed):
ipsi_counts = [len(x) for x in ipsi_paths]
contra_counts = [len(x) for x in contra_paths]
path_counts_data = []
for row in zip(ipsi_counts, [f'ipsi-{count_removed}']*len(ipsi_counts)):
path_counts_data.append(row)
for row in zip(contra_counts, [f'contra-{count_removed}']*len(contra_counts)):
path_counts_data.append(row)
path_counts_data = | pd.DataFrame(path_counts_data, columns=['count', 'condition']) | pandas.DataFrame |
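# Hedged sketch (not from the original script): the per-hop aggregation that process_paths()
# performs above, factored into a standalone helper that process_paths_ipsi_contra() would
# presumably apply to its ipsi and contra path lists as well.
import pandas as pd

def count_paths_per_length(paths_per_iteration, condition):
    rows = []
    for i, paths in enumerate(paths_per_iteration):
        for path in paths:
            rows.append((len(path), condition, i))
    df = pd.DataFrame(rows, columns=['path_length', 'condition', 'N'])
    df['value'] = 1  # dummy column so that groupby().count() has something to count
    return df.groupby(['condition', 'N', 'path_length']).count()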
"""Construct the clean data set"""
import pandas as pd
from pathlib import PurePath
import numpy as np
import datetime as dt
from pandas.tseries.holiday import USFederalHolidayCalendar
from scipy.interpolate import interp1d
from sklearn.svm import SVR
#========================================================================#
# interpolation functions #
#========================================================================#
def det_interp(x, kind='linear'):
""" A helper function for deterministic time seres interpolation
Args
----
x -- a dummy variable for the dataframe's columns
kwargs
------
kind -- one of scipy.interpolate.inter1d kwargs
return
------
interpolated values of the whole time series
"""
index = pd.Series(np.arange(x.shape[0]), index=x.index)
notnull = | pd.notnull(x) | pandas.notnull |
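# Hedged sketch of how det_interp presumably finishes, based on its docstring and the
# scipy import above: fit interp1d on the non-null samples (keyed by positional index)
# and evaluate it across the whole series. Extrapolation via fill_value="extrapolate"
# is an assumption, not necessarily the original behaviour.
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d

def det_interp_sketch(x, kind='linear'):
    index = pd.Series(np.arange(x.shape[0]), index=x.index)
    notnull = pd.notnull(x)
    f = interp1d(index[notnull].values, x[notnull].values.astype(float),
                 kind=kind, fill_value="extrapolate")
    return pd.Series(f(index.values), index=x.index)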
import sys
import timeit
import warnings
import numpy as np
import pandas as pd
from abc import abstractmethod
from math import ceil
from dask import dataframe as dd
from tqdm.auto import tqdm
from .tqdm_dask_progressbar import TQDMDaskProgressBar
from .base import (
_SwifterBaseObject,
suppress_stdout_stderr_logging,
ERRORS_TO_HANDLE,
SAMPLE_SIZE,
N_REPEATS,
)
class _SwifterObject(_SwifterBaseObject):
def __init__(
self,
pandas_obj,
npartitions=None,
dask_threshold=1,
scheduler="processes",
progress_bar=True,
progress_bar_desc=None,
allow_dask_on_strings=False,
):
super().__init__(base_obj=pandas_obj, npartitions=npartitions)
if self._obj.index.duplicated().any():
warnings.warn(
"This pandas object has duplicate indices, and swifter may not be able to improve performance. Consider resetting the indices with `df.reset_index(drop=True)`."
)
self._SAMPLE_SIZE = SAMPLE_SIZE if self._nrows > (25 * SAMPLE_SIZE) else int(ceil(self._nrows / 25))
self._dask_threshold = dask_threshold
self._scheduler = scheduler
self._progress_bar = progress_bar
self._progress_bar_desc = progress_bar_desc
self._allow_dask_on_strings = allow_dask_on_strings
def set_dask_threshold(self, dask_threshold=1):
"""
Set the threshold (seconds) for maximum allowed estimated duration of pandas apply before switching to dask
"""
self._dask_threshold = dask_threshold
return self
def set_dask_scheduler(self, scheduler="processes"):
"""
Set the dask scheduler
:param scheduler: String, ["threads", "processes"]
"""
self._scheduler = scheduler
return self
def progress_bar(self, enable=True, desc=None):
"""
Turn on/off the progress bar, and optionally add a custom description
"""
self._progress_bar = enable
self._progress_bar_desc = desc
return self
def allow_dask_on_strings(self, enable=True):
"""
Override the string processing default, which is to not use dask if a string is contained in the pandas object
"""
self._allow_dask_on_strings = enable
return self
def rolling(self, window, min_periods=None, center=False, win_type=None, on=None, axis=0, closed=None):
"""
Create a swifter rolling object
"""
kwds = {
"window": window,
"min_periods": min_periods,
"center": center,
"win_type": win_type,
"on": on,
"axis": axis,
"closed": closed,
}
return Rolling(
self._obj,
npartitions=self._npartitions,
dask_threshold=self._dask_threshold,
scheduler=self._scheduler,
progress_bar=self._progress_bar,
progress_bar_desc=self._progress_bar_desc,
allow_dask_on_strings=self._allow_dask_on_strings,
**kwds,
)
def resample(
self,
rule,
axis=0,
closed=None,
label=None,
convention="start",
kind=None,
loffset=None,
base=0,
on=None,
level=None,
origin=None,
offset=None,
):
"""
Create a swifter resampler object
"""
kwds = {
"rule": rule,
"axis": axis,
"closed": closed,
"label": label,
"convention": convention,
"kind": kind,
"loffset": loffset,
"base": base,
"on": on,
"level": level,
"origin": origin,
"offset": offset,
}
if not base:
kwds.pop("base")
return Resampler(
self._obj,
npartitions=self._npartitions,
dask_threshold=self._dask_threshold,
scheduler=self._scheduler,
progress_bar=self._progress_bar,
progress_bar_desc=self._progress_bar_desc,
allow_dask_on_strings=self._allow_dask_on_strings,
**kwds,
)
@pd.api.extensions.register_series_accessor("swifter")
class SeriesAccessor(_SwifterObject):
def _wrapped_apply(self, func, convert_dtype=True, args=(), **kwds):
def wrapped():
with suppress_stdout_stderr_logging():
self._obj.iloc[: self._SAMPLE_SIZE].apply(func, convert_dtype=convert_dtype, args=args, **kwds)
return wrapped
def _dask_apply(self, func, convert_dtype, *args, **kwds):
sample = self._obj.iloc[: self._npartitions * 2]
with suppress_stdout_stderr_logging():
meta = sample.apply(func, convert_dtype=convert_dtype, args=args, **kwds)
try:
            # check that the dask map_partitions result matches the pandas apply result
with suppress_stdout_stderr_logging():
tmp_df = (
dd.from_pandas(sample, npartitions=self._npartitions)
.map_partitions(func, *args, meta=meta, **kwds)
.compute(scheduler=self._scheduler)
)
self._validate_apply(
tmp_df.equals(meta), error_message="Dask map-partitions sample does not match pandas apply sample."
)
if self._progress_bar:
with TQDMDaskProgressBar(desc=self._progress_bar_desc or "Dask Apply"):
return (
dd.from_pandas(self._obj, npartitions=self._npartitions)
.map_partitions(func, *args, meta=meta, **kwds)
.compute(scheduler=self._scheduler)
)
else:
return (
dd.from_pandas(self._obj, npartitions=self._npartitions)
.map_partitions(func, *args, meta=meta, **kwds)
.compute(scheduler=self._scheduler)
)
except ERRORS_TO_HANDLE:
# if map partitions doesn't match pandas apply, we can use dask apply, but it will be a bit slower
if self._progress_bar:
with TQDMDaskProgressBar(desc=self._progress_bar_desc or "Dask Apply"):
return (
dd.from_pandas(self._obj, npartitions=self._npartitions)
.apply(lambda x: func(x, *args, **kwds), convert_dtype=convert_dtype, meta=meta)
.compute(scheduler=self._scheduler)
)
else:
return (
dd.from_pandas(self._obj, npartitions=self._npartitions)
.apply(lambda x: func(x, *args, **kwds), convert_dtype=convert_dtype, meta=meta)
.compute(scheduler=self._scheduler)
)
def apply(self, func, convert_dtype=True, args=(), **kwds):
"""
Apply the function to the Series using swifter
"""
# if the series is empty, return early using Pandas
if not self._nrows:
return self._obj.apply(func, convert_dtype=convert_dtype, args=args, **kwds)
sample = self._obj.iloc[: self._npartitions * 2]
# check if input is string or if the user is overriding the string processing default
allow_dask_processing = True if self._allow_dask_on_strings else (sample.dtype != "object")
if "axis" in kwds.keys():
kwds.pop("axis")
warnings.warn("Axis keyword not necessary because applying on a Series.")
try: # try to vectorize
with suppress_stdout_stderr_logging():
tmp_df = func(sample, *args, **kwds)
sample_df = sample.apply(func, convert_dtype=convert_dtype, args=args, **kwds)
self._validate_apply(
np.array_equal(sample_df, tmp_df) & (sample_df.shape == tmp_df.shape),
error_message="Vectorized function sample doesn't match pandas apply sample.",
)
return func(self._obj, *args, **kwds)
except ERRORS_TO_HANDLE: # if can't vectorize, estimate time to pandas apply
wrapped = self._wrapped_apply(func, convert_dtype=convert_dtype, args=args, **kwds)
timed = timeit.timeit(wrapped, number=N_REPEATS)
sample_proc_est = timed / N_REPEATS
est_apply_duration = sample_proc_est / self._SAMPLE_SIZE * self._obj.shape[0]
# if pandas sample apply takes too long and not performing str processing, use dask
if (est_apply_duration > self._dask_threshold) and allow_dask_processing:
return self._dask_apply(func, convert_dtype, *args, **kwds)
else: # use pandas
if self._progress_bar:
tqdm.pandas(desc=self._progress_bar_desc or "Pandas Apply")
return self._obj.progress_apply(func, convert_dtype=convert_dtype, args=args, **kwds)
else:
return self._obj.apply(func, convert_dtype=convert_dtype, args=args, **kwds)
@pd.api.extensions.register_dataframe_accessor("swifter")
class DataFrameAccessor(_SwifterObject):
def _wrapped_apply(self, func, axis=0, raw=None, result_type=None, args=(), **kwds):
def wrapped():
with suppress_stdout_stderr_logging():
self._obj.iloc[: self._SAMPLE_SIZE, :].apply(
func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds
)
return wrapped
def _modin_apply(self, func, axis=0, raw=None, result_type=None, *args, **kwds):
sample = self._obj.iloc[: self._npartitions * 2, :]
try:
series = False
with suppress_stdout_stderr_logging():
import modin.pandas as md
sample_df = sample.apply(func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds)
                # check that the modin apply matches the pandas apply
tmp_df = (
md.DataFrame(sample)
.apply(func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds)
._to_pandas()
)
if isinstance(sample_df, pd.Series) and isinstance(tmp_df, pd.DataFrame):
tmp_df = | pd.Series(tmp_df.values[:, 0]) | pandas.Series |
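# Hedged usage sketch (not part of the library source): once the accessors above are
# registered, swifter is driven entirely through the `.swifter` attribute. The toy
# function, column name, and thresholds here are illustrative only.
import numpy as np
import pandas as pd
import swifter  # noqa: F401  (importing the package registers the .swifter accessor)

df = pd.DataFrame({"x": np.random.rand(100_000)})
result = (
    df["x"]
    .swifter.progress_bar(True, desc="squaring")
    .set_dask_threshold(dask_threshold=1)
    .apply(lambda v: v ** 2)
)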
import pandas as pd
import pytest
from rdtools.normalization import normalize_with_expected_power
from pandas import Timestamp
import numpy as np
@pytest.fixture()
def times_15():
return pd.date_range(start='20200101 12:00', end='20200101 13:00', freq='15T')
@pytest.fixture()
def times_30():
return pd.date_range(start='20200101 12:00', end='20200101 13:00', freq='30T')
@pytest.fixture()
def pv_15(times_15):
return pd.Series([1.0, 2.5, 3.0, 2.2, 2.1], index=times_15)
@pytest.fixture()
def expected_15(times_15):
return pd.Series([1.2, 2.3, 2.8, 2.1, 2.0], index=times_15)
@pytest.fixture()
def irradiance_15(times_15):
return pd.Series([1000.0, 850.0, 950.0, 975.0, 890.0], index=times_15)
@pytest.fixture()
def pv_30(times_30):
return pd.Series([1.0, 3.0, 2.1], index=times_30)
@pytest.fixture()
def expected_30(times_30):
return pd.Series([1.2, 2.8, 2.0], index=times_30)
@pytest.fixture()
def irradiance_30(times_30):
return pd.Series([1000.0, 950.0, 890.0], index=times_30)
def test_normalize_with_expected_power_uniform_frequency(pv_15, expected_15, irradiance_15):
norm, insol = normalize_with_expected_power(
pv_15, expected_15, irradiance_15)
expected_norm = pd.Series(
{Timestamp('2020-01-01 12:15:00', freq='15T'): 1.0,
Timestamp('2020-01-01 12:30:00', freq='15T'): 1.0784313725490198,
Timestamp('2020-01-01 12:45:00', freq='15T'): 1.0612244897959184,
Timestamp('2020-01-01 13:00:00', freq='15T'): 1.0487804878048783}
)
expected_norm.name = 'energy_Wh'
expected_norm.index.freq = '15T'
expected_insol = pd.Series(
{Timestamp('2020-01-01 12:15:00', freq='15T'): 231.25,
Timestamp('2020-01-01 12:30:00', freq='15T'): 225.0,
Timestamp('2020-01-01 12:45:00', freq='15T'): 240.625,
Timestamp('2020-01-01 13:00:00', freq='15T'): 233.125}
)
expected_insol.name = 'energy_Wh'
expected_insol.index.freq = '15T'
pd.testing.assert_series_equal(norm, expected_norm)
pd.testing.assert_series_equal(insol, expected_insol)
def test_normalize_with_expected_power_energy_option(pv_15, expected_15, irradiance_15):
norm, insol = normalize_with_expected_power(
pv_15, expected_15, irradiance_15, pv_input='energy')
expected_norm = pd.Series(
{Timestamp('2020-01-01 12:00:00', freq='15T'): np.nan,
Timestamp('2020-01-01 12:15:00', freq='15T'): 5.714285714285714,
Timestamp('2020-01-01 12:30:00', freq='15T'): 4.705882352941177,
Timestamp('2020-01-01 12:45:00', freq='15T'): 3.5918367346938775,
Timestamp('2020-01-01 13:00:00', freq='15T'): 4.097560975609756}
)
expected_norm.name = 'energy_Wh'
expected_norm.index.freq = '15T'
expected_insol = pd.Series(
{Timestamp('2020-01-01 12:00:00', freq='15T'): np.nan,
Timestamp('2020-01-01 12:15:00', freq='15T'): 231.25,
Timestamp('2020-01-01 12:30:00', freq='15T'): 225.0,
Timestamp('2020-01-01 12:45:00', freq='15T'): 240.625,
Timestamp('2020-01-01 13:00:00', freq='15T'): 233.125}
)
expected_insol.name = 'energy_Wh'
expected_insol.index.freq = '15T'
pd.testing.assert_series_equal(norm, expected_norm)
pd.testing.assert_series_equal(insol, expected_insol)
def test_normalize_with_expected_power_low_freq_pv(pv_30, expected_15, irradiance_15):
norm, insol = normalize_with_expected_power(
pv_30, expected_15, irradiance_15)
expected_norm = pd.Series(
{Timestamp('2020-01-01 12:30:00', freq='30T'): 0.9302325581395349,
Timestamp('2020-01-01 13:00:00', freq='30T'): 1.1333333333333333}
)
expected_norm.name = 'energy_Wh'
expected_norm.index.freq = '30T'
expected_insol = pd.Series(
{Timestamp('2020-01-01 12:30:00', freq='30T'): 456.25,
Timestamp('2020-01-01 13:00:00', freq='30T'): 473.75}
)
expected_insol.name = 'energy_Wh'
expected_insol.index.freq = '30T'
pd.testing.assert_series_equal(norm, expected_norm)
pd.testing.assert_series_equal(insol, expected_insol)
def test_normalized_with_expected_power_low_freq_expected(pv_15, expected_30, irradiance_30):
norm, insol = normalize_with_expected_power(
pv_15, expected_30, irradiance_30)
expected_norm = pd.Series(
{Timestamp('2020-01-01 12:15:00', freq='15T'): 1.09375,
Timestamp('2020-01-01 12:30:00', freq='15T'): 1.1458333333333335,
| Timestamp('2020-01-01 12:45:00', freq='15T') | pandas.Timestamp |
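# Hedged usage sketch (not part of the test module): the call pattern the fixtures above
# exercise - measured PV power, modeled/expected power, and plane-of-array irradiance on
# a shared index, returning normalized energy and insolation series.
import pandas as pd
from rdtools.normalization import normalize_with_expected_power

times = pd.date_range("2020-01-01 12:00", "2020-01-01 13:00", freq="15T")
pv_power = pd.Series([1.0, 2.5, 3.0, 2.2, 2.1], index=times)
expected_power = pd.Series([1.2, 2.3, 2.8, 2.1, 2.0], index=times)
poa_irradiance = pd.Series([1000.0, 850.0, 950.0, 975.0, 890.0], index=times)

normalized_energy, insolation = normalize_with_expected_power(
    pv_power, expected_power, poa_irradiance
)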
from datetime import datetime
import numpy as np
import pytest
from pandas.core.dtypes.cast import find_common_type, is_dtype_equal
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
class TestDataFrameCombineFirst:
def test_combine_first_mixed(self):
a = Series(["a", "b"], index=range(2))
b = Series(range(2), index=range(2))
f = DataFrame({"A": a, "B": b})
a = Series(["a", "b"], index=range(5, 7))
b = Series(range(2), index=range(5, 7))
g = DataFrame({"A": a, "B": b})
exp = DataFrame({"A": list("abab"), "B": [0, 1, 0, 1]}, index=[0, 1, 5, 6])
combined = f.combine_first(g)
tm.assert_frame_equal(combined, exp)
def test_combine_first(self, float_frame):
# disjoint
head, tail = float_frame[:5], float_frame[5:]
combined = head.combine_first(tail)
reordered_frame = float_frame.reindex(combined.index)
tm.assert_frame_equal(combined, reordered_frame)
assert tm.equalContents(combined.columns, float_frame.columns)
tm.assert_series_equal(combined["A"], reordered_frame["A"])
# same index
fcopy = float_frame.copy()
fcopy["A"] = 1
del fcopy["C"]
fcopy2 = float_frame.copy()
fcopy2["B"] = 0
del fcopy2["D"]
combined = fcopy.combine_first(fcopy2)
assert (combined["A"] == 1).all()
tm.assert_series_equal(combined["B"], fcopy["B"])
tm.assert_series_equal(combined["C"], fcopy2["C"])
tm.assert_series_equal(combined["D"], fcopy["D"])
# overlap
head, tail = reordered_frame[:10].copy(), reordered_frame
head["A"] = 1
combined = head.combine_first(tail)
assert (combined["A"][:10] == 1).all()
# reverse overlap
tail["A"][:10] = 0
combined = tail.combine_first(head)
assert (combined["A"][:10] == 0).all()
# no overlap
f = float_frame[:10]
g = float_frame[10:]
combined = f.combine_first(g)
tm.assert_series_equal(combined["A"].reindex(f.index), f["A"])
tm.assert_series_equal(combined["A"].reindex(g.index), g["A"])
# corner cases
comb = float_frame.combine_first(DataFrame())
tm.assert_frame_equal(comb, float_frame)
comb = DataFrame().combine_first(float_frame)
tm.assert_frame_equal(comb, float_frame)
comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
assert "faz" in comb.index
# #2525
df = DataFrame({"a": [1]}, index=[datetime(2012, 1, 1)])
df2 = DataFrame(columns=["b"])
result = df.combine_first(df2)
assert "b" in result
def test_combine_first_mixed_bug(self):
idx = Index(["a", "b", "c", "e"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "e"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame1 = DataFrame({"col0": ser1, "col2": ser2, "col3": ser3})
idx = Index(["a", "b", "c", "f"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "f"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame2 = DataFrame({"col1": ser1, "col2": ser2, "col5": ser3})
combined = frame1.combine_first(frame2)
assert len(combined.columns) == 5
def test_combine_first_same_as_in_update(self):
# gh 3016 (same as in update)
df = DataFrame(
[[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
result = df.combine_first(other)
tm.assert_frame_equal(result, df)
df.loc[0, "A"] = np.nan
result = df.combine_first(other)
df.loc[0, "A"] = 45
tm.assert_frame_equal(result, df)
def test_combine_first_doc_example(self):
# doc example
df1 = DataFrame(
{"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]}
)
df2 = DataFrame(
{
"A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0],
"B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0],
}
)
result = df1.combine_first(df2)
expected = DataFrame({"A": [1, 2, 3, 5, 3, 7.0], "B": [np.nan, 2, 3, 4, 6, 8]})
tm.assert_frame_equal(result, expected)
def test_combine_first_return_obj_type_with_bools(self):
# GH3552
df1 = DataFrame(
[[np.nan, 3.0, True], [-4.6, np.nan, True], [np.nan, 7.0, False]]
)
df2 = DataFrame([[-42.6, np.nan, True], [-5.0, 1.6, False]], index=[1, 2])
expected = Series([True, True, False], name=2, dtype=bool)
result_12 = df1.combine_first(df2)[2]
tm.assert_series_equal(result_12, expected)
result_21 = df2.combine_first(df1)[2]
tm.assert_series_equal(result_21, expected)
@pytest.mark.parametrize(
"data1, data2, data_expected",
(
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
),
)
def test_combine_first_convert_datatime_correctly(
self, data1, data2, data_expected
):
# GH 3593
df1, df2 = DataFrame({"a": data1}), DataFrame({"a": data2})
result = df1.combine_first(df2)
expected = DataFrame({"a": data_expected})
tm.assert_frame_equal(result, expected)
def test_combine_first_align_nan(self):
# GH 7509 (not fixed)
dfa = DataFrame([[pd.Timestamp("2011-01-01"), 2]], columns=["a", "b"])
dfb = DataFrame([[4], [5]], columns=["b"])
assert dfa["a"].dtype == "datetime64[ns]"
assert dfa["b"].dtype == "int64"
res = dfa.combine_first(dfb)
exp = DataFrame(
{"a": [pd.Timestamp("2011-01-01"), pd.NaT], "b": [2, 5]},
columns=["a", "b"],
)
tm.assert_frame_equal(res, exp)
assert res["a"].dtype == "datetime64[ns]"
# ToDo: this must be int64
assert res["b"].dtype == "int64"
res = dfa.iloc[:0].combine_first(dfb)
exp = DataFrame({"a": [np.nan, np.nan], "b": [4, 5]}, columns=["a", "b"])
tm.assert_frame_equal(res, exp)
# ToDo: this must be datetime64
assert res["a"].dtype == "float64"
# ToDo: this must be int64
assert res["b"].dtype == "int64"
def test_combine_first_timezone(self):
# see gh-7630
data1 = pd.to_datetime("20100101 01:01").tz_localize("UTC")
df1 = DataFrame(
columns=["UTCdatetime", "abc"],
data=data1,
index=pd.date_range("20140627", periods=1),
)
data2 = pd.to_datetime("20121212 12:12").tz_localize("UTC")
df2 = DataFrame(
columns=["UTCdatetime", "xyz"],
data=data2,
index=pd.date_range("20140628", periods=1),
)
res = df2[["UTCdatetime"]].combine_first(df1)
exp = DataFrame(
{
"UTCdatetime": [
pd.Timestamp("2010-01-01 01:01", tz="UTC"),
pd.Timestamp("2012-12-12 12:12", tz="UTC"),
],
"abc": [pd.Timestamp("2010-01-01 01:01:00", tz="UTC"), pd.NaT],
},
columns=["UTCdatetime", "abc"],
index=pd.date_range("20140627", periods=2, freq="D"),
)
assert res["UTCdatetime"].dtype == "datetime64[ns, UTC]"
assert res["abc"].dtype == "datetime64[ns, UTC]"
tm.assert_frame_equal(res, exp)
# see gh-10567
dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="UTC")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-03", "2015-01-05", tz="UTC")
df2 = DataFrame({"DATE": dts2})
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res["DATE"].dtype == "datetime64[ns, UTC]"
dts1 = pd.DatetimeIndex(
["2011-01-01", "NaT", "2011-01-03", "2011-01-04"], tz="US/Eastern"
)
df1 = DataFrame({"DATE": dts1}, index=[1, 3, 5, 7])
dts2 = pd.DatetimeIndex(
["2012-01-01", "2012-01-02", "2012-01-03"], tz="US/Eastern"
)
df2 = DataFrame({"DATE": dts2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.DatetimeIndex(
[
"2011-01-01",
"2012-01-01",
"NaT",
"2012-01-02",
"2011-01-03",
"2011-01-04",
],
tz="US/Eastern",
)
exp = DataFrame({"DATE": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
# different tz
dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="US/Eastern")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-03", "2015-01-05")
df2 = DataFrame({"DATE": dts2})
# if df1 doesn't have NaN, keep its dtype
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res["DATE"].dtype == "datetime64[ns, US/Eastern]"
dts1 = pd.date_range("2015-01-01", "2015-01-02", tz="US/Eastern")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-01", "2015-01-03")
df2 = DataFrame({"DATE": dts2})
res = df1.combine_first(df2)
exp_dts = [
pd.Timestamp("2015-01-01", tz="US/Eastern"),
pd.Timestamp("2015-01-02", tz="US/Eastern"),
pd.Timestamp("2015-01-03"),
]
exp = DataFrame({"DATE": exp_dts})
tm.assert_frame_equal(res, exp)
assert res["DATE"].dtype == "object"
def test_combine_first_timedelta(self):
data1 = pd.TimedeltaIndex(["1 day", "NaT", "3 day", "4day"])
df1 = DataFrame({"TD": data1}, index=[1, 3, 5, 7])
data2 = pd.TimedeltaIndex(["10 day", "11 day", "12 day"])
df2 = DataFrame({"TD": data2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.TimedeltaIndex(
["1 day", "10 day", "NaT", "11 day", "3 day", "4 day"]
)
exp = DataFrame({"TD": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res["TD"].dtype == "timedelta64[ns]"
def test_combine_first_period(self):
data1 = pd.PeriodIndex(["2011-01", "NaT", "2011-03", "2011-04"], freq="M")
df1 = DataFrame({"P": data1}, index=[1, 3, 5, 7])
data2 = pd.PeriodIndex(["2012-01-01", "2012-02", "2012-03"], freq="M")
df2 = DataFrame({"P": data2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.PeriodIndex(
["2011-01", "2012-01", "NaT", "2012-02", "2011-03", "2011-04"], freq="M"
)
exp = DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res["P"].dtype == data1.dtype
# different freq
dts2 = pd.PeriodIndex(["2012-01-01", "2012-01-02", "2012-01-03"], freq="D")
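# Hedged, self-contained sketch (an illustration added here, not part of the test
# class above) of the combine_first semantics these tests exercise: the calling
# frame wins, its missing values are filled from the other frame, and the union
# of both indexes is kept.
def _combine_first_basic_example():
    import pandas as pd

    df_a = pd.DataFrame({"x": [1.0, None]}, index=[0, 1])
    df_b = pd.DataFrame({"x": [10.0, 20.0, 30.0]}, index=[0, 1, 2])
    combined = df_a.combine_first(df_b)
    # index 0 keeps 1.0 from df_a, index 1 is filled with 20.0, index 2 comes from df_b
    assert combined["x"].tolist() == [1.0, 20.0, 30.0]
    return combined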
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
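# Hedged sketch (illustrative helper, not used by the tests) of how the factories
# above are consumed: each call manufactures a DataSet subclass whose
# ``num_announcements`` tells the estimates loader how many quarters out to look.
def _quarters_dataset_factory_example():
    one_quarter_out = QuartersEstimates(1)
    two_quarters_out = QuartersEstimates(2)
    return one_quarter_out.num_announcements, two_quarters_out.num_announcements  # (1, 2)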
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
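# Hedged usage sketch for the helper above; the sids, estimates, and dates below
# are invented for illustration and are not taken from any test case.
def _expected_df_example():
    return create_expected_df_for_factor_compute(
        start_date=pd.Timestamp("2015-01-05"),
        sids=[0, 1],
        tuples=[
            (0, 10.0, pd.Timestamp("2015-01-05")),
            (1, 20.0, pd.Timestamp("2015-01-06")),
        ],
        end_date=pd.Timestamp("2015-01-08"),
    )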
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
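# Hedged sketch of the contract described in the WithEstimates docstring; the
# single event below (sid, dates, estimate) is invented for illustration.
# Concrete mixins only need to supply make_loader and make_events, exactly as
# the test classes further down do.
class _ExampleEstimatesFixture(WithEstimates):
    @classmethod
    def make_loader(cls, events, columns):
        return PreviousEarningsEstimatesLoader(events, columns)

    @classmethod
    def make_events(cls):
        return pd.DataFrame(
            {
                SID_FIELD_NAME: [0],
                TS_FIELD_NAME: [pd.Timestamp("2015-01-05")],
                EVENT_DATE_FIELD_NAME: [pd.Timestamp("2015-01-15")],
                "estimate": [1.5],
                FISCAL_QUARTER_FIELD_NAME: [1.0],
                FISCAL_YEAR_FIELD_NAME: [2015.0],
            }
        )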
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
""" Test cases for DataFrame.plot """
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
@td.skip_if_no_mpl
class TestDataFrameColor(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
import matplotlib as mpl
mpl.rcdefaults()
self.tdf = tm.makeTimeDataFrame()
self.hexbin_df = DataFrame(
{
"A": np.random.uniform(size=20),
"B": np.random.uniform(size=20),
"C": np.arange(20) + np.random.uniform(size=20),
}
)
def test_mpl2_color_cycle_str(self):
# GH 15516
df = DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"])
colors = ["C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9"]
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", "MatplotlibDeprecationWarning")
for color in colors:
_check_plot_works(df.plot, color=color)
# if warning is raised, check that it is the exact problematic one
# GH 36972
if w:
match = "Support for uppercase single-letter colors is deprecated"
warning_message = str(w[0].message)
msg = "MatplotlibDeprecationWarning related to CN colors was raised"
assert match not in warning_message, msg
def test_color_single_series_list(self):
# GH 3486
df = DataFrame({"A": [1, 2, 3]})
_check_plot_works(df.plot, color=["red"])
@pytest.mark.parametrize("color", [(1, 0, 0), (1, 0, 0, 0.5)])
def test_rgb_tuple_color(self, color):
# GH 16695
df = DataFrame({"x": [1, 2], "y": [3, 4]})
_check_plot_works(df.plot, x="x", y="y", color=color)
def test_color_empty_string(self):
df = DataFrame(np.random.randn(10, 2))
with pytest.raises(ValueError):
df.plot(color="")
def test_color_and_style_arguments(self):
df = DataFrame({"x": [1, 2], "y": [3, 4]})
# passing both 'color' and 'style' arguments should be allowed
# if there is no color symbol in the style strings:
ax = df.plot(color=["red", "black"], style=["-", "--"])
# check that the linestyles are correctly set:
linestyle = [line.get_linestyle() for line in ax.lines]
assert linestyle == ["-", "--"]
# check that the colors are correctly set:
color = [line.get_color() for line in ax.lines]
assert color == ["red", "black"]
# passing both 'color' and 'style' arguments should not be allowed
# if there is a color symbol in the style strings:
with pytest.raises(ValueError):
df.plot(color=["red", "black"], style=["k-", "r--"])
@pytest.mark.parametrize(
"color, expected",
[
("green", ["green"] * 4),
(["yellow", "red", "green", "blue"], ["yellow", "red", "green", "blue"]),
],
)
def test_color_and_marker(self, color, expected):
# GH 21003
df = DataFrame(np.random.random((7, 4)))
ax = df.plot(color=color, style="d--")
# check colors
result = [i.get_color() for i in ax.lines]
assert result == expected
# check markers and linestyles
assert all(i.get_linestyle() == "--" for i in ax.lines)
assert all(i.get_marker() == "d" for i in ax.lines)
@pytest.mark.slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
default_colors = self._unpack_cycler(plt.rcParams)
df = DataFrame(np.random.randn(5, 5))
ax = df.plot.bar()
self._check_colors(ax.patches[::5], facecolors=default_colors[:5])
tm.close()
custom_colors = "rgcby"
ax = df.plot.bar(color=custom_colors)
self._check_colors(ax.patches[::5], facecolors=custom_colors)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot.bar(colormap="jet")
rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
# Test colormap functionality
ax = df.plot.bar(colormap=cm.jet)
rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
ax = df.loc[:, [0]].plot.bar(color="DodgerBlue")
self._check_colors([ax.patches[0]], facecolors=["DodgerBlue"])
tm.close()
ax = df.plot(kind="bar", color="green")
self._check_colors(ax.patches[::5], facecolors=["green"] * 5)
tm.close()
def test_bar_user_colors(self):
df = DataFrame(
{"A": range(4), "B": range(1, 5), "color": ["red", "blue", "blue", "red"]}
)
# This should *only* work when `y` is specified, else
# we use one color per column
ax = df.plot.bar(y="A", color=df["color"])
result = [p.get_facecolor() for p in ax.patches]
expected = [
(1.0, 0.0, 0.0, 1.0),
(0.0, 0.0, 1.0, 1.0),
(0.0, 0.0, 1.0, 1.0),
(1.0, 0.0, 0.0, 1.0),
]
assert result == expected
@pytest.mark.slow
def test_if_scatterplot_colorbar_affects_xaxis_visibility(self):
# addressing issue #10611, to ensure colorbar does not
# interfere with x-axis label and ticklabels with
# ipython inline backend.
random_array = np.random.random((1000, 3))
df = DataFrame(random_array, columns=["A label", "B label", "C label"])
ax1 = df.plot.scatter(x="A label", y="B label")
ax2 = df.plot.scatter(x="A label", y="B label", c="C label")
vis1 = [vis.get_visible() for vis in ax1.xaxis.get_minorticklabels()]
vis2 = [vis.get_visible() for vis in ax2.xaxis.get_minorticklabels()]
assert vis1 == vis2
vis1 = [vis.get_visible() for vis in ax1.xaxis.get_majorticklabels()]
vis2 = [vis.get_visible() for vis in ax2.xaxis.get_majorticklabels()]
assert vis1 == vis2
assert (
ax1.xaxis.get_label().get_visible() == ax2.xaxis.get_label().get_visible()
)
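    # Hedged, non-test sketch of the behaviour checked above (assumes matplotlib is
    # installed; the helper name is ours): passing ``c`` as a column name adds a
    # colorbar, and the x-axis label should stay visible either way.
    def _scatter_colorbar_sketch(self):
        df = DataFrame({"x": np.arange(10), "y": np.arange(10), "z": np.arange(10)})
        ax = df.plot.scatter(x="x", y="y", c="z", colormap="viridis")
        return ax.xaxis.get_label().get_visible()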
@pytest.mark.slow
def test_if_hexbin_xaxis_label_is_visible(self):
# addressing issue #10678, to ensure colorbar does not
# interfere with x-axis label and ticklabels with
# ipython inline backend.
random_array = np.random.random((1000, 3))
df = DataFrame(random_array, columns=["A label", "B label", "C label"])
import logging
import os
import numpy as np
import pandas as pd
from opencell.database import utils, constants
logger = logging.getLogger(__name__)
def parseFloat(val):
try:
val = float(val)
except ValueError:
val = float(str(val).replace(',', ''))
return val
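# Hedged illustration of the fallback above (an added sketch, not called anywhere
# in this module): values exported with thousands separators still parse.
def _parse_float_examples():
    return parseFloat('1,234.5'), parseFloat(7)  # -> (1234.5, 7.0)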
def load_library_snapshot(filename):
'''
Load and format a CSV 'snapshot' of a library spreadsheet
These 'snapshots' are of the google sheet created/maintained by Manu
that contains the crispr designs for all plates
'''
# define maps from the column names in the google sheets
# to the column names in the models.metadata.CrisprDesign table
# (all required columns are included, even if their name is unchanged)
# column names in the original 'library' google sheet (containing plates 1-22)
library_columns = {
'plate_id': 'plate_id',
'well_id': 'well_id',
'gene_name': 'target_name',
'family': 'target_family',
'enst_id': 'enst_id',
'terminus_to_tag': 'target_terminus',
'protospacer_name': 'protospacer_name',
'protospacer_note': 'protospacer_notes',
'protospacer_sequence': 'protospacer_sequence',
'ultramer_name': 'template_name',
'ultramer_note': 'template_notes',
'ultramer_sequence': 'template_sequence',
}
# alternative column names for some columns, specific to the 'library v1.1' spreadsheet
# (this sheet starts with plate23 in Jan 2022)
library_columns_2022 = {
'tagged_terminus': 'target_terminus',
'ensembl_transcript_id': 'enst_id',
'gRNA_sequence': 'protospacer_sequence',
'donor_sequence': 'template_sequence',
}
library_columns.update(library_columns_2022)
library = pd.read_csv(filename)
library.rename(columns=library_columns, inplace=True)
# for clarity, format the plate_ids here
library['plate_id'] = library.plate_id.apply(utils.format_plate_design_id)
# drop any extraneous columns
dropped_columns = list(set(library.columns).difference(library_columns.values()))
library = library.drop(labels=dropped_columns, axis=1)
return library
def load_electroporation_history(filename):
'''
Load and format a 'snapshot' of the list of electroporations
(this is a google sheet from Manu)
Expected columns: ('plate_id', 'date', 'notes')
'''
electroporation_columns = {
'plate_id': 'plate_id',
'electroporation_date': 'date',
'comment': 'notes',
}
electroporations = pd.read_csv(filename)
#
# bow_module.py
#
# Copyright (c) 2017 <NAME>
#
# This software is released under the MIT License.
# http://opensource.org/licenses/mit-license.php
#
#
# Includes pandas
#-----------------------------------------------------------------------------
# Copyright (c) 2012, PyData Development Team
# All rights reserved.
#
# Distributed under the terms of the BSD Simplified License.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
import pandas as pd
def read_BOW_file(BOW_filename):
f = open(BOW_filename, 'r')
full_string = f.read()
f.close()
BOW = []
for line in full_string.split('\n'):
document = []
for word in line.split(','):
if word != '':
document.append(word)
if document != []:
BOW.append(document)
return BOW
def write_BOW_file(BOW, BOW_filename):
f = open(BOW_filename, 'w')
for line in BOW:
for i, word in enumerate(line):
f.write(word)
if i != (len(line)-1):
f.write(',')
f.write('\n')
f.close()
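# Hedged round-trip sketch (the file name below is made up): each document is
# written as one comma-separated line, so writing a corpus and reading it back
# returns the same nested lists.
def _bow_roundtrip_example(path='example_bow.txt'):
    corpus = [['apple', 'banana'], ['banana', 'cherry', 'cherry']]
    write_BOW_file(corpus, path)
    return read_BOW_file(path)  # -> same nested lists as `corpus`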
def make_bag_of_words_num(input_filename):
frequency_matrix = pd.read_csv(input_filename)
frequency_matrix.index = frequency_matrix.iloc[:,0]
del frequency_matrix[frequency_matrix.columns[0]]
word_list = frequency_matrix.columns.tolist()
BOW = []
for d, document in enumerate(frequency_matrix.index.tolist()):
bufList = []
for v, word in enumerate(frequency_matrix.columns.tolist()):
for i in range(frequency_matrix.loc[document, word]):
bufList.append(v)
BOW.append(bufList)
return BOW, word_list
def make_frequency_matrix(BOW, word_list, output_filename):
frequency_matrix = []
for document in BOW:
frequency_list = []
for i, word in enumerate(word_list):
frequency = document.count(i)
frequency_list.append(frequency)
frequency_matrix.append(frequency_list)
fm_df = pd.DataFrame(frequency_matrix, columns=word_list)
# Adapted from https://github.com/BinPro/CONCOCT/blob/develop/scripts/fasta_to_features.py
from itertools import product
from collections import OrderedDict
from .fasta import fasta_iter
def generate_feature_mapping(kmer_len):
BASE_COMPLEMENT = {"A": "T", "T": "A", "G": "C", "C": "G"}
kmer_hash = {}
counter = 0
for kmer in product("ATGC", repeat=kmer_len):
kmer = ''.join(kmer)
if kmer not in kmer_hash:
kmer_hash[kmer] = counter
rev_compl = tuple([BASE_COMPLEMENT[x] for x in reversed(kmer)])
kmer_hash[''.join(rev_compl)] = counter
counter += 1
return kmer_hash, counter
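# Worked check of the mapping above (illustrative helper): for kmer_len = 4 there
# are 4**4 = 256 raw k-mers, 16 of which equal their own reverse complement, so
# they collapse into (256 - 16) / 2 + 16 = 136 canonical feature indices.
def _canonical_kmer_count_example():
    kmer_hash, nr_features = generate_feature_mapping(4)
    assert len(kmer_hash) == 256
    assert nr_features == 136
    return nr_features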
def generate_kmer_features_from_fasta(
fasta_file, length_threshold, kmer_len, split=False, split_threshold=0):
import numpy as np
import pandas as pd
def seq_list():
for h, seq in fasta_iter(fasta_file):
if not split:
yield h, seq
elif len(seq) >= split_threshold:
half = len(seq) // 2
yield (h + '_1', seq[:half])
yield (h + '_2', seq[half:])
kmer_dict, nr_features = generate_feature_mapping(kmer_len)
composition = OrderedDict()
for h, seq in seq_list():
if len(seq) < length_threshold:
continue
norm_seq = str(seq).upper()
kmers = [kmer_dict[norm_seq[i:i+kmer_len]]
for i in range(len(norm_seq) - kmer_len + 1)
if norm_seq[i:i+kmer_len] in kmer_dict] # ignore kmers with non-canonical bases
composition[h] = np.bincount(np.array(kmers, dtype=np.int64), minlength=nr_features)
df = pd.DataFrame.from_dict(composition, orient='index', dtype=float)
"""
Provide the groupby split-apply-combine paradigm. Define the GroupBy
class providing the base-class of operations.
The SeriesGroupBy and DataFrameGroupBy sub-class
(defined in pandas.core.groupby.generic)
expose these user-facing objects to provide specific functionality.
"""
from contextlib import contextmanager
import datetime
from functools import partial, wraps
import inspect
import re
import types
from typing import (
Callable,
Dict,
FrozenSet,
Generic,
Hashable,
Iterable,
List,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
from pandas._config.config import option_context
from pandas._libs import Timestamp
import pandas._libs.groupby as libgroupby
from pandas._typing import FrameOrSeries, Scalar
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, cache_readonly, doc
from pandas.core.dtypes.cast import maybe_cast_result
from pandas.core.dtypes.common import (
ensure_float,
is_bool_dtype,
is_datetime64_dtype,
is_extension_array_dtype,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core import nanops
import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical, DatetimeArray
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base, ops
from pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter
_common_see_also = """
See Also
--------
Series.%(name)s
DataFrame.%(name)s
"""
_apply_docs = dict(
template="""
Apply function `func` group-wise and combine the results together.
The function passed to `apply` must take a {input} as its first
argument and return a DataFrame, Series or scalar. `apply` will
then take care of combining the results back together into a single
dataframe or series. `apply` is therefore a highly flexible
grouping method.
While `apply` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods
like `agg` or `transform`. Pandas offers a wide range of methods that will
be much faster than using `apply` for their specific purposes, so try to
use them before reaching for `apply`.
Parameters
----------
func : callable
A callable that takes a {input} as its first argument, and
returns a dataframe, a series or a scalar. In addition the
callable may take positional and keyword arguments.
args, kwargs : tuple and dict
Optional positional and keyword arguments to pass to `func`.
Returns
-------
applied : Series or DataFrame
See Also
--------
pipe : Apply function to the full GroupBy object instead of to each
group.
aggregate : Apply aggregate function to the GroupBy object.
transform : Apply function column-by-column to the GroupBy object.
Series.apply : Apply a function to a Series.
DataFrame.apply : Apply a function to each row or column of a DataFrame.
""",
dataframe_examples="""
>>> df = pd.DataFrame({'A': 'a a b'.split(),
'B': [1,2,3],
'C': [4,6, 5]})
>>> g = df.groupby('A')
Notice that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: below the function passed to `apply` takes a DataFrame as
its argument and returns a DataFrame. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x / x.sum())
B C
0 0.333333 0.4
1 0.666667 0.6
2 1.000000 1.0
Example 2: The function passed to `apply` takes a DataFrame as
its argument and returns a Series. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x.max() - x.min())
B C
A
a 1 2
b 0 0
Example 3: The function passed to `apply` takes a DataFrame as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.C.max() - x.B.min())
A
a 5
b 2
dtype: int64
""",
series_examples="""
>>> s = pd.Series([0, 1, 2], index='a a b'.split())
>>> g = s.groupby(s.index)
From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: The function passed to `apply` takes a Series as
its argument and returns a Series. `apply` combines the result for
each group together into a new Series:
>>> g.apply(lambda x: x*2 if x.name == 'b' else x/2)
0 0.0
1 0.5
2 4.0
dtype: float64
Example 2: The function passed to `apply` takes a Series as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.max() - x.min())
a 1
b 0
dtype: int64
Notes
-----
In the current implementation `apply` calls `func` twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
group.
Examples
--------
{examples}
""",
)
_pipe_template = """
Apply a function `func` with arguments to this %(klass)s object and return
the function's result.
%(versionadded)s
Use `.pipe` when you want to improve readability by chaining together
functions that expect Series, DataFrames, GroupBy or Resampler objects.
Instead of writing
>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c) # doctest: +SKIP
You can write
>>> (df.groupby('group')
... .pipe(f)
... .pipe(g, arg1=a)
... .pipe(h, arg2=b, arg3=c)) # doctest: +SKIP
which is much more readable.
Parameters
----------
func : callable or tuple of (callable, str)
Function to apply to this %(klass)s object or, alternatively,
a `(callable, data_keyword)` tuple where `data_keyword` is a
string indicating the keyword of `callable` that expects the
%(klass)s object.
args : iterable, optional
Positional arguments passed into `func`.
kwargs : dict, optional
A dictionary of keyword arguments passed into `func`.
Returns
-------
object : the return type of `func`.
See Also
--------
Series.pipe : Apply a function with arguments to a series.
DataFrame.pipe: Apply a function with arguments to a dataframe.
apply : Apply function to each group instead of to the
full %(klass)s object.
Notes
-----
See more `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#piping-function-calls>`_
Examples
--------
%(examples)s
"""
_transform_template = """
Call function producing a like-indexed %(klass)s on each group and
return a %(klass)s having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each group.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optionally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func
engine : str, default 'cython'
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
%(klass)s
See Also
--------
%(klass)s.groupby.apply
%(klass)s.groupby.aggregate
%(klass)s.transform
Notes
-----
Each group is endowed the attribute 'name' in case you need to know
which group you are working on.
The current implementation imposes three requirements on f:
* f must return a value that either has the same shape as the input
subframe or can be broadcast to the shape of the input subframe.
For example, if `f` returns a scalar it will be broadcast to have the
same shape as the input subframe.
* if this is a DataFrame, f must support application column-by-column
in the subframe. If f also supports application to the entire subframe,
then a fast path is used starting from the second chunk.
* f must not mutate groups. Mutation is not supported and may
produce unexpected results.
When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : ['one', 'one', 'two', 'three',
... 'two', 'two'],
... 'C' : [1, 5, 5, 2, 5, 5],
... 'D' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
C D
0 -1.154701 -0.577350
1 0.577350 0.000000
2 0.577350 1.154701
3 -1.154701 -1.000000
4 0.577350 -0.577350
5 0.577350 1.000000
Broadcast result of the transformation
>>> grouped.transform(lambda x: x.max() - x.min())
C D
0 4 6.0
1 3 8.0
2 4 6.0
3 3 8.0
4 4 6.0
5 3 8.0
"""
_agg_template = """
Aggregate using one or more operations over the specified axis.
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optionally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func
engine : str, default 'cython'
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
%(klass)s
See Also
--------
%(klass)s.groupby.apply
%(klass)s.groupby.transform
%(klass)s.aggregate
Notes
-----
When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
%(examples)s
"""
class GroupByPlot(PandasObject):
"""
Class implementing the .plot attribute for groupby objects.
"""
def __init__(self, groupby):
self._groupby = groupby
def __call__(self, *args, **kwargs):
def f(self):
return self.plot(*args, **kwargs)
f.__name__ = "plot"
return self._groupby.apply(f)
def __getattr__(self, name: str):
def attr(*args, **kwargs):
def f(self):
return getattr(self.plot, name)(*args, **kwargs)
return self._groupby.apply(f)
return attr
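# Hedged sketch of the dispatch implemented by GroupByPlot (assumes matplotlib is
# installed; the helper name is ours): attribute access such as ``.plot.line`` is
# forwarded group by group through ``apply``, producing one plot per group.
def _groupby_plot_sketch():
    import pandas as pd

    df = pd.DataFrame({"g": ["a", "a", "b"], "x": [1, 2, 3], "y": [4, 5, 6]})
    return df.groupby("g").plot.line(x="x", y="y")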
@contextmanager
def _group_selection_context(groupby):
"""
Set / reset the _group_selection_context.
"""
groupby._set_group_selection()
yield groupby
groupby._reset_group_selection()
_KeysArgType = Union[
Hashable,
List[Hashable],
Callable[[Hashable], Hashable],
List[Callable[[Hashable], Hashable]],
Mapping[Hashable, Hashable],
]
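# Hedged sketch of the key types _KeysArgType enumerates (illustrative helper):
# a single label, a list of labels, a callable applied to each index value, and
# a mapping from index values to group names are all valid groupers.
def _keys_arg_sketch():
    import pandas as pd

    df = pd.DataFrame({"A": ["x", "x", "y"], "B": [1, 2, 3]}, index=[10, 11, 12])
    by_label = df.groupby("A").size()
    by_list = df.groupby(["A", "B"]).size()
    by_callable = df.groupby(lambda i: i % 2).size()
    by_mapping = df.groupby({10: "even", 11: "odd", 12: "even"}).size()
    return by_label, by_list, by_callable, by_mapping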
class _GroupBy(PandasObject, SelectionMixin, Generic[FrameOrSeries]):
_group_selection = None
_apply_whitelist: FrozenSet[str] = frozenset()
def __init__(
self,
obj: FrameOrSeries,
keys: Optional[_KeysArgType] = None,
axis: int = 0,
level=None,
grouper: "Optional[ops.BaseGrouper]" = None,
exclusions=None,
selection=None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool = False,
observed: bool = False,
mutated: bool = False,
dropna: bool = True,
):
self._selection = selection
assert isinstance(obj, NDFrame), type(obj)
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError("as_index=False only valid with DataFrame")
if axis != 0:
raise ValueError("as_index=False only valid for axis=0")
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
self.observed = observed
self.mutated = mutated
self.dropna = dropna
if grouper is None:
from pandas.core.groupby.grouper import get_grouper
grouper, exclusions, obj = get_grouper(
obj,
keys,
axis=axis,
level=level,
sort=sort,
observed=observed,
mutated=self.mutated,
dropna=self.dropna,
)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __len__(self) -> int:
return len(self.groups)
def __repr__(self) -> str:
# TODO: Better repr for GroupBy object
return object.__repr__(self)
def _assure_grouper(self):
"""
We create the grouper on instantiation sub-classes may have a
different policy.
"""
pass
@property
def groups(self):
"""
Dict {group name -> group labels}.
"""
self._assure_grouper()
return self.grouper.groups
@property
def ngroups(self):
self._assure_grouper()
return self.grouper.ngroups
@property
def indices(self):
"""
Dict {group name -> group indices}.
"""
self._assure_grouper()
return self.grouper.indices
def _get_indices(self, names):
"""
Safe get multiple indices, translate keys for
datelike to underlying repr.
"""
def get_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, datetime.datetime):
return lambda key: Timestamp(key)
elif isinstance(s, np.datetime64):
return lambda key: Timestamp(key).asm8
else:
return lambda key: key
if len(names) == 0:
return []
if len(self.indices) > 0:
index_sample = next(iter(self.indices))
else:
index_sample = None # Dummy sample
name_sample = names[0]
if isinstance(index_sample, tuple):
if not isinstance(name_sample, tuple):
msg = "must supply a tuple to get_group with multiple grouping keys"
raise ValueError(msg)
if not len(name_sample) == len(index_sample):
try:
# If the original grouper was a tuple
return [self.indices[name] for name in names]
except KeyError as err:
# turns out it wasn't a tuple
msg = (
"must supply a same-length tuple to get_group "
"with multiple grouping keys"
)
raise ValueError(msg) from err
converters = [get_converter(s) for s in index_sample]
names = (tuple(f(n) for f, n in zip(converters, name)) for name in names)
else:
converter = get_converter(index_sample)
names = (converter(name) for name in names)
return [self.indices.get(name, []) for name in names]
def _get_index(self, name):
"""
Safe get index, translate keys for datelike to underlying repr.
"""
return self._get_indices([name])[0]
@cache_readonly
def _selected_obj(self):
# Note: _selected_obj is always just `self.obj` for SeriesGroupBy
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _reset_group_selection(self):
"""
Clear group based selection.
Used for methods needing to return info on each group regardless of
whether a group selection was previously set.
"""
if self._group_selection is not None:
# GH12839 clear cached selection too when changing group selection
self._group_selection = None
self._reset_cache("_selected_obj")
def _set_group_selection(self):
"""
Create group based selection.
Used when selection is not passed directly but instead via a grouper.
NOTE: this should be paired with a call to _reset_group_selection
"""
grp = self.grouper
if not (
self.as_index
and getattr(grp, "groupings", None) is not None
and self.obj.ndim > 1
and self._group_selection is None
):
return
ax = self.obj._info_axis
groupers = [g.name for g in grp.groupings if g.level is None and g.in_axis]
if len(groupers):
# GH12839 clear selected obj cache when group selection changes
self._group_selection = ax.difference(Index(groupers), sort=False).tolist()
self._reset_cache("_selected_obj")
def _set_result_index_ordered(self, result):
# set the result index on the passed values object and
# return the new object, xref 8046
# the values/counts are repeated according to the group index
# shortcut if we have an already ordered grouper
if not self.grouper.is_monotonic:
index = Index(np.concatenate(self._get_indices(self.grouper.result_index)))
result.set_axis(index, axis=self.axis, inplace=True)
result = result.sort_index(axis=self.axis)
result.set_axis(self.obj._get_axis(self.axis), axis=self.axis, inplace=True)
return result
def _dir_additions(self):
return self.obj._dir_additions() | self._apply_whitelist
def __getattr__(self, attr: str):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{attr}'"
)
@Substitution(
klass="GroupBy",
versionadded=".. versionadded:: 0.21.0",
examples="""\
>>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})
>>> df
A B
0 a 1
1 b 2
2 a 3
3 b 4
To get the difference between each groups maximum and minimum value in one
pass, you can do
>>> df.groupby('A').pipe(lambda x: x.max() - x.min())
B
A
a 2
b 2""",
)
@Appender(_pipe_template)
def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
plot = property(GroupByPlot)
def _make_wrapper(self, name):
assert name in self._apply_whitelist
self._set_group_selection()
# need to set up the selection,
# as it is not passed directly but via the grouper
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
sig = inspect.signature(f)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
if "axis" in sig.parameters:
if kwargs.get("axis", None) is None:
kwargs["axis"] = self.axis
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in base.plotting_methods:
return self.apply(curried)
try:
return self.apply(curried)
except TypeError as err:
if not re.search(
"reduction operation '.*' not allowed for this dtype", str(err)
):
# We don't have a cython implementation
# TODO: is the above comment accurate?
raise
if self.obj.ndim == 1:
# this can be called recursively, so need to raise ValueError
raise ValueError
# GH#3688 try to operate item-by-item
result = self._aggregate_item_by_item(name, *args, **kwargs)
return result
wrapper.__name__ = name
return wrapper
def get_group(self, name, obj=None):
"""
Construct DataFrame from group with provided name.
Parameters
----------
name : object
The name of the group to get as a DataFrame.
obj : DataFrame, default None
The DataFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used.
Returns
-------
group : same type as obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
if not len(inds):
raise KeyError(name)
return obj._take_with_is_copy(inds, axis=self.axis)
def __iter__(self):
"""
Groupby iterator.
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
@Appender(
_apply_docs["template"].format(
input="dataframe", examples=_apply_docs["dataframe_examples"]
)
)
def apply(self, func, *args, **kwargs):
func = self._is_builtin_func(func)
# this is needed so we don't try and wrap strings. If we could
# resolve functions to their callable functions prior, this
# wouldn't be needed
if args or kwargs:
if callable(func):
@wraps(func)
def f(g):
with np.errstate(all="ignore"):
return func(g, *args, **kwargs)
elif hasattr(nanops, "nan" + func):
# TODO: should we wrap this in to e.g. _is_builtin_func?
f = getattr(nanops, "nan" + func)
else:
raise ValueError(
"func must be a callable if args or kwargs are supplied"
)
else:
f = func
# ignore SettingWithCopy here in case the user mutates
with option_context("mode.chained_assignment", None):
try:
result = self._python_apply_general(f)
except TypeError:
# gh-20949
# try again, with .apply acting as a filtering
# operation, by excluding the grouping column
# This would normally not be triggered
# except if the udf is trying an operation that
# fails on *some* columns, e.g. a numeric operation
# on a string grouper column
with _group_selection_context(self):
return self._python_apply_general(f)
return result
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj, self.axis)
return self._wrap_applied_output(
keys, values, not_indexed_same=mutated or self.mutated
)
def _iterate_slices(self) -> Iterable[Series]:
raise AbstractMethodError(self)
def transform(self, func, *args, **kwargs):
raise AbstractMethodError(self)
def _cumcount_array(self, ascending: bool = True):
"""
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Notes
-----
this is currently implementing sort=False
(though the default is sort=True) for groupby in general
"""
ids, _, ngroups = self.grouper.group_info
sorter = get_group_index_sorter(ids, ngroups)
ids, count = ids[sorter], len(ids)
if count == 0:
return np.empty(0, dtype=np.int64)
run = np.r_[True, ids[:-1] != ids[1:]]
rep = np.diff(np.r_[np.nonzero(run)[0], count])
out = (~run).cumsum()
if ascending:
out -= np.repeat(out[run], rep)
else:
out = np.repeat(out[np.r_[run[1:], True]], rep) - out
rev = np.empty(count, dtype=np.intp)
rev[sorter] = np.arange(count, dtype=np.intp)
return out[rev].astype(np.int64, copy=False)
def _transform_should_cast(self, func_nm: str) -> bool:
"""
Parameters
----------
func_nm: str
The name of the aggregation function being performed
Returns
-------
bool
Whether transform should attempt to cast the result of aggregation
"""
return (self.size().fillna(0) > 0).any() and (
func_nm not in base.cython_cast_blacklist
)
def _cython_transform(self, how: str, numeric_only: bool = True, **kwargs):
output: Dict[base.OutputKey, np.ndarray] = {}
for idx, obj in enumerate(self._iterate_slices()):
name = obj.name
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, _ = self.grouper.transform(obj.values, how, **kwargs)
except NotImplementedError:
continue
if self._transform_should_cast(how):
result = maybe_cast_result(result, obj, how=how)
key = base.OutputKey(label=name, position=idx)
output[key] = result
if len(output) == 0:
raise DataError("No numeric types to aggregate")
return self._wrap_transformed_output(output)
def _wrap_aggregated_output(self, output: Mapping[base.OutputKey, np.ndarray]):
raise AbstractMethodError(self)
def _wrap_transformed_output(self, output: Mapping[base.OutputKey, np.ndarray]):
raise AbstractMethodError(self)
def _wrap_applied_output(self, keys, values, not_indexed_same: bool = False):
raise AbstractMethodError(self)
def _cython_agg_general(
self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1
):
output: Dict[base.OutputKey, Union[np.ndarray, DatetimeArray]] = {}
# Ideally we would be able to enumerate self._iterate_slices and use
# the index from enumeration as the key of output, but ohlc in particular
# returns a (n x 4) array. Output requires 1D ndarrays as values, so we
# need to slice that up into 1D arrays
idx = 0
for obj in self._iterate_slices():
name = obj.name
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
result, agg_names = self.grouper.aggregate(
obj._values, how, min_count=min_count
)
if agg_names:
# e.g. ohlc
assert len(agg_names) == result.shape[1]
for result_column, result_name in zip(result.T, agg_names):
key = base.OutputKey(label=result_name, position=idx)
output[key] = maybe_cast_result(result_column, obj, how=how)
idx += 1
else:
assert result.ndim == 1
key = base.OutputKey(label=name, position=idx)
output[key] = maybe_cast_result(result, obj, how=how)
idx += 1
if len(output) == 0:
raise DataError("No numeric types to aggregate")
return self._wrap_aggregated_output(output)
def _python_agg_general(
self, func, *args, engine="cython", engine_kwargs=None, **kwargs
):
func = self._is_builtin_func(func)
if engine != "numba":
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" (excluding exclusions) to populate the output dict
output: Dict[base.OutputKey, np.ndarray] = {}
for idx, obj in enumerate(self._iterate_slices()):
name = obj.name
if self.grouper.ngroups == 0:
# agg_series below assumes ngroups > 0
continue
if engine == "numba":
result, counts = self.grouper.agg_series(
obj,
func,
*args,
engine=engine,
engine_kwargs=engine_kwargs,
**kwargs,
)
else:
try:
# if this function is invalid for this dtype, we will ignore it.
result, counts = self.grouper.agg_series(obj, f)
except TypeError:
continue
assert result is not None
key = base.OutputKey(label=name, position=idx)
output[key] = maybe_cast_result(result, obj, numeric_only=True)
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for key, result in output.items():
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = ensure_float(values)
output[key] = maybe_cast_result(values[mask], result)
return self._wrap_aggregated_output(output)
def _concat_objects(self, keys, values, not_indexed_same: bool = False):
from pandas.core.reshape.concat import concat
def reset_identity(values):
# reset the identities of the components
# of the values to prevent aliasing
for v in com.not_none(*values):
ax = v._get_axis(self.axis)
ax._reset_identity()
return values
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
# this is a very unfortunate situation
# we can't use reindex to restore the original order
# when the ax has duplicates
# so we resort to this
# GH 14776, 30667
if ax.has_duplicates:
indexer, _ = result.index.get_indexer_non_unique(ax.values)
indexer = algorithms.unique1d(indexer)
result = result.take(indexer, axis=self.axis)
else:
result = result.reindex(ax, axis=self.axis)
elif self.group_keys:
values = reset_identity(values)
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(
values,
axis=self.axis,
keys=group_keys,
levels=group_levels,
names=group_names,
sort=False,
)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
values = reset_identity(values)
result = concat(values, axis=self.axis)
if isinstance(result, Series) and self._selection_name is not None:
result.name = self._selection_name
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = np.array([], dtype="int64")
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices, axis=self.axis)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
# To track operations that expand dimensions, like ohlc
OutputFrameOrSeries = TypeVar("OutputFrameOrSeries", bound=NDFrame)
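# Hedged sketch of a dimension-expanding reduction, the case OutputFrameOrSeries
# exists for (illustrative helper): ``ohlc`` turns a single value column into
# four output columns (open, high, low, close) per group.
def _ohlc_expansion_sketch():
    import pandas as pd

    s = pd.Series([1, 3, 2, 5, 4], index=["a", "a", "a", "b", "b"])
    return s.groupby(level=0).ohlc()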
class GroupBy(_GroupBy[FrameOrSeries]):
"""
Class for grouping and aggregating relational data.
See aggregate, transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : str
Most users should ignore this
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
"""
@property
def _obj_1d_constructor(self) -> Type["Series"]:
# GH28330 preserve subclassed Series/DataFrames
if isinstance(self.obj, DataFrame):
return self.obj._constructor_sliced
assert isinstance(self.obj, Series)
return self.obj._constructor
def _bool_agg(self, val_test, skipna):
"""
Shared func to call any / all Cython GroupBy implementations.
"""
def objs_to_bool(vals: np.ndarray) -> Tuple[np.ndarray, Type]:
if is_object_dtype(vals):
vals = np.array([bool(x) for x in vals])
else:
vals = vals.astype(np.bool)
return vals.view(np.uint8), np.bool
def result_to_bool(result: np.ndarray, inference: Type) -> np.ndarray:
return result.astype(inference, copy=False)
return self._get_cythonized_result(
"group_any_all",
aggregate=True,
cython_dtype=np.dtype(np.uint8),
needs_values=True,
needs_mask=True,
pre_processing=objs_to_bool,
post_processing=result_to_bool,
val_test=val_test,
skipna=skipna,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def any(self, skipna: bool = True):
"""
Return True if any value in the group is truthful, else False.
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing.
Returns
-------
bool
"""
return self._bool_agg("any", skipna)
@Substitution(name="groupby")
@Appender(_common_see_also)
def all(self, skipna: bool = True):
"""
Return True if all values in the group are truthful, else False.
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing.
Returns
-------
bool
"""
return self._bool_agg("all", skipna)
@Substitution(name="groupby")
@Appender(_common_see_also)
def count(self):
"""
Compute count of group, excluding missing values.
Returns
-------
Series or DataFrame
Count of values within each group.
"""
# defined here for API doc
raise NotImplementedError
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
def mean(self, numeric_only: bool = True):
"""
Compute mean of groups, excluding missing values.
Parameters
----------
numeric_only : bool, default True
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
Returns
-------
pandas.Series or pandas.DataFrame
%(see_also)s
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])
Groupby one column and return the mean of the remaining columns in
each group.
>>> df.groupby('A').mean()
B C
A
1 3.0 1.333333
2 4.0 1.500000
Groupby two columns and return the mean of the remaining column.
>>> df.groupby(['A', 'B']).mean()
C
A B
1 2.0 2
4.0 1
2 3.0 1
5.0 2
Groupby one column and return the mean of only particular column in
the group.
>>> df.groupby('A')['B'].mean()
A
1 3.0
2 4.0
Name: B, dtype: float64
"""
return self._cython_agg_general(
"mean",
alt=lambda x, axis: Series(x).mean(numeric_only=numeric_only),
numeric_only=numeric_only,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def median(self, numeric_only=True):
"""
Compute median of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
numeric_only : bool, default True
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
Returns
-------
Series or DataFrame
Median of values within each group.
"""
return self._cython_agg_general(
"median",
alt=lambda x, axis: Series(x).median(axis=axis, numeric_only=numeric_only),
numeric_only=numeric_only,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def std(self, ddof: int = 1):
"""
Compute standard deviation of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Standard deviation of values within each group.
"""
# TODO: implement at Cython level?
return np.sqrt(self.var(ddof=ddof))
@Substitution(name="groupby")
@Appender(_common_see_also)
def var(self, ddof: int = 1):
"""
Compute variance of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Variance of values within each group.
"""
if ddof == 1:
return self._cython_agg_general(
"var", alt=lambda x, axis: Series(x).var(ddof=ddof)
)
else:
func = lambda x: x.var(ddof=ddof)
with _group_selection_context(self):
return self._python_agg_general(func)
@Substitution(name="groupby")
@Appender(_common_see_also)
def sem(self, ddof: int = 1):
"""
Compute standard error of the mean of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Standard error of the mean of values within each group.
"""
return self.std(ddof=ddof) / np.sqrt(self.count())
@Substitution(name="groupby")
@Appender(_common_see_also)
def size(self):
"""
Compute group sizes.
Returns
-------
Series
Number of rows in each group.
"""
result = self.grouper.size()
# GH28330 preserve subclassed Series/DataFrames through calls
if issubclass(self.obj._constructor, Series):
result = self._obj_1d_constructor(result, name=self.obj.name)
else:
result = self._obj_1d_constructor(result)
return self._reindex_output(result, fill_value=0)
@classmethod
def _add_numeric_operations(cls):
"""
Add numeric operations to the GroupBy generically.
"""
def groupby_function(
name: str,
alias: str,
npfunc,
numeric_only: bool = True,
min_count: int = -1,
):
_local_template = """
Compute %(f)s of group values.
Parameters
----------
numeric_only : bool, default %(no)s
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
min_count : int, default %(mc)s
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
Returns
-------
Series or DataFrame
Computed %(f)s of values within each group.
"""
@Substitution(name="groupby", f=name, no=numeric_only, mc=min_count)
@Appender(_common_see_also)
@Appender(_local_template)
def func(self, numeric_only=numeric_only, min_count=min_count):
self._set_group_selection()
# try a cython aggregation if we can
try:
return self._cython_agg_general(
how=alias,
alt=npfunc,
numeric_only=numeric_only,
min_count=min_count,
)
except DataError:
pass
except NotImplementedError as err:
if "function is not implemented for this dtype" in str(
err
) or "category dtype not supported" in str(err):
# raised in _get_cython_function, in some cases can
# be trimmed by implementing cython funcs for more dtypes
pass
else:
raise
# apply a non-cython aggregation
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
return result
set_function_name(func, name, cls)
return func
def first_compat(obj: FrameOrSeries, axis: int = 0):
def first(x: Series):
x = x.array[notna(x.array)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(obj, DataFrame):
return obj.apply(first, axis=axis)
elif isinstance(obj, Series):
return first(obj)
else:
raise TypeError(type(obj))
def last_compat(obj: FrameOrSeries, axis: int = 0):
def last(x: Series):
x = x.array[notna(x.array)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(obj, DataFrame):
return obj.apply(last, axis=axis)
elif isinstance(obj, Series):
return last(obj)
else:
raise TypeError(type(obj))
cls.sum = groupby_function("sum", "add", np.sum, min_count=0)
cls.prod = groupby_function("prod", "prod", np.prod, min_count=0)
cls.min = groupby_function("min", "min", np.min, numeric_only=False)
cls.max = groupby_function("max", "max", np.max, numeric_only=False)
cls.first = groupby_function("first", "first", first_compat, numeric_only=False)
cls.last = groupby_function("last", "last", last_compat, numeric_only=False)
@Substitution(name="groupby")
@Appender(_common_see_also)
def ohlc(self) -> DataFrame:
"""
Compute open, high, low and close values of a group, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
Returns
-------
DataFrame
Open, high, low and close values within each group.
"""
return self._apply_to_column_groupbys(lambda x: x._cython_agg_general("ohlc"))
@doc(DataFrame.describe)
def describe(self, **kwargs):
with _group_selection_context(self):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
def resample(self, rule, *args, **kwargs):
"""
Provide resampling when using a TimeGrouper.
Given a grouper, the function resamples it according to a string
"string" -> "frequency".
See the :ref:`frequency aliases <timeseries.offset_aliases>`
documentation for more details.
Parameters
----------
rule : str or DateOffset
The offset string or object representing target grouper conversion.
*args, **kwargs
Possible arguments are `how`, `fill_method`, `limit`, `kind` and
`on`, and other arguments of `TimeGrouper`.
Returns
-------
Grouper
Return a new grouper with our resampler appended.
See Also
--------
Grouper : Specify a frequency to resample with when
grouping by a key.
DatetimeIndex.resample : Frequency conversion and resampling of
time series.
Examples
--------
>>> idx = pd.date_range('1/1/2000', periods=4, freq='T')
>>> df = pd.DataFrame(data=4 * [range(2)],
... index=idx,
... columns=['a', 'b'])
>>> df.iloc[2, 0] = 5
>>> df
a b
2000-01-01 00:00:00 0 1
2000-01-01 00:01:00 0 1
2000-01-01 00:02:00 5 1
2000-01-01 00:03:00 0 1
Downsample the DataFrame into 3 minute bins and sum the values of
the timestamps falling into a bin.
>>> df.groupby('a').resample('3T').sum()
a b
a
0 2000-01-01 00:00:00 0 2
2000-01-01 00:03:00 0 1
5 2000-01-01 00:00:00 5 1
Upsample the series into 30 second bins.
>>> df.groupby('a').resample('30S').sum()
a b
a
0 2000-01-01 00:00:00 0 1
2000-01-01 00:00:30 0 0
2000-01-01 00:01:00 0 1
2000-01-01 00:01:30 0 0
2000-01-01 00:02:00 0 0
2000-01-01 00:02:30 0 0
2000-01-01 00:03:00 0 1
5 2000-01-01 00:02:00 5 1
Resample by month. Values are assigned to the month of the period.
>>> df.groupby('a').resample('M').sum()
a b
a
0 2000-01-31 0 3
5 2000-01-31 5 1
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
>>> df.groupby('a').resample('3T', closed='right').sum()
a b
a
0 1999-12-31 23:57:00 0 1
2000-01-01 00:00:00 0 2
5 2000-01-01 00:00:00 5 1
Downsample the series into 3 minute bins and close the right side of
the bin interval, but label each bin using the right edge instead of
the left.
>>> df.groupby('a').resample('3T', closed='right', label='right').sum()
a b
a
0 2000-01-01 00:00:00 0 1
2000-01-01 00:03:00 0 2
5 2000-01-01 00:03:00 5 1
"""
from pandas.core.resample import get_resampler_for_grouping
return get_resampler_for_grouping(self, rule, *args, **kwargs)
@Substitution(name="groupby")
@Appender(_common_see_also)
def rolling(self, *args, **kwargs):
"""
Return a rolling grouper, providing rolling functionality per group.
"""
from pandas.core.window import RollingGroupby
return RollingGroupby(self, *args, **kwargs)
@Substitution(name="groupby")
@Appender(_common_see_also)
def expanding(self, *args, **kwargs):
"""
Return an expanding grouper, providing expanding
functionality per group.
"""
from pandas.core.window import ExpandingGroupby
return ExpandingGroupby(self, *args, **kwargs)
def _fill(self, direction, limit=None):
"""
Shared function for `pad` and `backfill` to call Cython method.
Parameters
----------
direction : {'ffill', 'bfill'}
Direction passed to underlying Cython function. `bfill` will cause
values to be filled backwards. `ffill` and any other values will
default to a forward fill
limit : int, default None
Maximum number of consecutive values to fill. If `None`, this
method will convert to -1 prior to passing to Cython
Returns
-------
`Series` or `DataFrame` with filled values
See Also
--------
pad
backfill
"""
# Need int value for Cython
if limit is None:
limit = -1
return self._get_cythonized_result(
"group_fillna_indexer",
needs_mask=True,
cython_dtype=np.dtype(np.int64),
result_is_index=True,
direction=direction,
limit=limit,
)
@Substitution(name="groupby")
def pad(self, limit=None):
"""
Forward fill the values.
Parameters
----------
limit : int, optional
Limit of how many values to fill.
Returns
-------
Series or DataFrame
Object with missing values filled.
See Also
--------
Series.pad
DataFrame.pad
Series.fillna
DataFrame.fillna
"""
return self._fill("ffill", limit=limit)
ffill = pad
@Substitution(name="groupby")
def backfill(self, limit=None):
"""
Backward fill the values.
Parameters
----------
limit : int, optional
Limit of how many values to fill.
Returns
-------
Series or DataFrame
Object with missing values filled.
See Also
--------
Series.backfill
DataFrame.backfill
Series.fillna
DataFrame.fillna
"""
return self._fill("bfill", limit=limit)
bfill = backfill
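# Illustrative doctest-style sketch (assumed data, not part of the pandas source):
# pad()/ffill() and backfill()/bfill() only fill within each group, never across
# group boundaries.
# >>> df = pd.DataFrame({"key": ["a", "a", "b", "b"],
# ...                    "val": [1.0, np.nan, np.nan, 4.0]})
# >>> df.groupby("key")["val"].ffill()
# 0    1.0
# 1    1.0
# 2    NaN
# 3    4.0
# Name: val, dtype: float64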
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
def nth(self, n: Union[int, List[int]], dropna: Optional[str] = None) -> DataFrame:
"""
Take the nth row from each group if n is an int, or a subset of rows
if n is a list of ints.
If dropna, will take the nth non-null row, dropna is either
'all' or 'any'; this is equivalent to calling dropna(how=dropna)
before the groupby.
Parameters
----------
n : int or list of ints
A single nth value for the row or a list of nth values.
dropna : None or str, optional
Apply the specified dropna operation before counting which row is
the nth row. Needs to be None, 'any' or 'all'.
Returns
-------
Series or DataFrame
N-th value within each group.
%(see_also)s
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
B
A
1 NaN
2 3.0
>>> g.nth(1)
B
A
1 2.0
2 5.0
>>> g.nth(-1)
B
A
1 4.0
2 5.0
>>> g.nth([0, 1])
B
A
1 NaN
1 2.0
2 3.0
2 5.0
Specifying `dropna` allows count ignoring ``NaN``
>>> g.nth(0, dropna='any')
B
A
1 2.0
2 3.0
NaNs denote group exhausted when using dropna
>>> g.nth(3, dropna='any')
B
A
1 NaN
2 NaN
Specifying `as_index=False` in `groupby` keeps the original index.
>>> df.groupby('A', as_index=False).nth(1)
A B
1 1 2.0
4 2 5.0
"""
valid_containers = (set, list, tuple)
if not isinstance(n, (valid_containers, int)):
raise TypeError("n needs to be an int or a list/set/tuple of ints")
if not dropna:
if isinstance(n, int):
nth_values = [n]
elif isinstance(n, valid_containers):
nth_values = list(set(n))
nth_array = np.array(nth_values, dtype=np.intp)
self._set_group_selection()
mask_left = np.in1d(self._cumcount_array(), nth_array)
mask_right = np.in1d(self._cumcount_array(ascending=False) + 1, -nth_array)
mask = mask_left | mask_right
ids, _, _ = self.grouper.group_info
# Drop NA values in grouping
mask = mask & (ids != -1)
out = self._selected_obj[mask]
if not self.as_index:
return out
result_index = self.grouper.result_index
out.index = result_index[ids[mask]]
if not self.observed and isinstance(result_index, CategoricalIndex):
out = out.reindex(result_index)
out = self._reindex_output(out)
return out.sort_index() if self.sort else out
# dropna is truthy
if isinstance(n, valid_containers):
raise ValueError("dropna option with a list of nth values is not supported")
if dropna not in ["any", "all"]:
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError(
"For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
f"(was passed {dropna})."
)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else -1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available
# (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
else:
# create a grouper with the original parameters, but on dropped
# object
from pandas.core.groupby.grouper import get_grouper
grouper, _, _ = get_grouper(
dropped,
key=self.keys,
axis=self.axis,
level=self.level,
sort=self.sort,
mutated=self.mutated,
)
grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)
sizes, result = grb.size(), grb.nth(n)
mask = (sizes < max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or len(result) == len(
self.grouper.result_index
):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
def quantile(self, q=0.5, interpolation: str = "linear"):
"""
Return group values at the given quantile, a la numpy.percentile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
Value(s) between 0 and 1 providing the quantile(s) to compute.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
Method to use when the desired quantile falls between two points.
Returns
-------
Series or DataFrame
Return type determined by caller of GroupBy object.
See Also
--------
Series.quantile : Similar method for Series.
DataFrame.quantile : Similar method for DataFrame.
numpy.percentile : NumPy method to compute qth percentile.
Examples
--------
>>> df = pd.DataFrame([
... ['a', 1], ['a', 2], ['a', 3],
... ['b', 1], ['b', 3], ['b', 5]
... ], columns=['key', 'val'])
>>> df.groupby('key').quantile()
val
key
a 2.0
b 3.0
"""
from pandas import concat
def pre_processor(vals: np.ndarray) -> Tuple[np.ndarray, Optional[Type]]:
if is_object_dtype(vals):
raise TypeError(
"'quantile' cannot be performed against 'object' dtypes!"
)
inference = None
if is_integer_dtype(vals.dtype):
if is_extension_array_dtype(vals.dtype):
vals = vals.to_numpy(dtype=float, na_value=np.nan)
inference = np.int64
elif is_bool_dtype(vals.dtype) and is_extension_array_dtype(vals.dtype):
vals = vals.to_numpy(dtype=float, na_value=np.nan)
elif is_datetime64_dtype(vals.dtype):
inference = "datetime64[ns]"
vals = np.asarray(vals).astype(float)
return vals, inference
def post_processor(vals: np.ndarray, inference: Optional[Type]) -> np.ndarray:
if inference:
# Check for edge case
if not (
is_integer_dtype(inference)
and interpolation in {"linear", "midpoint"}
):
vals = vals.astype(inference)
return vals
if is_scalar(q):
return self._get_cythonized_result(
"group_quantile",
aggregate=True,
needs_values=True,
needs_mask=True,
cython_dtype=np.dtype(np.float64),
pre_processing=pre_processor,
post_processing=post_processor,
q=q,
interpolation=interpolation,
)
else:
results = [
self._get_cythonized_result(
"group_quantile",
aggregate=True,
needs_values=True,
needs_mask=True,
cython_dtype=np.dtype(np.float64),
pre_processing=pre_processor,
post_processing=post_processor,
q=qi,
interpolation=interpolation,
)
for qi in q
]
result = concat(results, axis=0, keys=q)
# fix levels to place quantiles on the inside
# TODO(GH-10710): Ideally, we could write this as
# >>> result.stack(0).loc[pd.IndexSlice[:, ..., q], :]
# but this hits https://github.com/pandas-dev/pandas/issues/10710
# which doesn't reorder the list-like `q` on the inner level.
order = list(range(1, result.index.nlevels)) + [0]
# temporarily saves the index names
index_names = np.array(result.index.names)
# set index names to positions to avoid confusion
result.index.names = np.arange(len(index_names))
# place quantiles on the inside
result = result.reorder_levels(order)
# restore the index names in order
result.index.names = index_names[order]
# reorder rows to keep things sorted
indices = np.arange(len(result)).reshape([len(q), self.ngroups]).T.flatten()
return result.take(indices)
@Substitution(name="groupby")
def ngroup(self, ascending: bool = True):
"""
Number each group from 0 to the number of groups - 1.
This is the enumerative complement of cumcount. Note that the
numbers given to the groups match the order in which the groups
would be seen when iterating over the groupby object, not the
order they are first observed.
Parameters
----------
ascending : bool, default True
If False, number in reverse, from number of group - 1 to 0.
Returns
-------
Series
Unique numbers for each group.
See Also
--------
.cumcount : Number the rows in each group.
Examples
--------
>>> df = pd.DataFrame({"A": list("aaabba")})
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').ngroup()
0 0
1 0
2 0
3 1
4 1
5 0
dtype: int64
>>> df.groupby('A').ngroup(ascending=False)
0 1
1 1
2 1
3 0
4 0
5 1
dtype: int64
>>> df.groupby(["A", [1,1,2,3,2,1]]).ngroup()
0 0
1 0
2 1
3 3
4 2
5 0
dtype: int64
"""
with _group_selection_context(self):
index = self._selected_obj.index
result = self._obj_1d_constructor(self.grouper.group_info[0], index)
if not ascending:
result = self.ngroups - 1 - result
return result
@ | Substitution(name="groupby") | pandas.util._decorators.Substitution |
#!/usr/bin/env python
# coding: utf-8
# # Create datasets
# ## Scaling, Reduction and Feature Selection
# The original dataset and/or the balanced ones will first be split into separate files as training and test subsets using a **seed**. All the scaling and feature selection will be applied **only on the training set**:
# - *Dataset split*: train, test sets; the train set will be divided into train and validation during the future Machine Learning hyperparameter search for the best model with a ML method;
# - *Scaling* of the train set using centering, standardization, etc.;
# - *Reduction* of the train set dimension (after scaling): decrease the number of features by using fewer dimensions/derived features;
# - *Feature selection* using the train set (after scaling): decrease the number of features by keeping only the most important ones for the classification.
#
# Two CSV files will be created for each type of scaling, reduction or feature selection: *tr* - training and *ts* - test.
# In[ ]:
import numpy as np
import pandas as pd
import os
from sklearn.model_selection import train_test_split # for dataset split
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import QuantileTransformer
from sklearn.preprocessing import PowerTransformer
# Let's define the name of the original dataset, the folder and the prefix characters for each scaling, dimension reduction or feature selection. Each transformation will add a prefix to the previous name of the file.
#
# **You can use the original dataset, which could be unbalanced, or the balanced datasets obtained with the previous scripts (one file only)!**
# In[ ]:
# Create scaled datasets using the normalized MA dataset
# Two CSV files will be created for each type of scaling, reduction or feature selection
WorkingFolder = './datasets/'
# change this to the balanced datasets such as upsampl.ds_MA.csv or downsampl.ds_MA.csv
# if you want to run all files, you should modify the entire script by looping over all
# transformations using a list of input files [original, undersampled, upsampled]
sOrigDataSet = 'ds_MA.csv'
# Split details
seed = 44 # for reproducibility
test_size = 0.25 # train size = 1 - test_size
outVar = 'Lij' # output variable
# Scalers: the files as prefix + original name
# =================================================
# Original (no scaling!), StandardScaler, MinMaxScaler, RobustScaler,
# QuantileTransformer (normal), QuantileTransformer(uniform)
# scaler prefix for file name
#scalerPrefix = ['o', 's', 'm', 'r', 'pyj', 'qn', 'qu']
# scalerPrefix = ['o', 's', 'm', 'r']
scalerPrefix = ['s']
# sklearn scalers
#scalerList = [None, StandardScaler(), MinMaxScaler(),
# RobustScaler(quantile_range=(25, 75)),
# PowerTransformer(method='yeo-johnson'),
# QuantileTransformer(output_distribution='normal'),
# QuantileTransformer(output_distribution='uniform')]
# sklearn scalers
# scalerList = [None, StandardScaler(), MinMaxScaler(), RobustScaler()]
scalerList = [StandardScaler()]
# Dimension Reductions
# ===================
# PCA
reductionPrefix = 'pca'
# Feature selection
# =================
# RF feature selection, Univariate feature selection using chi-squared test,
# Univariate feature selection with mutual information
# prefix to add to the processed files for each FS method
#FSprefix = ['fs.rf.',
# 'fs.univchi.',
# 'fs.univmi.']
FSprefix = ['fs-rf.']
# number of total features for reduction and selection
noSelFeatures = 50
# Start by reading the original dataset:
# In[ ]:
print('-> Reading source dataset:',sOrigDataSet,'...')
df = pd.read_csv(os.path.join(WorkingFolder, sOrigDataSet))
print('Columns:',len(df.columns),'Rows:',len(df))
print('Done')
# ## Dataset split
#
# First, split the dataset using stratification for non-balanced datasets: the ratio between the classes is the same in the training and test sets.
# In[ ]:
# Get features and output as dataframes
print('--> Split of dataset in training and test ...')
X = df.drop(outVar, axis = 1) # remove output variable from input features
y = df[outVar] # get only the output variable
# get only the values for features and output (as arrays)
Xdata = X.values # get values of features
Ydata = y.values # get output values
# split data in training and test sets (X = input features, y = output variable)
# using a seed, test size (defined above) and stratification for unbalanced classes
X_train, X_test, y_train, y_test = train_test_split(Xdata, Ydata,
test_size=test_size,
random_state=seed,
stratify=Ydata)
print('X_train:', X_train.shape)
print('y_train:', y_train.shape)
print('X_test:', X_test.shape)
print('y_test:', y_test.shape)
print('Done!')
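# Illustrative effect of stratify (hypothetical counts, not from this dataset): with
# 1000 rows of which 100 belong to class 1 and test_size=0.25, the split keeps
# roughly 75 positives in train and 25 in test, so both subsets preserve the
# original 10% positive rate.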
# ## Dataset scaling
#
# Two files will be saved for the training and test sets for each scaling, including the non-scaled dataset.
# In[ ]:
# Scale dataset
print('-> Scaling dataset train and test:')
for scaler in scalerList: # or scalerPrefix
# new file name; we will add tr and ts + csv
newFile = scalerPrefix[scalerList.index(scaler)]+'.'+sOrigDataSet[:-4]
# decide to scale or not
if scaler is None: # if it is the original dataset, do not scale!
print('--> Original (no scaler!) ...')
X_train_transf = X_train # do not modify train set
X_test_transf = X_test # do not modify test set
else: # if it is not the original dataset, apply scalers
print('--> Scaler:', str(scaler), '...')
X_train_transf = scaler.fit_transform(X_train) # use a scaler to modify only train set
X_test_transf = scaler.transform(X_test) # use the same transformation for test set
# Save the training scaled dataset
df_tr_scaler = pd.DataFrame(X_train_transf, columns=X.columns)
df_tr_scaler[outVar]= y_train
newFile_tr = newFile +'_tr.csv'
print('---> Saving training:', newFile_tr, ' ...')
df_tr_scaler.to_csv(os.path.join(WorkingFolder, newFile_tr), index=False)
# Save the test scaled dataset
df_ts_scaler = | pd.DataFrame(X_test_transf, columns=X.columns) | pandas.DataFrame |
import matplotlib.pyplot as plt
import pandas as pd
df = | pd.DataFrame([[1,2,3],[7,0,3],[1,2,2]],columns=['col1','col2','col3'])
df.plot() | pandas.DataFrame |
# Implementation of Multiplicative Marketing Mix Model, Adstock and Diminishing Return
# Author: <NAME>
# Pystan Installation Tips (mac, anaconda3)
# 1. In bash:
# (create a stan environment, install pystan, current version is 2.19)
# conda create -n stan_env python=3.7 -c conda-forge
# conda activate stan_env
# conda install pystan -c conda-forge
# (install gcc5, pystan 2.19 requires gcc4.9.3 and above)
# brew install gcc@5
# (look for 'gcc-10', 'g++-10')
# ls /usr/local/bin | grep gcc
# ls /usr/local/bin | grep g++
#
# 2. Open Anaconda Navigator > Home > Applications on: select stan_env as environment, launch Notebook
#
# 3. In python:
# import os
# os.environ['CC'] = 'gcc-10'
# os.environ['CXX'] = 'g++-10'
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import sys
import time
from datetime import datetime
from datetime import timedelta
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
sns.color_palette("husl")
sns.set_style('darkgrid')
# Data
# Four years' (209 weeks) records of sales, media impressions and media spending at the weekly level.
df = pd.read_csv('data.csv')
# 1. media variables
# media impression
mdip_cols=[col for col in df.columns if 'mdip_' in col]
# media spending
mdsp_cols=[col for col in df.columns if 'mdsp_' in col]
# 2. control variables
# macro economics variables
me_cols = [col for col in df.columns if 'me_' in col]
# store count variables
st_cols = ['st_ct']
# markdown/discount variables
mrkdn_cols = [col for col in df.columns if 'mrkdn_' in col]
# holiday variables
hldy_cols = [col for col in df.columns if 'hldy_' in col]
# seasonality variables
seas_cols = [col for col in df.columns if 'seas_' in col]
base_vars = me_cols+st_cols+mrkdn_cols+hldy_cols+seas_cols
# 3. sales variables
sales_cols =['sales']
df[['wk_strt_dt']+mdip_cols+['sales']].head()
# EDA - correlation, distribution plots
plt.figure(figsize=(24,20))
sns.heatmap(df[mdip_cols+['sales']].corr(), square=True, annot=True, vmax=1, vmin=-1, cmap='RdBu')
plt.figure(figsize=(50,50))
sns.pairplot(df[mdip_cols+['sales']], vars=mdip_cols+['sales'])
# 1.1 Adstock
def apply_adstock(x, L, P, D):
'''
params:
x: original media variable, array
L: length
P: peak, delay in effect
D: decay, retain rate
returns:
array, adstocked media variable
'''
x = np.append(np.zeros(L-1), x)
weights = np.zeros(L)
for l in range(L):
weight = D**((l-P)**2)
weights[L-1-l] = weight
adstocked_x = []
for i in range(L-1, len(x)):
x_array = x[i-L+1:i+1]
xi = sum(x_array * weights)/sum(weights)
adstocked_x.append(xi)
adstocked_x = np.array(adstocked_x)
return adstocked_x
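# # Quick illustrative check of apply_adstock (made-up numbers, not from the dataset):
# # apply_adstock(np.array([10, 0, 0]), L=3, P=0, D=0.5)
# # weights (oldest -> newest lag) = [0.0625, 0.5, 1.0], sum = 1.5625
# # -> array([6.4, 3.2, 0.4]); a single impulse decays over the following weeks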
def adstock_transform(df, md_cols, adstock_params):
'''
params:
df: original data
md_cols: list, media variables to be transformed
adstock_params: dict,
e.g., {'sem': {'L': 8, 'P': 0, 'D': 0.1}, 'dm': {'L': 4, 'P': 1, 'D': 0.7}}
returns:
adstocked df
'''
md_df = pd.DataFrame()
for md_col in md_cols:
md = md_col.split('_')[-1]
L, P, D = adstock_params[md]['L'], adstock_params[md]['P'], adstock_params[md]['D']
xa = apply_adstock(df[md_col].values, L, P, D)
md_df[md_col] = xa
return md_df
# # plot adstock with varying decay
# fig, ax = plt.subplots(figsize=(15,5))
# psets = [
# [8, 1, 0.1],
# [8, 1, 0.9]
# ]
# xm = df['mdip_vidtr'].values
# sns.lineplot(x=range(52), y=xm[-52:], ax=ax, label='original')
# for i in range(len(psets)):
# p = psets[i]
# L, P, D = p[0], p[1], p[2]
# xm_adstocked = apply_adstock(xm, L, P, D)
# sns.lineplot(x=range(52), y=xm_adstocked[-52:], ax=ax,
# label='L=%d, P=%d, D=%.1f'%(L, P, D))
# ax.lines[i+1].set_linestyle("--")
# ax.set_title('Adstock Parameter: Decay', fontsize=16)
# # plot adstock with varying length
# fig, ax = plt.subplots(figsize=(15,5))
# psets = [
# [4, 1, 0.9],
# [12, 1, 0.9]
# ]
# xm = df['mdip_vidtr'].values
# sns.lineplot(x=range(52), y=xm[-52:], ax=ax, label='original')
# for i in range(len(psets)):
# p = psets[i]
# L, P, D = p[0], p[1], p[2]
# xm_adstocked = apply_adstock(xm, L, P, D)
# sns.lineplot(x=range(52), y=xm_adstocked[-52:], ax=ax,
# label='L=%d, P=%d, D=%.1f'%(L, P, D))
# ax.lines[i+1].set_linestyle("--")
# ax.set_title('Adstock Parameter: Length', fontsize=16)
# 1.2 Diminishing Return
def hill_transform(x, ec, slope):
return 1 / (1 + (x / ec)**(-slope))
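# # Illustrative values for the Hill function (not from the dataset): with K (ec) = 0.5
# # and S (slope) = 1.0,
# # hill_transform(0.5, 0.5, 1.0) == 0.5   # half saturation when x equals ec
# # hill_transform(1.0, 0.5, 1.0) ~= 0.667
# # hill_transform(2.0, 0.5, 1.0) == 0.8   # diminishing return as media spend grows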
# # plot hill function with varying K and S
# fig, ax = plt.subplots(figsize=(9,6))
# psets = [
# [0.5, 0.5],
# [0.5, 1.0],
# [0.95, 1.0],
# [0.95, 3.0]
# ]
# xm = np.arange(0,2,0.05)
# for i in range(len(psets)):
# p = psets[i]
# ec, slope = p[0], p[1]
# sns.lineplot(x=xm, y=hill_transform(xm, ec, slope), ax=ax,
# label='K=%.2f, S=%.1f'%(ec, slope))
# #ax.lines[i+1].set_linestyle("--")
# ax.set_title('Hill Function', fontsize=16)
# 2. Model Implementation
# The model is built in a stacked way. Three models are trained:
# - Control Model
# - Marketing Mix Model
# - Diminishing Return Model
# 2.1 Control Model / Base Sales Model
import pystan
import os
os.environ['CC'] = 'gcc-10'
os.environ['CXX'] = 'g++-10'
# helper functions
from sklearn.metrics import mean_squared_error
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
def apply_mean_center(x):
mu = np.mean(x)
xm = x/mu
return xm, mu
def mean_center_trandform(df, cols):
'''
returns:
mean-centered df
scaler, dict
'''
df_new = pd.DataFrame()
sc = {}
for col in cols:
x = df[col].values
df_new[col], mu = apply_mean_center(x)
sc[col] = mu
return df_new, sc
def mean_log1p_trandform(df, cols):
'''
returns:
mean-centered, log1p transformed df
scaler, dict
'''
df_new = pd.DataFrame()
sc = {}
for col in cols:
x = df[col].values
xm, mu = apply_mean_center(x)
sc[col] = mu
df_new[col] = np.log1p(xm)
return df_new, sc
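# Illustrative values (not from the data): mean-centering [2, 4, 6] divides by the
# mean 4.0 -> [0.5, 1.0, 1.5]; mean_log1p_trandform additionally applies np.log1p,
# giving approximately [0.405, 0.693, 0.916].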
import json
def save_json(data, file_name):
with open(file_name, 'w') as fp:
json.dump(data, fp)
def load_json(file_name):
with open(file_name, 'r') as fp:
data = json.load(fp)
return data
# mean-centralize: sales, numeric base_vars
df_ctrl, sc_ctrl = mean_center_trandform(df, ['sales']+me_cols+st_cols+mrkdn_cols)
df_ctrl = pd.concat([df_ctrl, df[hldy_cols+seas_cols]], axis=1)
# variables positively related to sales: macro economy, store count, markdown, holiday
pos_vars = [col for col in base_vars if col not in seas_cols]
X1 = df_ctrl[pos_vars].values
# variables may have either positive or negtive impact on sales: seasonality
pn_vars = seas_cols
X2 = df_ctrl[pn_vars].values
ctrl_data = {
'N': len(df_ctrl),
'K1': len(pos_vars),
'K2': len(pn_vars),
'X1': X1,
'X2': X2,
'y': df_ctrl['sales'].values,
'max_intercept': min(df_ctrl['sales'])
}
ctrl_code1 = '''
data {
int N; // number of observations
int K1; // number of positive predictors
int K2; // number of positive/negative predictors
real max_intercept; // restrict the intercept to be less than the minimum y
matrix[N, K1] X1;
matrix[N, K2] X2;
vector[N] y;
}
parameters {
vector<lower=0>[K1] beta1; // regression coefficients for X1 (positive)
vector[K2] beta2; // regression coefficients for X2
real<lower=0, upper=max_intercept> alpha; // intercept
real<lower=0> noise_var; // residual variance
}
model {
// Define the priors
beta1 ~ normal(0, 1);
beta2 ~ normal(0, 1);
noise_var ~ inv_gamma(0.05, 0.05 * 0.01);
// The likelihood
y ~ normal(X1*beta1 + X2*beta2 + alpha, sqrt(noise_var));
}
'''
sm1 = pystan.StanModel(model_code=ctrl_code1, verbose=True)
fit1 = sm1.sampling(data=ctrl_data, iter=2000, chains=4)
fit1_result = fit1.extract()
# extract control model parameters and predict base sales -> df['base_sales']
def extract_ctrl_model(fit_result, pos_vars=pos_vars, pn_vars=pn_vars,
extract_param_list=False):
ctrl_model = {}
ctrl_model['pos_vars'] = pos_vars
ctrl_model['pn_vars'] = pn_vars
ctrl_model['beta1'] = fit_result['beta1'].mean(axis=0).tolist()
ctrl_model['beta2'] = fit_result['beta2'].mean(axis=0).tolist()
ctrl_model['alpha'] = fit_result['alpha'].mean()
if extract_param_list:
ctrl_model['beta1_list'] = fit_result['beta1'].tolist()
ctrl_model['beta2_list'] = fit_result['beta2'].tolist()
ctrl_model['alpha_list'] = fit_result['alpha'].tolist()
return ctrl_model
def ctrl_model_predict(ctrl_model, df):
pos_vars, pn_vars = ctrl_model['pos_vars'], ctrl_model['pn_vars']
X1, X2 = df[pos_vars], df[pn_vars]
beta1, beta2 = np.array(ctrl_model['beta1']), np.array(ctrl_model['beta2'])
alpha = ctrl_model['alpha']
y_pred = np.dot(X1, beta1) + np.dot(X2, beta2) + alpha
return y_pred
base_sales_model = extract_ctrl_model(fit1_result, pos_vars=pos_vars, pn_vars=pn_vars)
base_sales = ctrl_model_predict(base_sales_model, df_ctrl)
df['base_sales'] = base_sales*sc_ctrl['sales']
# evaluate control model
print('mape: ', mean_absolute_percentage_error(df['sales'], df['base_sales']))
# np.savetxt("base_sales_pred.csv", df['base_sales'].values, delimiter=",")
# save_json(base_sales_model, 'ctrl_model.json')
# df['base_sales'] = pd.read_csv('base_sales_pred.csv', header=None).values
# 2.2 Marketing Mix Model
df_mmm, sc_mmm = mean_log1p_trandform(df, ['sales', 'base_sales'])
mu_mdip = df[mdip_cols].apply(np.mean, axis=0).values
max_lag = 8
num_media = len(mdip_cols)
# padding zero * (max_lag-1) rows
X_media = np.concatenate((np.zeros((max_lag-1, num_media)), df[mdip_cols].values), axis=0)
X_ctrl = df_mmm['base_sales'].values.reshape(len(df),1)
model_data2 = {
'N': len(df),
'max_lag': max_lag,
'num_media': num_media,
'X_media': X_media,
'mu_mdip': mu_mdip,
'num_ctrl': X_ctrl.shape[1],
'X_ctrl': X_ctrl,
'y': df_mmm['sales'].values
}
model_code2 = '''
functions {
// the adstock transformation with a vector of weights
real Adstock(vector t, row_vector weights) {
return dot_product(t, weights) / sum(weights);
}
}
data {
// the total number of observations
int<lower=1> N;
// the vector of sales
real y[N];
// the maximum duration of lag effect, in weeks
int<lower=1> max_lag;
// the number of media channels
int<lower=1> num_media;
// matrix of media variables
matrix[N+max_lag-1, num_media] X_media;
// vector of media variables' mean
real mu_mdip[num_media];
// the number of other control variables
int<lower=1> num_ctrl;
// a matrix of control variables
matrix[N, num_ctrl] X_ctrl;
}
parameters {
// residual variance
real<lower=0> noise_var;
// the intercept
real tau;
// the coefficients for media variables and base sales
vector<lower=0>[num_media+num_ctrl] beta;
// the decay and peak parameter for the adstock transformation of
// each media
vector<lower=0,upper=1>[num_media] decay;
vector<lower=0,upper=ceil(max_lag/2)>[num_media] peak;
}
transformed parameters {
// the cumulative media effect after adstock
real cum_effect;
// matrix of media variables after adstock
matrix[N, num_media] X_media_adstocked;
// matrix of all predictors
matrix[N, num_media+num_ctrl] X;
// adstock, mean-center, log1p transformation
row_vector[max_lag] lag_weights;
for (nn in 1:N) {
for (media in 1 : num_media) {
for (lag in 1 : max_lag) {
lag_weights[max_lag-lag+1] <- pow(decay[media], (lag - 1 - peak[media]) ^ 2);
}
cum_effect <- Adstock(sub_col(X_media, nn, media, max_lag), lag_weights);
X_media_adstocked[nn, media] <- log1p(cum_effect/mu_mdip[media]);
}
X <- append_col(X_media_adstocked, X_ctrl);
}
}
model {
decay ~ beta(3,3);
peak ~ uniform(0, ceil(max_lag/2));
tau ~ normal(0, 5);
for (i in 1 : num_media+num_ctrl) {
beta[i] ~ normal(0, 1);
}
noise_var ~ inv_gamma(0.05, 0.05 * 0.01);
y ~ normal(tau + X * beta, sqrt(noise_var));
}
'''
sm2 = pystan.StanModel(model_code=model_code2, verbose=True)
fit2 = sm2.sampling(data=model_data2, iter=1000, chains=3)
fit2_result = fit2.extract()
# extract mmm parameters
def extract_mmm(fit_result, max_lag=max_lag,
media_vars=mdip_cols, ctrl_vars=['base_sales'],
extract_param_list=True):
mmm = {}
mmm['max_lag'] = max_lag
mmm['media_vars'], mmm['ctrl_vars'] = media_vars, ctrl_vars
mmm['decay'] = decay = fit_result['decay'].mean(axis=0).tolist()
mmm['peak'] = peak = fit_result['peak'].mean(axis=0).tolist()
mmm['beta'] = fit_result['beta'].mean(axis=0).tolist()
mmm['tau'] = fit_result['tau'].mean()
if extract_param_list:
mmm['decay_list'] = fit_result['decay'].tolist()
mmm['peak_list'] = fit_result['peak'].tolist()
mmm['beta_list'] = fit_result['beta'].tolist()
mmm['tau_list'] = fit_result['tau'].tolist()
adstock_params = {}
media_names = [col.replace('mdip_', '') for col in media_vars]
for i in range(len(media_names)):
adstock_params[media_names[i]] = {
'L': max_lag,
'P': peak[i],
'D': decay[i]
}
mmm['adstock_params'] = adstock_params
return mmm
mmm = extract_mmm(fit2, max_lag=max_lag,
media_vars=mdip_cols, ctrl_vars=['base_sales'])
# save_json(mmm, 'mmm1.json')
# plot media coefficients' distributions
# red line: mean, green line: median
beta_media = {}
for i in range(len(mmm['media_vars'])):
md = mmm['media_vars'][i]
betas = []
for j in range(len(mmm['beta_list'])):
betas.append(mmm['beta_list'][j][i])
beta_media[md] = np.array(betas)
f = plt.figure(figsize=(18,15))
for i in range(len(mmm['media_vars'])):
ax = f.add_subplot(5,3,i+1)
md = mmm['media_vars'][i]
x = beta_media[md]
mean_x = x.mean()
median_x = np.median(x)
ax = sns.distplot(x)
ax.axvline(mean_x, color='r', linestyle='-')
ax.axvline(median_x, color='g', linestyle='-')
ax.set_title(md)
# Decompose sales to media channels' contribution
# Each media channel's contribution = total sales - sales upon removal of the channel
# decompose sales to media contribution
def mmm_decompose_contrib(mmm, df, original_sales=df['sales']):
# adstock params
adstock_params = mmm['adstock_params']
# coefficients, intercept
beta, tau = mmm['beta'], mmm['tau']
# variables
media_vars, ctrl_vars = mmm['media_vars'], mmm['ctrl_vars']
num_media, num_ctrl = len(media_vars), len(ctrl_vars)
# X_media2: adstocked, mean-centered media variables + 1
X_media2 = adstock_transform(df, media_vars, adstock_params)
X_media2, sc_mmm2 = mean_center_trandform(X_media2, media_vars)
X_media2 = X_media2 + 1
# X_ctrl2, mean-centered control variables + 1
X_ctrl2, sc_mmm2_1 = mean_center_trandform(df[ctrl_vars], ctrl_vars)
X_ctrl2 = X_ctrl2 + 1
# y_true2, mean-centered sales variable + 1
y_true2, sc_mmm2_2 = mean_center_trandform(df, ['sales'])
y_true2 = y_true2 + 1
sc_mmm2.update(sc_mmm2_1)
sc_mmm2.update(sc_mmm2_2)
# X2 <- media variables + ctrl variable
X2 = | pd.concat([X_media2, X_ctrl2], axis=1) | pandas.concat |
from pandas import DataFrame
import pandas as pd
import numpy as np
import numpy
def method(arr):
index = 0;
ag = arr
for r in arr:
if not (str(r).replace(" ","")== ""):
temp = "$" + str(ag[index])
ag[index] = temp
if r is None:
ag[index] = " "
index+=1
return ag
def has(string,arr):
for x in arr:
if (str(x).lower() in str(string).lower()):
return True
return False
def getNum(string,arr):
i = 0
for x in arr:
if (str(x).lower() in str(string).lower()):
return i
i += 1
return 0
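# Illustrative behaviour (hypothetical inputs): both helpers match on substrings,
# case-insensitively, e.g.
# has("Harvard University", ["MIT", "Harvard"]) -> True
# getNum("Harvard University", ["MIT", "Harvard"]) -> 1 (index of the first match)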
def repl(arr):
hhh = 0
for g in arr:
if g is None or g == "-" or g=="nan":
arr[hhh] = " "
hhh += 1
return arr
def appen(arr,num):
light = arr
for i in range(0,num):
light = numpy.append(light, [" "])  # np.append returns a new array, so keep the result
return light
writer1 = pd.ExcelFile("publicColleges+tuitions.xlsx")
df = writer1.parse("Sheet1")
arr = df.to_numpy()
index = 0
A = arr[:, 0]
puCOST_IN = arr[:, 1]
puCOST_OUT = arr[:, 2]
puLIVE = repl(arr[:, 3])
for s in A:
A[index] = A[index].replace("-"," ")
index += 1
reader1 = pd.ExcelFile("collegeGridLoans.xlsx")
df2 = reader1.parse("Sheet1")
arr2 = df2.to_numpy()
index = 0
B = arr2[:, 0] #big grid college names
writer2 = pd.ExcelFile("privateColleges+tuitions.xlsx")
df3 = writer2.parse("Sheet1")
arr3 = df3.to_numpy()
index = 0
G = arr3[:, 0]
prCOST_IN = arr3[:, 1]
prCOST_OUT = arr3[:, 2]
prLIVE = repl(arr3[:, 3])  # housing column of the private-college sheet
#Big grid college names: B
#public college names: A
#private college names: G
privateCostsIN = [None] * len(B)
publicCostsIN = [None] * len(B)
privateCostsOUT = [None] * len(B)
publicCostsOUT = [None] * len(B)
privateLive = [None] * len(B)
publicLive = [None] * len(B)
both_live = [None] * len(B)
both_costIN = [None] * len(B)
both_costOUT = [None] * len(B)
pubPriv = [None] * len(B)
print(prLIVE)
for streamline in A:
if has(streamline,B):
e = getNum(streamline,B)
r = getNum(streamline,A)
pubPriv[e] = "Public"
publicCostsIN[e] = puCOST_IN[r]
publicCostsOUT[e] = puCOST_OUT[r]
both_costOUT[e] = puCOST_OUT[r]
both_costIN[e] = puCOST_IN[r]
if r < 701:
publicLive[e] = puLIVE[r]
both_live[e] = puLIVE[r]
for streamline in G:
if has(streamline, B):
e = getNum(streamline, B)
r = getNum(streamline, G)
if 0 <= e and e < len(B) and 0 <= r and r < len(B):
pubPriv[e] = "Private"
privateCostsIN[e] = prCOST_IN[r]
privateCostsOUT[e] = prCOST_OUT[r]
both_costOUT[e] = prCOST_OUT[r]
both_costIN[e] = prCOST_IN[r]
if r < 701:
privateLive[e] = prLIVE[r]
both_live[e] = prLIVE[r]
pubPriv = repl(pubPriv)
both_costIN = repl(both_costIN)
both_costOUT = repl(both_costOUT)
both_costIN = method(both_costIN)
both_live = method(both_live)
both_costOUT = method(both_costOUT)
df2['On-Campus Tuition'] = both_costIN
df2['Off-Campus Tuition'] = both_costOUT
df2['Housing'] = both_live
df2.loc[:, 10] = | pd.Series(pubPriv, index=df2.index) | pandas.Series |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import itertools
import statsmodels.api as sm
import sklearn
import sklearn.ensemble
from sklearn.model_selection import StratifiedKFold, cross_val_score, LeaveOneOut, LeavePOut, GridSearchCV
import sklearn.linear_model
import warnings
sns.set(style='darkgrid', palette='muted', font_scale=1.5)
__all__ = ['plotROC', 'plotROCObj',
'plotProb',
'plotLogisticL1Paths',
'plotLogisticL1Vars',
'logisticL1NestedCV',
'plotLogisticL1NestedTuning',
'nestedCVClassifier',
'computeROC',
'computeCVROC',
'captureStandardization',
'smLogisticRegression',
'rocStats',
'compute2x2',
'plotNestedCVParams',
'plotNestedCVScores']
def plotROCObj(**objD):
fprL = [o['fpr'] for o in objD.values()]
tprL = [o['tpr'] for o in objD.values()]
aucL = [o['AUC'].mean() for o in objD.values()]
accL = [o['ACC'].mean() for o in objD.values()]
labelL = objD.keys()
outcomeVar = [o['Yvar'] for o in objD.values()][0]
plotROC(fprL, tprL, aucL, accL, labelL, outcomeVar)
def plotROC(fprL, tprL, aucL=None, accL=None, labelL=None, outcomeVar=''):
if labelL is None and aucL is None and accL is None:
labelL = ['Model %d' % i for i in range(len(fprL))]
else:
if not accL is None:
labelL = ['%s (AUC = %0.2f; ACC = %0.2f)' % (label, auc, acc) for label, auc, acc in zip(labelL, aucL, accL)]
else:
labelL = ['%s (AUC = %0.2f)' % (label, auc) for label, auc in zip(labelL, aucL)]
colors = sns.color_palette('Set1', n_colors=len(fprL))
plt.clf()
plt.gca().set_aspect('equal')
for i, (fpr, tpr, label) in enumerate(zip(fprL, tprL, labelL)):
plt.plot(fpr, tpr, color=colors[i], lw=2, label=label)
plt.plot([0, 1], [0, 1], '--', color='gray', label='Chance')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
if outcomeVar == '':
plt.title('ROC')
else:
plt.title('ROC for %s' % outcomeVar)
plt.legend(loc="lower right", fontsize=10)
plt.show()
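# Hypothetical call (variable names are placeholders, not from this module):
# plotROC([fpr1, fpr2], [tpr1, tpr2], aucL=[0.81, 0.77], accL=[0.75, 0.70],
#         labelL=['Model A', 'Model B'], outcomeVar='response')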
def plotProb(outcome, prob, **kwargs):
"""Scatter plot of probabilities for one outcome.
Parameters
----------
outcome : pd.Series
prob : pd.Series
Predicted probabilities returned from computeROC or computeCVROC"""
colors = sns.color_palette('Set1', n_colors=2)
tmp = pd.concat((outcome, prob), join='inner', axis=1)
tmp = tmp.sort_values(by=[outcome.name, 'Prob'])
tmp['x'] = np.arange(tmp.shape[0])
plt.clf()
for color, val in zip(colors, tmp[outcome.name].unique()):
ind = tmp[outcome.name] == val
lab = '%s = %1.0f (%d)' % (outcome.name, val, ind.sum())
plt.scatter(tmp.x.loc[ind], tmp.Prob.loc[ind], label=lab, color=color, **kwargs)
plt.plot([0, tmp.shape[0]], [0.5, 0.5], 'k--', lw=1)
plt.legend(loc='upper left')
plt.ylabel('Predicted Pr(%s)' % outcome.name)
plt.ylim((-0.05, 1.05))
plt.xlim(-1, tmp.shape[0])
plt.show()
def plotLogisticL1Paths(lo):
tmp = lo['paths'].mean(axis=0)
if len(lo['Xvars']) == (tmp.shape[1] - 1):
predVars = np.concatenate((np.array(lo['Xvars']), ['Intercept']))
else:
predVars = np.array(lo['Xvars'])
plt.clf()
plt.plot(np.log10(lo['Cs']), tmp, '-')
yl = plt.ylim()
xl = plt.xlim()
plt.plot(np.log10([lo['optimalCs'].mean()]*2), yl, '--k')
plt.ylabel('Coefficient')
plt.xlabel('Regularization parameter ($log_{10} C$)\n(lower is more regularized)')
topi = np.nonzero(lo['finalResult'].coef_.ravel() != 0)[0]
plt.annotate(s='$N_{vars}=%d$' % len(topi),
xy=(np.log10(lo['finalResult'].C), yl[1]),
ha='left', va='top', size=10)
for i in topi:
a = predVars[i]
cInd = np.where(tmp[:, i] != 0)[0][0]
y = tmp[cInd+2, i]
x = np.log10(lo['Cs'][cInd+2])
plt.annotate(a, xy=(x, y), ha='left', va='center', size=7)
y = tmp[-1, i]
x = np.log10(lo['Cs'][-1])
plt.annotate(a, xy=(x, y), ha='left', va='center', size=7)
plt.show()
def plotLogisticL1NestedTuning(lo):
plt.clf()
colors = sns.color_palette('Set1', n_colors=10)
for outi in range(lo['scores'].shape[0]):
sc = lo['scores'][outi, :, :].mean(axis=0)
plt.plot(np.log10(lo['Cs']), sc, '-', color=colors[outi])
mnmx = sc.min(), sc.max()
plt.plot(np.log10([lo['optimalCs'][outi]]*2), mnmx, '--', color=colors[outi])
plt.xlim(np.log10(lo['Cs'][[0, -1]]))
plt.ylabel('Score (log-likelihood)')
plt.xlabel('Regularization parameter ($log_{10} C$)\n(lower is more regularized)')
plt.title('Regularization tuning in nested CV')
plt.show()
def plotLogisticL1Vars(lo):
pctSelection = 100 * (lo['coefs'] != 0).mean(axis=0)
finalInd = (lo['finalResult'].coef_ != 0).ravel()
x = np.arange(len(pctSelection))
plt.clf()
plt.barh(width=pctSelection[finalInd], bottom=x[finalInd], align='center', color='red', label='Yes')
plt.barh(width=pctSelection[~finalInd], bottom=x[~finalInd], align='center', color='blue', label='No')
plt.yticks(range(len(pctSelection)), lo['Xvars'], size=8)
plt.ylabel('Predictors')
plt.xlabel('% times selected in 10-fold CV')
plt.legend(loc=0, title='Final model?')
def logisticL1NestedCV(df, outcomeVar, predVars, nFolds=10, LPO=None, Cs=10, n_jobs=1):
"""Apply logistic regression with L1-regularization (LASSO) to df.
Uses nested cross-validation framework with inner folds to optimize C
and outer test folds to evaluate performance.
Parameters
----------
df : pd.DataFrame
Must contain outcome and predictor variables.
outcomeVar : str
predVars : ndarray or list
Predictor variables in the model.
nFolds : int
N-fold stratified cross-validation
LPO : int or None
Use Leave-P-Out cross-validation instead of StratifiedNFoldCV
Cs : int or list
Each of the values in Cs describes the inverse of regularization strength.
If Cs is as an int, then a grid of Cs values are chosen in a logarithmic
scale between 1e-4 and 1e4. Smaller values specify stronger regularization.
Returns
-------
results : dict
Contains results as keys below:
fpr: (100, ) average FPR for ROC
tpr: (100, ) average TPR for ROC
AUC: (outerFolds, ) AUC of ROC for each outer test fold
meanAUC: (1, ) AUC of the average ROC
ACC: (outerFolds, ) accuracy across outer test folds
scores: (outerFolds, innerFolds, Cs) log-likelihood for each C across inner and outer CV folds
optimalCs: (outerFolds, ) optimal C from each set of inner CV
finalResult: final fitted model with predict() exposed
prob: (N,) pd.Series of predicted probabilities avg over outer folds
varList: (Nvars, ) list of vars with non-zero coef in final model
Cs: (Cs, ) pre-specified grid of Cs
coefs: (outerFolds, predVars) refit with optimalC in each fold
paths: (outerFolds, Cs, predVars + intercept) avg across inner folds
XVars: list of all vars in X
yVar: name of outcome variable
N: total number of rows/instances in the model"""
if not isinstance(predVars, list):
predVars = list(predVars)
tmp = df[[outcomeVar] + predVars].dropna()
X,y = tmp[predVars].astype(float), tmp[outcomeVar].astype(float)
if LPO is None:
innerCV = StratifiedKFold(n_splits=nFolds, shuffle=True)
outerCV = StratifiedKFold(n_splits=nFolds, shuffle=True)
else:
innerCV = LeavePOut(LPO)
outerCV = LeavePOut(LPO)
scorerFunc = sklearn.metrics.make_scorer(sklearn.metrics.log_loss,
greater_is_better=False,
needs_proba=True,
needs_threshold=False,
labels=[0, 1])
fpr = np.linspace(0, 1, 100)
tpr = np.nan * np.zeros((fpr.shape[0], nFolds))
acc = np.nan * np.zeros(nFolds)
auc = np.nan * np.zeros(nFolds)
paths = []
coefs = []
probs = []
optimalCs = np.nan * np.zeros(nFolds)
scores = []
for outi, (trainInd, testInd) in enumerate(outerCV.split(X=X, y=y)):
Xtrain, Xtest = X.iloc[trainInd], X.iloc[testInd]
ytrain, ytest = y.iloc[trainInd], y.iloc[testInd]
model = sklearn.linear_model.LogisticRegressionCV(Cs=Cs,
cv=innerCV,
penalty='l1',
solver='liblinear',
scoring=scorerFunc,
refit=True,
n_jobs=n_jobs)
"""With refit = True, the scores are averaged across all folds,
and the coefs and the C that corresponds to the best score is taken,
and a final refit is done using these parameters."""
results = model.fit(X=Xtrain, y=ytrain)
prob = results.predict_proba(Xtest)
class1Ind = np.nonzero(results.classes_ == 1)[0][0]
fprTest, tprTest, _ = sklearn.metrics.roc_curve(ytest, prob[:, class1Ind])
tpr[:, outi] = np.interp(fpr, fprTest, tprTest)
auc[outi] = sklearn.metrics.auc(fprTest, tprTest)
acc[outi] = sklearn.metrics.accuracy_score(ytest, np.round(prob[:, class1Ind]), normalize=True)
optimalCs[outi] = results.C_[0]
scores.append(results.scores_[1])
paths.append(results.coefs_paths_[1])
coefs.append(results.coef_)
probs.append(pd.Series(prob[:, class1Ind], index=Xtest.index))
meanTPR = np.mean(tpr, axis=1)
meanTPR[0], meanTPR[-1] = 0, 1
meanACC = np.mean(acc)
meanAUC = sklearn.metrics.auc(fpr, meanTPR)
meanC = 10**np.mean(np.log10(optimalCs))
paths = np.concatenate([p.mean(axis=0, keepdims=True) for p in paths], axis=0)
scores = np.concatenate([s[None, :, :] for s in scores], axis=0)
"""Compute mean probability over test predictions in CV"""
probS = | pd.concat(probs) | pandas.concat |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import skimage.io
import functools
from skimage import measure
from scipy.spatial import distance
from sklearn.metrics import pairwise_distances_argmin_min
from loguru import logger
import numpy as np
import matplotlib.pyplot as plt
logger.info('Import OK')
# define location parameters
input_folder = f'ImmunoEM-vesicle-detection/python_results/ImmunoEM-vesicle-detection_Analyzed/feature_validation/'
output_folder = f'ImmunoEM-vesicle-detection/python_results/ImmunoEM-vesicle-detection_Analyzed/feature_properties/'
if not os.path.exists(output_folder):
os.mkdir(output_folder)
def find_edge(feature_coords):
edge = pd.DataFrame()
# for (roi_type), roi_type_df in feature_coords.groupby(['roi_key']):
for (roi), roi_type_df in feature_coords.groupby(['roi_key']):
roi_type_df
# explode df
roi_type_df = roi_type_df.explode('coords')
roi_type_df = roi_type_df.reset_index()
# assign x and y
roi_type_df['x'] = roi_type_df['coords'].str[0]
roi_type_df['y'] = roi_type_df['coords'].str[1]
scanX = roi_type_df.groupby(['roi_key', 'x']).agg(y_min=pd.NamedAgg(
column='y', aggfunc='min'), y_max=pd.NamedAgg(column='y', aggfunc='max'))
scanX = scanX.reset_index()
#GET TO USE MELT :))))))
scanX = pd.melt(scanX, id_vars=['roi_key', 'x'],
value_vars=['y_min', 'y_max'], value_name='y')
scanX = scanX.drop(['variable'], axis=1)
edge = edge.append(scanX)
logger.info('Edges prepared')
return edge
#stackoverflow: https://stackoverflow.com/questions/48887912/find-minimum-distance-between-points-of-two-lists-in-python/48888321
def get_closest_pair_of_points(point_list_1, point_list_2):
"""
Determine the two points from two disjoint lists of points that are closest to
each other and the distance between them.
Args:
point_list_1: First list of points.
point_list_2: Second list of points.
Returns:
Two points that make the closest distance and the distance between them.
"""
indeces_of_closest_point_in_list_2, distances = pairwise_distances_argmin_min(
point_list_1, point_list_2)
# Get index of a point pair that makes the smallest distance.
min_distance_pair_index = np.argmin(distances)
# Get the two points that make this smallest distance.
min_distance_pair_point_1 = point_list_1[min_distance_pair_index]
min_distance_pair_point_2 = point_list_2[indeces_of_closest_point_in_list_2[min_distance_pair_index]]
min_distance = distances[min_distance_pair_index]
return min_distance_pair_point_1, min_distance_pair_point_2, min_distance
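# Illustrative check (made-up coordinates):
# get_closest_pair_of_points([(0, 0), (5, 5)], [(1, 1), (9, 9)])
# -> roughly ((0, 0), (1, 1), 1.414), i.e. the closest cross-list pair and its
# Euclidean distance.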
def measure_edge_dist(df_1, df_2): # measuring euclid distance b/w coordinate pairs
df_1coord = [(coord1, coord2)
for coord1, coord2 in df_1[['x', 'y']].values]
df_2coord = [(coord1, coord2)
for coord1, coord2 in df_2[['x', 'y']].values]
return get_closest_pair_of_points(df_1coord, df_2coord)
def compare_edges(scanX): # start with roi_key, x, y of out lines
# make dfs of each image
# res = dict(tuple(scanX.groupby('roi_key')))
scanX['image_name'] = scanX['roi_key'].str.split('_').str[0]
edge_distances = []
for image, image_df in scanX.groupby('image_name'):
logger.info(f'processing {image}')
image_df['roi_type'] = image_df['roi_key'].str.split('_').str[1]
vesicles_only = image_df[image_df['roi_type'] == 'vesicle']
clefts_only = image_df[image_df['roi_type'] == 'cleft']
membranes_only = image_df[image_df['roi_type'] == 'membrane']
for vesicle, vesicle_df in vesicles_only.groupby('roi_key'):
#logger.info(f'processing {vesicle}')
for cleft, cleft_df in clefts_only.groupby('roi_key'):
measure_distance = measure_edge_dist(vesicle_df, cleft_df)
#unpack the tuple
edge_distances.append(
[vesicle, cleft, measure_distance[0], measure_distance[1], measure_distance[2]])
#logger.info(f'measuring {vesicle} distance to next membrane')
for membrane, membrane_df in membranes_only.groupby('roi_key'):
measure_distance = measure_edge_dist(vesicle_df, membrane_df)
edge_distances.append(
[vesicle, membrane, measure_distance[0], measure_distance[1], measure_distance[2]])
# turn edge_distances into df
edge_distances = pd.DataFrame(edge_distances, columns=[
'vesicle_key', 'otherRoi_key', 'vesicle_coord', 'otherRoi_coord', 'measure_distance']) # turn edge_distances into df
return edge_distances
# --------------Scale variable--------------
# 540 pixes per 200 nm (measured in FIJI)
scale = 540/200
# --------------Initialise file lists--------------
image_list_ext = [folder for folder in os.listdir(input_folder)]
do_not_quantitate = []
image_list = [filename.replace('.npy', '') for filename in image_list_ext if not any(
word in filename for word in do_not_quantitate)]
valid_narrays = {filename.replace('.npy', ''): np.load(f'{input_folder}{filename}.npy') for filename in image_list}
logger.info('valid numpy arrays loaded')
features = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 5 13:24:18 2020
@author: earne
"""
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
import seaborn as sns
from sipperplots import (
get_any_idi,
get_side_idi,
get_content_idi,
get_chronogram_vals,
preproc_averaging
)
def format_avg_output(output, averaging):
if averaging == 'datetime':
output.index.name = 'Date'
elif averaging == 'time':
first = output.index[0]
output.index = [i - first for i in output.index]
output.index = (output.index.total_seconds()/3600).astype(int)
output.index.name = 'Hours Since {}:00'.format(str(first.hour))
elif averaging == 'elapsed':
output.index = output.index.astype(int)
output.index.name = 'Elapsed Hours'
return output
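# Sketch of the 'time' branch (hypothetical index): if the first timestamp is
# 2020-08-05 13:00, an hourly index [13:00, 14:00, 15:00] becomes [0, 1, 2] with the
# name 'Hours Since 13:00'.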
def drinkcount_cumulative(sipper, show_left=True, show_right=True,
show_content=[], **kwargs):
output = pd.DataFrame()
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
if show_left:
l = pd.DataFrame({'LeftCount' : df['LeftCount']}, index=df.index)
output = output.join(l, how='outer')
if show_right:
r = pd.DataFrame({'RightCount' : df['RightCount']}, index=df.index)
output = output.join(r, how='outer')
if show_content:
for c in show_content:
count = sipper.get_content_values(c, out='Count', df=df)
if not count.empty:
temp = pd.DataFrame({c +'Count' : count}, index=count.index)
output = output.join(temp, how='outer')
return output
def drinkcount_binned(sipper, binsize='1H', show_left=True, show_right=True,
show_content=[], **kwargs):
output = pd.DataFrame()
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
base = df.index[0].hour
if show_left:
binned = df['LeftCount'].diff().resample(binsize, base=base).sum()
l = pd.DataFrame({'LeftCount' : binned}, index=binned.index)
output = output.join(l, how='outer')
if show_right:
binned = df['RightCount'].diff().resample(binsize, base=base).sum()
r = pd.DataFrame({'RightCount' : binned}, index=binned.index)
output = output.join(r, how='outer')
if show_content:
for c in show_content:
count = sipper.get_content_values(c, out='Count', df=df)
binned = count.diff().resample(binsize, base=base).sum()
if not count.empty:
temp = pd.DataFrame({c+'Count' : binned}, index=binned.index)
output = output.join(temp, how='outer')
return output
def drinkduration_cumulative(sipper, show_left=True, show_right=True,
show_content=[], **kwargs):
output = pd.DataFrame()
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
if show_left:
l = pd.DataFrame({'LeftDuration' : df['LeftDuration']}, index=df.index)
output = output.join(l, how='outer')
if show_right:
r = pd.DataFrame({'RightDuration' : df['RightDuration']}, index=df.index)
output = output.join(r, how='outer')
if show_content:
for c in show_content:
count = sipper.get_content_values(c, out='Count', df=df)
if not count.empty:
temp = pd.DataFrame({c+'Duration' : count}, index=count.index)
output = output.join(temp, how='outer')
return output
def drinkduration_binned(sipper, binsize='1H', show_left=True, show_right=True,
show_content=[], **kwargs):
output = pd.DataFrame()
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
base = df.index[0].hour
if show_left:
binned = df['LeftDuration'].diff().resample(binsize, base=base).sum()
l = pd.DataFrame({'LeftDuration' : binned}, index=binned.index)
output = output.join(l, how='outer')
if show_right:
binned = df['RightDuration'].diff().resample(binsize, base=base).sum()
r = pd.DataFrame({'RightDuration' : binned}, index=binned.index)
output = output.join(r, how='outer')
if show_content:
for c in show_content:
count = sipper.get_content_values(c, out='Count', df=df)
binned = count.diff().resample(binsize, base=base).sum()
if not count.empty:
temp = pd.DataFrame({c+'Duration' : binned}, index=binned.index)
output = output.join(temp, how='outer')
return output
def interdrink_intervals(sippers, kde=True, logx=True,
combine=False, **kwargs):
if combine:
output = idi_onecurve(sippers, kde, logx, **kwargs)
else:
output = idi_multicurve(sippers, kde, logx, **kwargs)
return output
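# Hypothetical call (assumes `sippers` is a list of loaded Sipper objects):
# bar_df, kde_df = interdrink_intervals(sippers, kde=True, logx=True, combine=False)
# combine=False keeps one column per file; combine=True pools every interval into a
# single distribution.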
def idi_onecurve(sippers, kde, logx, **kwargs):
bar_df = pd.DataFrame()
kde_df = pd.DataFrame()
combined = []
for sipper in sippers:
fig = plt.figure()
plt.clf()
df = sipper.data.copy()
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
y = get_any_idi(sipper)
if logx:
y = [np.log10(val) for val in y if not pd.isna(val) if val != 0]
bins = np.round(np.arange(-2, 5, .1), 2)
else:
bins = np.linspace(0, 900, 50)
combined += list(y)
plot = sns.distplot(combined, bins=bins, norm_hist=False, kde=kde)
if kde:
if plot.get_lines():
line = plot.get_lines()[0]
x, y = line.get_data()
kde_df = kde_df.reindex(x)
kde_df['Values'] = y
bar_x = [v.get_x() for v in plot.patches]
bar_h = [v.get_height() for v in plot.patches]
bar_df = bar_df.reindex(bar_x)
bar_df['Values'] = bar_h
bar_df.index.name = 'log10(minutes)' if logx else 'minutes'
kde_df.index.name = 'log10(minutes)' if logx else 'minutes'
plt.close()
return bar_df, kde_df
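# Illustrative sketch (not part of the original module) of the interval
# preprocessing used by the IDI functions above: NaN and zero inter-drink
# intervals are dropped, the rest are log10-transformed, and the same bin
# edges are applied. np.histogram stands in for seaborn's distplot purely to
# show the binning; the interval values are made up and the helper is never
# called at import time.
def _example_idi_binning():
    import numpy as np
    import pandas as pd
    intervals = pd.Series([0.5, 2.0, 0.0, np.nan, 30.0, 300.0])  # minutes
    logged = [np.log10(v) for v in intervals if not pd.isna(v) and v != 0]
    bins = np.round(np.arange(-2, 5, .1), 2)
    heights, edges = np.histogram(logged, bins=bins)
    return pd.DataFrame({'Values': heights}, index=edges[:-1])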
def idi_multicurve(sippers, kde, logx, **kwargs):
bar_df = pd.DataFrame()
kde_df = pd.DataFrame()
for sipper in sippers:
fig = plt.figure()
plt.clf()
df = sipper.data.copy()
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
y = get_any_idi(sipper)
if logx:
            y = [np.log10(val) for val in y if not pd.isna(val) and val != 0]
bins = np.round(np.arange(-2, 5, .1), 2)
else:
bins = np.linspace(0, 900, 50)
plot = sns.distplot(y, bins=bins, norm_hist=False, kde=kde)
bar_x = [v.get_x() for v in plot.patches]
bar_h = [v.get_height() for v in plot.patches]
btemp = pd.DataFrame({sipper.filename : bar_h}, index=bar_x)
bar_df = bar_df.join(btemp, how='outer')
if kde:
if plot.get_lines():
line = plot.get_lines()[0]
x, y = line.get_data()
ktemp = pd.DataFrame({sipper.filename : y}, index=x)
kde_df = kde_df.join(ktemp, how='outer')
plt.close()
bar_df.index.name = 'log10(minutes)' if logx else 'minutes'
kde_df.index.name = 'log10(minutes)' if logx else 'minutes'
return bar_df, kde_df
def interdrink_intervals_byside(sippers, kde=True, logx=True, **kwargs):
bar_df = pd.DataFrame()
kde_df = pd.DataFrame()
for side in ['Left', 'Right']:
combined = []
fig = plt.figure()
plt.clf()
for sipper in sippers:
df = sipper.data.copy()
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
y = get_side_idi(sipper, side)
if logx:
                y = [np.log10(val) for val in y if not pd.isna(val) and val != 0]
bins = np.round(np.arange(-2, 5, .1), 2)
else:
bins = np.linspace(0, 900, 50)
combined += list(y)
plot = sns.distplot(combined, bins=bins, norm_hist=False, kde=kde)
if kde:
if plot.get_lines():
line = plot.get_lines()[0]
x, y = line.get_data()
ktemp = pd.DataFrame({side:y}, index=x)
kde_df = kde_df.join(ktemp, how='outer')
bar_x = [v.get_x() for v in plot.patches]
bar_h = [v.get_height() for v in plot.patches]
btemp = pd.DataFrame({side:bar_h}, index=bar_x)
bar_df = bar_df.join(btemp, how='outer')
plt.close()
bar_df.index.name = 'log10(minutes)' if logx else 'minutes'
kde_df.index.name = 'log10(minutes)' if logx else 'minutes'
return bar_df, kde_df
def interdrink_intervals_bycontent(sippers, idi_content, kde=True, logx=True,
**kwargs):
bar_df = pd.DataFrame()
kde_df = pd.DataFrame()
for c in idi_content:
combined = []
fig = plt.figure()
plt.clf()
for sipper in sippers:
df = sipper.data.copy()
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
y = get_content_idi(sipper, c, df=df)
if logx:
                y = [np.log10(val) for val in y if not pd.isna(val) and val != 0]
bins = np.round(np.arange(-2, 5, .1), 2)
else:
bins = np.linspace(0, 900, 50)
combined += list(y)
plot = sns.distplot(combined, bins=bins, norm_hist=False, kde=kde)
if kde:
if plot.get_lines():
line = plot.get_lines()[0]
x, y = line.get_data()
ktemp = pd.DataFrame({c:y}, index=x)
kde_df = kde_df.join(ktemp, how='outer')
bar_x = [v.get_x() for v in plot.patches]
bar_h = [v.get_height() for v in plot.patches]
btemp = pd.DataFrame({c:bar_h}, index=bar_x)
bar_df = bar_df.join(btemp, how='outer')
plt.close()
bar_df.index.name = 'log10(minutes)' if logx else 'minutes'
kde_df.index.name = 'log10(minutes)' if logx else 'minutes'
return bar_df, kde_df
def drinkcount_chronogram(sipper, circ_left=True, circ_right=True,
circ_content=None, lights_on=7,
lights_off=19, **kwargs):
output = pd.DataFrame()
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
to_plot = []
labels = []
if circ_left:
to_plot.append(df['LeftCount'])
labels.append('Left')
if circ_right:
to_plot.append(df['RightCount'])
labels.append('Right')
if circ_content:
for c in circ_content:
vals = sipper.get_content_values(c, 'Count', df=df)
if not vals.empty:
                to_plot.append(vals)
labels.append(c)
for i, series in enumerate(to_plot):
reindexed = get_chronogram_vals(series, lights_on, lights_off)
if reindexed.empty:
continue
label = labels[i]
temp = pd.DataFrame({label:reindexed}, index=reindexed.index)
output = output.join(temp, how='outer')
output.index.name = 'Hours Into Light Cycle'
return output
def drinkcount_chronogram_grouped(sippers, groups, circ_left=True, circ_right=True,
circ_content=None, circ_var='SEM', lights_on=7,
lights_off=19, **kwargs):
output = pd.DataFrame(index=range(0,24))
output.index.name = 'Hours Into Light Cycle'
to_plot = defaultdict(list)
for group in groups:
for sipper in sippers:
if group in sipper.groups:
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
if circ_left:
key = group + ' - Left'
vals = get_chronogram_vals(df['LeftCount'],
lights_on,
lights_off)
vals.name = sipper.basename
to_plot[key].append(vals)
if circ_right:
key = group + ' - Right'
vals = get_chronogram_vals(df['RightCount'],
lights_on,
lights_off)
vals.name = sipper.basename
to_plot[key].append(vals)
if circ_content:
for c in circ_content:
key = group + ' - ' + c
content_vals = sipper.get_content_values(c, 'Count', df)
if not content_vals.empty:
vals = get_chronogram_vals(content_vals,
lights_on,
lights_off)
vals.name = sipper.basename
to_plot[key].append(vals)
for i, (label, data) in enumerate(to_plot.items()):
y = np.nanmean(data, axis=0)
for d in data:
output[label + ' - ' + d.name] = d
output[label + ' MEAN'] = y
if circ_var == 'SEM':
sem = stats.sem(data, axis=0, nan_policy='omit')
output[label + ' SEM'] = sem
elif circ_var == 'STD':
std = np.nanstd(data, axis=0)
output[label + ' STD'] = std
return output
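# Illustrative sketch (not part of the original module) of the aggregation
# step in the grouped chronogram functions above: per-animal 24-hour profiles
# are stacked, then summarised with np.nanmean and scipy's SEM with
# nan_policy='omit'. Values and animal names are synthetic; the helper is
# never called at import time.
def _example_chronogram_aggregation():
    import numpy as np
    import pandas as pd
    from scipy import stats
    hours = range(24)
    profiles = [pd.Series(np.random.rand(24), index=hours, name=n)
                for n in ('mouse_a', 'mouse_b', 'mouse_c')]
    out = pd.DataFrame(index=hours)
    out.index.name = 'Hours Into Light Cycle'
    out['MEAN'] = np.nanmean(profiles, axis=0)
    out['SEM'] = stats.sem(profiles, axis=0, nan_policy='omit')
    return out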
def drinkduration_chronogram(sipper, circ_left=True, circ_right=True,
circ_content=None, lights_on=7,
lights_off=19, **kwargs):
output = pd.DataFrame()
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
to_plot = []
labels = []
if circ_left:
to_plot.append(df['LeftDuration'])
labels.append('Left')
if circ_right:
to_plot.append(df['RightDuration'])
labels.append('Right')
if circ_content:
for c in circ_content:
vals = sipper.get_content_values(c, 'Duration', df=df)
if not vals.empty:
                to_plot.append(vals)
labels.append(c)
for i, series in enumerate(to_plot):
reindexed = get_chronogram_vals(series, lights_on, lights_off)
if reindexed.empty:
continue
label = labels[i]
temp = pd.DataFrame({label:reindexed}, index=reindexed.index)
output = output.join(temp, how='outer')
output.index.name = 'Hours Into Light Cycle'
return output
def drinkduration_chronogram_grouped(sippers, groups, circ_left=True, circ_right=True,
circ_content=None, circ_var='SEM', lights_on=7,
lights_off=19, **kwargs):
output = pd.DataFrame(index=range(0,24))
output.index.name = 'Hours Into Light Cycle'
to_plot = defaultdict(list)
for group in groups:
for sipper in sippers:
if group in sipper.groups:
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
if circ_left:
key = group + ' - Left'
vals = get_chronogram_vals(df['LeftDuration'],
lights_on,
lights_off)
vals.name = sipper.basename
to_plot[key].append(vals)
if circ_right:
key = group + ' - Right'
vals = get_chronogram_vals(df['RightDuration'],
lights_on,
lights_off)
vals.name = sipper.basename
to_plot[key].append(vals)
if circ_content:
for c in circ_content:
key = group + ' - ' + c
content_vals = sipper.get_content_values(c, 'Duration', df)
if not content_vals.empty:
vals = get_chronogram_vals(content_vals,
lights_on,
lights_off)
vals.name = sipper.basename
to_plot[key].append(vals)
for i, (label, data) in enumerate(to_plot.items()):
y = np.nanmean(data, axis=0)
for d in data:
output[label + ' - ' + d.name] = d
output[label + ' MEAN'] = y
if circ_var == 'SEM':
sem = stats.sem(data, axis=0, nan_policy='omit')
output[label + ' SEM'] = sem
elif circ_var == 'STD':
std = np.nanstd(data, axis=0)
output[label + ' STD'] = std
return output
def side_preference(sipper, pref_side='Left', pref_metric='Count', pref_bins='1H',
**kwargs):
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
base = df.index[0].hour
lcol = 'Left' + pref_metric
rcol = 'Right' + pref_metric
l_data = df[lcol].diff().resample(pref_bins, base=base).sum()
r_data = df[rcol].diff().resample(pref_bins, base=base).sum()
total = l_data + r_data
if pref_side == 'Left':
preference = l_data/total
else:
preference = r_data/total
preference *= 100
output = pd.DataFrame(preference)
output.columns = ['{} Preference (% Drink {})'.format(pref_side, pref_metric)]
return output
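# Illustrative sketch (not part of the original module) of the preference
# calculation above: per-bin left and right totals are turned into a
# percentage preference for one side. The hourly totals are made up and the
# helper is never called at import time.
def _example_side_preference():
    import pandas as pd
    idx = pd.date_range('2021-01-01', periods=4, freq='1H')
    l_data = pd.Series([3, 0, 5, 2], index=idx)
    r_data = pd.Series([1, 4, 5, 2], index=idx)
    preference = l_data / (l_data + r_data) * 100
    out = pd.DataFrame(preference)
    out.columns = ['Left Preference (% Drink Count)']
    return out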
def content_preference(sipper, pref_content=[], pref_metric='Count', pref_bins='1H',
lights_on=7, lights_off=19, shade_dark=True, **kwargs):
output = pd.DataFrame()
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
base = df.index[0].hour
for i, c in enumerate(pref_content):
target = sipper.get_content_values(c, out=pref_metric, df=df)
if target.empty:
continue
target = target.diff().resample(pref_bins, base=base).sum()
other = sipper.get_content_values(c, out=pref_metric, df=df,
opposite=True)
other = other.diff().resample(pref_bins, base=base).sum()
if not target.empty and not other.empty:
preference = target / (target + other) * 100
temp = pd.DataFrame({c : preference}, index=preference.index)
output = output.join(temp, how='outer')
return output
def averaged_drinkcount(sippers, groups, averaging='datetime', avg_bins='1H',
avg_var='SEM', show_left=True, show_right=True,
show_content=[], **kwargs):
output = pd.DataFrame()
to_plot = defaultdict(list)
for group in groups:
for sipper in sippers:
if group in sipper.groups:
df = sipper.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
if show_left:
key = '{} - Left'.format(group)
vals = df['LeftCount'].diff().rename(sipper.basename)
to_plot[key].append(vals)
if show_right:
key = '{} - Right'.format(group)
vals = df['RightCount'].diff().rename(sipper.basename)
to_plot[key].append(vals)
if show_content:
for c in show_content:
key = '{} - {}'.format(group, c)
vals = sipper.get_content_values(c, out='Count',
df=df).diff()
if not vals.empty:
to_plot[key].append(vals.rename(sipper.basename))
for i, (label, data) in enumerate(to_plot.items()):
temp = | pd.DataFrame() | pandas.DataFrame |
# flake8: noqa: F841
import tempfile
from typing import Any, Dict, List, Union
from pandas.io.parsers import TextFileReader
import numpy as np
import pandas as pd
from . import check_series_result, check_dataframe_result
def test_types_to_datetime() -> None:
df = pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
r1: pd.Series = pd.to_datetime(df)
r2: pd.Series = pd.to_datetime(df, unit="s", origin="unix", infer_datetime_format=True)
r3: pd.Series = pd.to_datetime(df, unit="ns", dayfirst=True, utc=None, format="%M:%D", exact=False)
r4: pd.DatetimeIndex = pd.to_datetime([1, 2], unit="D", origin=pd.Timestamp("01/01/2000"))
r5: pd.DatetimeIndex = pd.to_datetime([1, 2], unit="D", origin=3)
r6: pd.DatetimeIndex = pd.to_datetime(["2022-01-03", "2022-02-22"])
r7: pd.DatetimeIndex = pd.to_datetime(pd.Index(["2022-01-03", "2022-02-22"]))
r8: pd.Series = pd.to_datetime({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
def test_types_concat() -> None:
s = pd.Series([0, 1, -10])
s2 = pd.Series([7, -5, 10])
check_series_result(pd.concat([s, s2]))
check_dataframe_result(pd.concat([s, s2], axis=1))
check_series_result(pd.concat([s, s2], keys=["first", "second"], sort=True))
check_series_result(pd.concat([s, s2], keys=["first", "second"], names=["source", "row"]))
# Depends on the axis
rs1: Union[pd.Series, pd.DataFrame] = pd.concat({"a": s, "b": s2})
rs1a: Union[pd.Series, pd.DataFrame] = pd.concat({"a": s, "b": s2}, axis=1)
rs2: Union[pd.Series, pd.DataFrame] = pd.concat({1: s, 2: s2})
rs2a: Union[pd.Series, pd.DataFrame] = | pd.concat({1: s, 2: s2}, axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
import pandas as pd
import pandas.types.concat as _concat
import pandas.util.testing as tm
class TestConcatCompat(tm.TestCase):
def check_concat(self, to_concat, exp):
for klass in [pd.Index, pd.Series]:
to_concat_klass = [klass(c) for c in to_concat]
res = _concat.get_dtype_kinds(to_concat_klass)
self.assertEqual(res, set(exp))
def test_get_dtype_kinds(self):
to_concat = [['a'], [1, 2]]
self.check_concat(to_concat, ['i', 'object'])
to_concat = [[3, 4], [1, 2]]
self.check_concat(to_concat, ['i'])
to_concat = [[3, 4], [1, 2.1]]
self.check_concat(to_concat, ['i', 'f'])
def test_get_dtype_kinds_datetimelike(self):
to_concat = [pd.DatetimeIndex(['2011-01-01']),
pd.DatetimeIndex(['2011-01-02'])]
self.check_concat(to_concat, ['datetime'])
to_concat = [pd.TimedeltaIndex(['1 days']),
pd.TimedeltaIndex(['2 days'])]
self.check_concat(to_concat, ['timedelta'])
def test_get_dtype_kinds_datetimelike_object(self):
to_concat = [pd.DatetimeIndex(['2011-01-01']),
pd.DatetimeIndex(['2011-01-02'], tz='US/Eastern')]
self.check_concat(to_concat,
['datetime', 'datetime64[ns, US/Eastern]'])
to_concat = [ | pd.DatetimeIndex(['2011-01-01'], tz='Asia/Tokyo') | pandas.DatetimeIndex |
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.tseries.period as period
from pandas import period_range, PeriodIndex, Index, date_range
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestPeriodIndex(tm.TestCase):
def setUp(self):
pass
def test_joins(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
joined = index.join(index[:-5], how=kind)
tm.assertIsInstance(joined, PeriodIndex)
self.assertEqual(joined.freq, index.freq)
def test_join_self(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
res = index.join(index, how=kind)
self.assertIs(index, res)
def test_join_does_not_recur(self):
df = tm.makeCustomDataframe(
3, 2, data_gen_f=lambda *args: np.random.randint(2),
c_idx_type='p', r_idx_type='dt')
s = df.iloc[:2, 0]
res = s.index.join(df.columns, how='outer')
expected = Index([s.index[0], s.index[1],
df.columns[0], df.columns[1]], object)
tm.assert_index_equal(res, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
        other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
                                 '2000-01-01 09:08'],
freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4,
expected4),
(rng5, other5, expected5), (rng6, other6,
expected6),
(rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_union_misc(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].union(index[10:])
tm.assert_index_equal(result, index)
# not in order
result = _permute(index[:-5]).union(_permute(index[10:]))
tm.assert_index_equal(result, index)
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
with tm.assertRaises(period.IncompatibleFrequency):
index.union(index2)
msg = 'can only call with other PeriodIndex-ed objects'
with tm.assertRaisesRegexp(ValueError, msg):
index.join(index.to_timestamp())
index3 = period_range('1/1/2000', '1/20/2000', freq='2D')
with tm.assertRaises(period.IncompatibleFrequency):
index.join(index3)
def test_union_dataframe_index(self):
rng1 = pd.period_range('1/1/1999', '1/1/2012', freq='M')
s1 = pd.Series(np.random.randn(len(rng1)), rng1)
rng2 = pd.period_range('1/1/1980', '12/1/2001', freq='M')
s2 = pd.Series(np.random.randn(len(rng2)), rng2)
df = pd.DataFrame({'s1': s1, 's2': s2})
exp = pd.period_range('1/1/1980', '1/1/2012', freq='M')
self.assert_index_equal(df.index, exp)
def test_intersection(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].intersection(index[10:])
tm.assert_index_equal(result, index[10:-5])
# not in order
left = _permute(index[:-5])
right = _permute(index[10:])
result = left.intersection(right).sort_values()
tm.assert_index_equal(result, index[10:-5])
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
with tm.assertRaises(period.IncompatibleFrequency):
index.intersection(index2)
index3 = period_range('1/1/2000', '1/20/2000', freq='2D')
with tm.assertRaises(period.IncompatibleFrequency):
index.intersection(index3)
def test_intersection_cases(self):
base = period_range('6/1/2000', '6/30/2000', freq='D', name='idx')
# if target has the same name, it is preserved
rng2 = period_range('5/15/2000', '6/20/2000', freq='D', name='idx')
expected2 = period_range('6/1/2000', '6/20/2000', freq='D',
name='idx')
# if target name is different, it will be reset
rng3 = period_range('5/15/2000', '6/20/2000', freq='D', name='other')
expected3 = period_range('6/1/2000', '6/20/2000', freq='D',
name=None)
rng4 = period_range('7/1/2000', '7/31/2000', freq='D', name='idx')
expected4 = PeriodIndex([], name='idx', freq='D')
for (rng, expected) in [(rng2, expected2), (rng3, expected3),
(rng4, expected4)]:
result = base.intersection(rng)
tm.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
# non-monotonic
base = PeriodIndex(['2011-01-05', '2011-01-04', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
rng2 = PeriodIndex(['2011-01-04', '2011-01-02',
'2011-02-02', '2011-02-03'],
freq='D', name='idx')
expected2 = PeriodIndex(['2011-01-04', '2011-01-02'], freq='D',
name='idx')
rng3 = PeriodIndex(['2011-01-04', '2011-01-02', '2011-02-02',
'2011-02-03'],
freq='D', name='other')
expected3 = PeriodIndex(['2011-01-04', '2011-01-02'], freq='D',
name=None)
rng4 = | period_range('7/1/2000', '7/31/2000', freq='D', name='idx') | pandas.period_range |
# -*- coding: utf-8 -*-
""" Librairie personnelle pour manipulation les modèles de machine learning
"""
# ====================================================================
# ML utilities - Openclassrooms project 4
# Version : 0.0.0 - CRE LR 23/03/2021
# ====================================================================
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from IPython.display import display
import time
import pickle
import shap
import outils_data
from math import sqrt
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error, \
explained_variance_score, median_absolute_error
from sklearn.model_selection import cross_validate, RandomizedSearchCV, \
GridSearchCV, learning_curve # , cross_val_score
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet, \
BayesianRidge, HuberRegressor, \
OrthogonalMatchingPursuit, Lars, SGDRegressor
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor, RandomForestRegressor, \
ExtraTreesRegressor, GradientBoostingRegressor, BaggingRegressor
from sklearn.inspection import permutation_importance
from sklearn.feature_selection import RFECV
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
import eli5
from eli5.sklearn import PermutationImportance
from pprint import pprint
# --------------------------------------------------------------------
# -- VERSION
# --------------------------------------------------------------------
__version__ = '0.0.0'
# --------------------------------------------------------------------
# -- Train/predict a baseline regression model with cross-validation
# --------------------------------------------------------------------
def process_regression(
model_reg,
X_train,
X_test,
y_train,
y_test,
df_resultats,
titre,
affiche_tableau=True,
affiche_comp=True,
affiche_erreur=True,
xlim_sup=130000000):
"""
    Fits a regression model, runs cross-validation and saves the performance
    metrics.
    Parameters
    ----------
    model_reg : initialized regression model, required.
    X_train : training set feature matrix X, required.
    X_test : test set feature matrix X, required.
    y_train : training set target vector y, required.
    y_test : test set target vector y, required.
    df_resultats : dataframe used to keep track of results, required.
    titre : title to record in the results table, required.
    affiche_tableau : boolean, display the results table, optional.
    affiche_comp : boolean, display the y_test/y_pred comparison plot,
    optional.
    affiche_erreur : boolean, display the error plot, optional.
    xlim_sup : upper limit of the x axis, optional.
    Returns
    -------
    df_resultats : The dataframe storing the performance metrics.
    y_pred : The predictions of the model.
"""
    # Record execution start time
    time_start = time.time()
    # Train the model
    model_reg.fit(X_train, y_train)
    # Save the trained regression model
with open('modeles/modele_' + titre + '.pickle', 'wb') as f:
pickle.dump(model_reg, f, pickle.HIGHEST_PROTOCOL)
    # Predictions on the test set
    y_pred = model_reg.predict(X_test)
    # Record execution end time
    time_end = time.time()
    # Back-transform so that R2 reflects the original target values
    y_test_nt = (10 ** y_test) + 1
    y_pred_nt = (10 ** y_pred) + 1
    # Compute the metrics
mae = mean_absolute_error(y_test_nt, y_pred_nt)
mse = mean_squared_error(y_test_nt, y_pred_nt)
rmse = sqrt(mse)
r2 = r2_score(y_test_nt, y_pred_nt)
errors = abs(y_pred - y_test_nt)
mape = 100 * np.mean(errors / y_test_nt)
accuracy = 100 - mape
    # execution time
time_execution = time_end - time_start
# cross validation
scoring = ['r2', 'neg_mean_squared_error']
scores = cross_validate(model_reg, X_train, y_train, cv=10,
scoring=scoring, return_train_score=True)
    # Save the performance metrics
df_resultats = df_resultats.append(pd.DataFrame({
'Modèle': [titre],
'R2': [r2],
'MSE': [mse],
'RMSE': [rmse],
'MAE': [mae],
'Erreur moy': [np.mean(errors)],
'Précision': [accuracy],
'Durée': [time_execution],
'Test R2 CV': [scores['test_r2'].mean()],
'Test R2 +/-': [scores['test_r2'].std()],
'Test MSE CV': [-(scores['test_neg_mean_squared_error'].mean())],
'Train R2 CV': [scores['train_r2'].mean()],
'Train R2 +/-': [scores['train_r2'].std()],
'Train MSE CV': [-(scores['train_neg_mean_squared_error'].mean())]
}), ignore_index=True)
if affiche_tableau:
display(df_resultats.style.hide_index())
if affiche_comp:
        # back to the original scale
test = (10 ** y_test) + 1
predict = (10 ** y_pred) + 1
        # Plot test values vs predictions
sns.jointplot(
test,
predict,
kind='reg')
plt.xlabel('y_test')
plt.ylabel('y_predicted')
        plt.suptitle(t='Tests / Predictions for: '
+ str(titre),
y=0,
fontsize=16,
alpha=0.75,
weight='bold',
ha='center')
plt.xlim([0, xlim_sup])
plt.show()
if affiche_erreur:
        # back to the original scale
test = (10 ** y_test) + 1
predict = (10 ** y_pred) + 1
        # plot the errors
df_res = | pd.DataFrame({'true': test, 'pred': predict}) | pandas.DataFrame |
"""
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas import DataFrame, Index
import pandas._testing as tm
_msg_validate_usecols_arg = (
"'usecols' must either be list-like "
"of all strings, all unicode, all "
"integers or a callable."
)
_msg_validate_usecols_names = (
"Usecols do not match columns, columns expected but not found: {0}"
)
def test_raise_on_mixed_dtype_usecols(all_parsers):
# See gh-12678
data = """a,b,c
1000,2000,3000
4000,5000,6000
"""
usecols = [0, "b", 2]
parser = all_parsers
with pytest.raises(ValueError, match=_msg_validate_usecols_arg):
parser.read_csv(StringIO(data), usecols=usecols)
@pytest.mark.parametrize("usecols", [(1, 2), ("b", "c")])
def test_usecols(all_parsers, usecols):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(StringIO(data), usecols=usecols)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"])
tm.assert_frame_equal(result, expected)
def test_usecols_with_names(all_parsers):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
names = ["foo", "bar"]
result = parser.read_csv(StringIO(data), names=names, usecols=[1, 2], header=0)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=names)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"names,usecols", [(["b", "c"], [1, 2]), (["a", "b", "c"], ["b", "c"])]
)
def test_usecols_relative_to_names(all_parsers, names, usecols):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(StringIO(data), names=names, header=None, usecols=usecols)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"])
tm.assert_frame_equal(result, expected)
def test_usecols_relative_to_names2(all_parsers):
# see gh-5766
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(
StringIO(data), names=["a", "b"], header=None, usecols=[0, 1]
)
expected = DataFrame([[1, 2], [4, 5], [7, 8], [10, 11]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_usecols_name_length_conflict(all_parsers):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
msg = "Number of passed names did not match number of header fields in the file"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), names=["a", "b"], header=None, usecols=[1])
def test_usecols_single_string(all_parsers):
# see gh-20558
parser = all_parsers
data = """foo, bar, baz
1000, 2000, 3000
4000, 5000, 6000"""
with pytest.raises(ValueError, match=_msg_validate_usecols_arg):
parser.read_csv(StringIO(data), usecols="foo")
@pytest.mark.parametrize(
"data", ["a,b,c,d\n1,2,3,4\n5,6,7,8", "a,b,c,d\n1,2,3,4,\n5,6,7,8,"]
)
def test_usecols_index_col_false(all_parsers, data):
# see gh-9082
parser = all_parsers
usecols = ["a", "c", "d"]
expected = DataFrame({"a": [1, 5], "c": [3, 7], "d": [4, 8]})
result = parser.read_csv(StringIO(data), usecols=usecols, index_col=False)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", ["b", 0])
@pytest.mark.parametrize("usecols", [["b", "c"], [1, 2]])
def test_usecols_index_col_conflict(all_parsers, usecols, index_col):
# see gh-4201: test that index_col as integer reflects usecols
parser = all_parsers
data = "a,b,c,d\nA,a,1,one\nB,b,2,two"
expected = DataFrame({"c": [1, 2]}, index=Index(["a", "b"], name="b"))
result = parser.read_csv(StringIO(data), usecols=usecols, index_col=index_col)
tm.assert_frame_equal(result, expected)
def test_usecols_index_col_conflict2(all_parsers):
# see gh-4201: test that index_col as integer reflects usecols
parser = all_parsers
data = "a,b,c,d\nA,a,1,one\nB,b,2,two"
expected = DataFrame({"b": ["a", "b"], "c": [1, 2], "d": ("one", "two")})
expected = expected.set_index(["b", "c"])
result = parser.read_csv(
StringIO(data), usecols=["b", "c", "d"], index_col=["b", "c"]
)
tm.assert_frame_equal(result, expected)
def test_usecols_implicit_index_col(all_parsers):
# see gh-2654
parser = all_parsers
data = "a,b,c\n4,apple,bat,5.7\n8,orange,cow,10"
result = parser.read_csv(StringIO(data), usecols=["a", "b"])
expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_regex_sep(all_parsers):
# see gh-2733
parser = all_parsers
data = "a b c\n4 apple bat 5.7\n8 orange cow 10"
result = parser.read_csv(StringIO(data), sep=r"\s+", usecols=("a", "b"))
expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_with_whitespace(all_parsers):
parser = all_parsers
data = "a b c\n4 apple bat 5.7\n8 orange cow 10"
result = parser.read_csv(StringIO(data), delim_whitespace=True, usecols=("a", "b"))
expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"usecols,expected",
[
# Column selection by index.
([0, 1], DataFrame(data=[[1000, 2000], [4000, 5000]], columns=["2", "0"])),
# Column selection by name.
(["0", "1"], DataFrame(data=[[2000, 3000], [5000, 6000]], columns=["0", "1"])),
],
)
def test_usecols_with_integer_like_header(all_parsers, usecols, expected):
parser = all_parsers
data = """2,0,1
1000,2000,3000
4000,5000,6000"""
result = parser.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("usecols", [[0, 2, 3], [3, 0, 2]])
def test_usecols_with_parse_dates(all_parsers, usecols):
# see gh-9755
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parser = all_parsers
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = | DataFrame(cols, columns=["c_d", "a"]) | pandas.DataFrame |
import math
import matplotlib.pyplot as plt
import numpy as np
from os import listdir
import pandas as pd
import random
import re
from scipy.stats import norm as std_norm
import seaborn as sns
import tqdm
# plots a heat map of chromosome interaction intensities
def interaction_heat_map(all_files, data_path):
biggest_num = max([max(chrm_nums(file)) for file in all_files])
heat_matrix = np.zeros((biggest_num, biggest_num))
# intensity = { (chr_x, chr_y): interaction intensity }
intensity = count_intermingling(all_files, data_path)
for pair in intensity:
chr_x, chr_y = pair
heat_matrix[chr_x-1, chr_y-1] = intensity[pair]
heat_matrix[chr_y-1, chr_x-1] = intensity[pair]
sns.heatmap(heat_matrix, cbar_kws={'label': 'Number of Intermingling Regions'})
plt.title('Heat Map of Inter-Chromosome Interaction Counts')
plt.xlabel('Chromosome number')
plt.ylabel('Chromosome number')
plt.show()
# maps a pair of chrms to the intensity of their interaction
def count_intermingling(all_files, data_path):
n_intermingle = dict()
mu, sigma = mean_std(all_files, data_path)
print('\nCounting interminglings...')
for file in tqdm.tqdm(all_files):
x, y = chrm_nums(file)
if x != y:
# intensity score = sum of the rectangular areas with significant interaction freq
n_intermingle[(int(x), int(y))] = sum([w*h for _, w, h in monte_carlo(x, y, mu, sigma, data_path)])
return n_intermingle
# returns the chromosome numbers in the file name
def chrm_nums(file):
chrx = re.search('chr(.+?)_', file).group(1)
chry = re.search('_chr(.+?).txt', file).group(1)
return int(chrx), int(chry)
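# Quick illustrative check (not part of the original script) of the file-name
# parsing above; the file names are hypothetical but follow the
# 'chr<X>_chr<Y>.txt' convention the rest of the module assumes. Never called
# at import time.
def _example_chrm_nums():
    assert chrm_nums('chr3_chr17.txt') == (3, 17)
    assert chrm_nums('chr12_chr1.txt') == (12, 1)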
# runs a greedy search 50 times to find the regions with lowest p-values
def monte_carlo(x, y, mu, sigma, data_path):
file_name = f'{data_path}chr{x}_chr{y}.txt'
Z = t_interaction_matrix(file_name)
interacting = []
while True:
witnessed = [greedy_search(Z, mu, sigma) for _ in range(50)]
# return the trial with the lowest p-value
best_M, best_p, best_row, best_col = min(witnessed, key=lambda wit: wit[1])
# the extended matrix is no longer significant
if best_p > 0.01:
return interacting
else:
h, w = best_M.shape
# sub-matrix is an area with significant interaction
interacting.append(((best_col, best_row), w, h))
mean = np.nanmean(best_M)
Z[best_row: best_row + h, best_col: best_col + w] -= mean
# fills a freq matrix, Z, where Z[i, j] = scaled_freq btwn chrm_i and chrm_j
def t_interaction_matrix(file):
df = read(file)
n_rows, n_cols = df['xloc'].max()+1, df['yloc'].max()+1
freq_matrix = np.zeros((n_rows, n_cols))
freq_matrix[df['xloc'], df['yloc']] = df['freq']
return freq_matrix
# finds regions with small p-values
# uses adjusted p-value to perform a greedy search--instead of testing every sub-matrix
def greedy_search(freq_matrix, mu, sigma):
n_rows, n_cols = freq_matrix.shape
def adjusted_p_value(M):
# returns the number of sub-matrices in an m x n matrix
def count_submatrices(m, n):
return m * (m + 1) * n * (n + 1) / 4
top = (np.nanmean(M) - mu) * math.sqrt(M.size)
n_sub = count_submatrices(n_rows, n_cols)
# adjust by the number of possible sub-matrices
return std_norm.sf(top / sigma) * n_sub
# the region begins as a random position
row, col = random.randint(0, n_rows - 1), random.randint(0, n_cols - 1)
rand_choice = freq_matrix[row, col]
M_0 = np.array([[rand_choice]])
# continue extending the region until the p-value starts increasing
while True:
p_value = adjusted_p_value(M_0)
p_values = []
for sub_m in extend(row, col, M_0, freq_matrix):
p_values.append((adjusted_p_value(sub_m[0]), sub_m))
best_p, best_m = min(p_values, key=lambda x: x[0])
if best_p >= p_value:
return M_0, p_value, row, col
else:
M_0, row, col = best_m
# extends the sub-matrix M to the right, left, top, and bottom
def extend(r, c, M, matrix):
h, w = M.shape
M_r, rr, cr = matrix[r: r + h, c: c + w + 1].copy(), r, c
M_l, rl, cl = matrix[r: r + h, c - 1: c + w].copy(), r, c - 1
M_t, rt, ct = matrix[r - 1: r + h, c: c + w].copy(), r - 1, c
M_b, rb, cb = matrix[r: r + h + 1, c: c + w].copy(), r, c
return (M_r, rr, cr), (M_l, rl, cl), (M_t, rt, ct), (M_b, rb, cb)
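# Worked example (not part of the original script) of the multiple-comparison
# adjustment inside greedy_search: an m x n matrix contains
# m(m+1)/2 * n(n+1)/2 contiguous sub-matrices, and the one-sided z-test
# p-value is multiplied by that count (a Bonferroni-style correction). The
# matrix, mean and std below are arbitrary numbers chosen for illustration;
# the helper is never called by the script.
def _example_adjusted_p_value():
    n_rows, n_cols = 4, 5
    n_sub = n_rows * (n_rows + 1) * n_cols * (n_cols + 1) / 4  # 4*5 * 5*6 / 4 = 150
    M = np.array([[2.0, 3.0], [4.0, 5.0]])
    mu, sigma = 1.0, 2.0
    top = (np.nanmean(M) - mu) * math.sqrt(M.size)
    return std_norm.sf(top / sigma) * n_sub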
# aggregates the interaction frequencies among all files, and computes the mean and std
def mean_std(all_files, data_path):
all_freqs = pd.Series([])
# files are sparse, so must account for the extra zeros
total_size = 0
print('\nCalculating mean and std...')
for file in tqdm.tqdm(all_files):
chrx, chry = chrm_nums(file)
if chrx != chry:
chr_df = read(data_path + file)
file_freqs = chr_df.freq
all_freqs = | pd.concat([all_freqs, file_freqs]) | pandas.concat |
import pandas as pd
import matplotlib.pyplot as plt
from graphviz import Digraph
from simrd.telemetry import Telemetry
TRACE_STATS = [
'time', 'pinned_memory', 'locked_memory', 'evictable_memory', 'total_memory', 'memory_pressure'
]
class State:
def __init__(self, telemetry : Telemetry):
self.material = set()
self.evicted = set()
self.locked = set()
self.pending = set()
self.pinned = set()
self.banished = set()
self.timesteps = sorted(telemetry.trace.timesteps)
self.time_idx = -1
self.time = 0
self.pressure = 0
self.telemetry = telemetry
# initialize to be all evicted and uncomputed
self.evicted.update(list(self.telemetry.tensor.keys()))
self.uncomputed = self.evicted.copy()
# group tensors by their parent opcalls
self.call_groups = {}
for tid in self.evicted:
op_id = self.telemetry.get('tensor', tid, 'op_id')
if op_id not in self.call_groups:
self.call_groups[op_id] = []
self.call_groups[op_id].append(tid)
# group tensors by their underlying storages
self.storage_groups = {}
for tid in self.evicted:
storage_id = self.telemetry.get('tensor', tid, 'storage_id')
if storage_id not in self.storage_groups:
self.storage_groups[storage_id] = []
self.storage_groups[storage_id].append(tid)
def _step(self):
if self.time_idx + 1 >= len(self.timesteps):
return False
self.time_idx += 1
self.time = self.timesteps[self.time_idx]
# A tensor cannot be evicted *and then* computed on the same timestep,
# only computed then evicted. Thus, we can set computed = computed \ evicted.
# evicted /\ material = empty
# pending subset evicted
# banished subset evicted
# locked subset material
# pinned subset material
# locked /\ pinned = empty
# add new material tensors
compute = self.telemetry.trace.compute.get(self.time, [])
self.material.update(compute)
self.evicted.difference_update(compute)
self.pending.difference_update(compute)
self.uncomputed.difference_update(compute)
# add new evicted and banished tensors
for sid in self.telemetry.trace.evict.get(self.time, []):
self.evicted.update(self.storage_groups[sid])
for sid in self.telemetry.trace.banish.get(self.time, []):
self.evicted.update(self.storage_groups[sid])
self.banished.update(self.storage_groups[sid])
# add new pending tensors
self.pending.update(self.telemetry.trace.pending.get(self.time, []))
# update pinned, locked; a lock can only be locked after being unlocked at
# the same timestep, *assuming computations take nonzero time*
for sid in self.telemetry.trace.unlock.get(self.time, []):
self.locked.difference_update(self.storage_groups[sid])
for sid in self.telemetry.trace.lock.get(self.time, []):
self.locked.update(self.storage_groups[sid])
for sid in self.telemetry.trace.pin.get(self.time, []):
self.pinned.update(self.storage_groups[sid])
self.material.difference_update(self.evicted)
self.locked.difference_update(self.evicted)
self.pinned.difference_update(self.evicted)
        # Pinned storages override locks: drop their tensors from the locked set
for tid in self.pinned:
if tid in self.storage_groups:
self.locked.difference_update(self.storage_groups[tid])
# Mark any material tensors belonging to a pinned group as pinned
for tid in self.material:
sid = self.telemetry.get('tensor', tid, 'storage_id')
if sid in self.pinned:
self.pinned.add(tid)
self.pressure = self.telemetry.trace.pressure.get(self.time, 0)
assert len(self.material.intersection(self.evicted)) == 0
assert len(self.banished.intersection(self.evicted)) == len(self.banished)
assert len(self.locked.intersection(self.pinned)) == 0
assert len(self.locked.intersection(self.evicted)) == 0
assert len(self.locked.intersection(self.material)) == len(self.locked)
assert len(self.material) + len(self.evicted) == len(self.telemetry.tensor.keys())
return True
def step(self, steps=1):
for i in range(steps):
if not self._step():
return False
return True
def tensor_name(self, tid):
op_id = self.telemetry.get('tensor', tid, 'op_id')
op_name = self.telemetry.get('operator', op_id, 'name')
if op_name == None:
op_name = self.telemetry.get('tensor', tid, 'name')
if self.telemetry.get('operator', op_id, 'outputs') > 1:
index = self.telemetry.get('tensor', tid, 'index')
name = '{}.{}'.format(op_name, index)
else:
name = op_name
return name
def render_dot(self, filename=None, **kwargs):
"""
Returns the dot graph (of the current state) as a string, and optionally
outputs a rendered image to the specified filename (without file extension;
the extension and format can be changed by setting format='png', etc.
although pdf seems to be the fastest).
"""
g = Digraph(engine='dot')
g.attr('node', shape='box', style='filled')
g.attr(compound='true', rankdir='LR')
for call_id in self.call_groups:
with g.subgraph(name='cluster_{}'.format(call_id)) as gg:
gg.attr(style='filled', color='lightgrey')
for tid in self.call_groups[call_id]:
if tid in self.material:
args = {'color': 'white'}
if tid in self.pinned:
args['color'] = 'orange'
elif tid in self.locked:
args['color'] = 'pink'
elif tid in self.evicted:
args = {'color': 'gray'}
if tid in self.banished:
args['color'] = 'red'
elif tid in self.uncomputed:
args['color'] = 'white'
args['fillcolor'] = 'black'
args['fontcolor'] = 'white'
args['style'] = 'dashed, filled'
if tid in self.pending:
args['fillcolor'] = 'darkgreen'
args['fontcolor'] = 'white'
gg.node(str(tid), self.tensor_name(tid), **args)
# edges, only draw one per op output cluster
for p in self.telemetry.adj_list:
c_op_ids = set()
for c in self.telemetry.adj_list[p]:
c_op_id = self.telemetry.get('tensor', c, 'op_id')
if c_op_id not in c_op_ids:
c_op_ids.add(c_op_id)
else:
continue
c_cluster = 'cluster_{}'.format(c_op_id)
args = {}
if self.telemetry.get('tensor', c, 'is_alias'):
args['style'] = 'dashed'
g.edge(str(p), str(c), lhead=c_cluster, **args)
if filename is not None:
g.render(filename, cleanup=True, **kwargs)
return g.source
def analyze_trace(tel : Telemetry):
s = State(tel)
data = []
while s.step():
pinned_mem = 0
locked_mem = 0
evictable_mem = 0
total_mem = 0
for tid in s.pinned:
pinned_mem += s.telemetry.get('tensor', tid, 'size')
for tid in s.locked:
locked_mem += s.telemetry.get('tensor', tid, 'size')
for tid in s.material.difference(s.pinned).difference(s.locked):
evictable_mem += s.telemetry.get('tensor', tid, 'size')
total_mem = pinned_mem + locked_mem + evictable_mem
mem_pressure = total_mem + s.pressure
data.append([s.time, pinned_mem, locked_mem, evictable_mem, total_mem, mem_pressure])
return pd.DataFrame(data, columns=TRACE_STATS)
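# Hedged usage sketch (not part of the original module): analyze_trace only
# needs a populated Telemetry object; how that object is produced (e.g. loaded
# from a simulator run) is left to the caller and is assumed here. Never
# called at import time.
def _example_trace_summary(telemetry: Telemetry) -> pd.DataFrame:
    stats_df = analyze_trace(telemetry)
    # rows where memory pressure peaked over the whole trace
    peak = stats_df['memory_pressure'].max()
    return stats_df[stats_df['memory_pressure'] == peak]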
def analyze_max_pinned(tel : Telemetry, filename, render_graph=False):
s = State(tel)
max_pinned_memory = -1
max_pinned_step = -1
max_pinned_time = -1
while s.step():
pinned_mem = sum(map(lambda tid: s.telemetry.get('tensor', tid, 'size'), s.pinned))
if pinned_mem > max_pinned_memory:
max_pinned_memory = pinned_mem
max_pinned_step = s.time_idx
max_pinned_time = s.time
print('maximum amount of pinned memory: {} at step {} (time = {})'.format(
max_pinned_memory, max_pinned_step, max_pinned_time
))
# now graph
s = State(tel)
while s.step():
if s.time_idx == max_pinned_step:
if render_graph:
s.render_dot(filename)
pinned_data = []
for tid in s.pinned:
pinned_data.append(s.telemetry.tensor[tid])
    stats = pd.DataFrame(pinned_data, columns=Telemetry.TENSOR_STATS)
    return stats
return stats
def analyze_max_locked(tel : Telemetry, filename, render_graph=False):
s = State(tel)
max_locked_memory = -1
max_locked_step = -1
max_locked_time = -1
while s.step():
locked_mem = sum(map(lambda tid: s.telemetry.get('tensor', tid, 'size'), s.locked))
if locked_mem > max_locked_memory:
max_locked_memory = locked_mem
max_locked_step = s.time_idx
max_locked_time = s.time
print('maximum amount of locked memory: {} at step {} (time = {})'.format(
max_locked_memory, max_locked_step, max_locked_time
))
# now graph
s = State(tel)
while s.step():
if s.time_idx == max_locked_step:
if render_graph:
s.render_dot(filename)
            locked_data = []
            for tid in s.locked:
                locked_data.append(s.telemetry.tensor[tid])
    stats = pd.DataFrame(locked_data, columns=Telemetry.TENSOR_STATS)
import pytest
import pandas as pd
import numpy as np
from numpy import pi, sqrt
import matplotlib.pyplot as plt
import os
from numpy.testing import assert_almost_equal, assert_allclose
from rolldecayestimators.ikeda import Ikeda, IkedaR
from rolldecayestimators import lambdas
import rolldecayestimators
import pyscores2.test
import pyscores2.indata
import pyscores2.output
@pytest.fixture
def ikeda():
N=10
data = np.zeros(N)
w_hat=np.linspace(0.1,1, N)
B_W0_hat = | pd.Series(data=data, index=w_hat) | pandas.Series |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: <NAME>
date: 2020/1/23 9:07
contact: <EMAIL>
desc: New event interfaces
New event interface: novel coronavirus (2019-nCoV) - NetEase (163)
New event interface: novel coronavirus (2019-nCoV) - DXY (Dingxiangyuan)
New event interface: novel coronavirus (2019-nCoV) - Baidu
"""
import json
import time
from io import BytesIO
import demjson
import pandas as pd
import requests
from PIL import Image
from bs4 import BeautifulSoup
from akshare.event.cons import province_dict, city_dict
# pd.set_option('display.max_columns', None) # just for debug
def epidemic_163(indicator="实时"):
"""
    NetEase (163) web page - novel coronavirus - real-time case statistics
    for China (domestic) and overseas
https://news.163.com/special/epidemic/?spssid=93326430940df93a37229666dfbc4b96&spsw=4&spss=other&#map_block
https://news.163.com/special/epidemic/?spssid=93326430940df93a37229666dfbc4b96&spsw=4&spss=other&
    :return: case data for each Chinese region and for overseas regions
:rtype: pandas.DataFrame
"""
url = "https://c.m.163.com/ug/api/wuhan/app/data/list-total"
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36",
}
params = {
"t": int(time.time() * 1000),
}
res = requests.get(url, params=params, headers=headers)
data_json = res.json()
hist_today_df = | pd.DataFrame([item["today"] for item in data_json["data"]["chinaDayList"]], index=[item["date"] for item in data_json["data"]["chinaDayList"]]) | pandas.DataFrame |
import re
import pandas as pd
from config import Config
class Dataset(Config):
"""
Attributes
----------
ukbb_vars: list
Variable names based on user selections as coded in the Biobank.
recoded_vars: list
Variable names based on user selections as will be recoded.
df: DataFrame
Dataset which can be manipulated using the below methods
Methods
-------
create_binary_variables(voi: str, patterns: dict)
Takes as input a variable of interest (e.g., 'medication') and a dictionary with keys representing new
variable names mapped onto regular expressions. New binary variables will be created based on whether
each individual has a value matching the regular expression in any of the columns related to the variable
of interest.
Example:
>>> myDataset.create_binary_variables("medication", {"taking_painkiller": "(aspirin|tylenol)"})
recode_diagnoses()
Creates new variables for groups of diagnoses included or excluded, based on
whether one or more of such diagnoses is present.
apply_inclusion_criteria(method: str)
Apply inclusion criteria based on specified method. Available options are "AND" and "OR".
apply_exclusion_criteria()
Apply exclusion criteria by removing cases where any of the specified diagnoses are present
clean(voi: str)
Takes as input a variable of interest (e.g., 'medication'). Removes all columns beginning with this string from the final dataframe.
recode_vars()
Replace values for each variable as specified in the config class
write_csv()
Write self.df to the filepath specified in the config class
"""
ukbb_vars, recoded_vars = ["eid"], ["eid"]
for var in Config.variables:
if Config.variables[var]["Included"]:
array_vars = []
for i in Config.variables[var]['ArrayRange']:
array_vars.append(f"{Config.variables[var]['DataField']}-{Config.variables[var]['InstanceNum']}.{i}")
ukbb_vars += array_vars
if len(Config.variables[var]['ArrayRange']) == 1:
recoded_vars.append(f"{var}_t{Config.variables[var]['InstanceNum']}")
else:
array_vars = []
for i in Config.variables[var]['ArrayRange']:
array_vars.append(f"{var}_t{Config.variables[var]['InstanceNum']}_{i}")
recoded_vars += array_vars
assert len(ukbb_vars) == len(recoded_vars)
def __init__(self) -> None:
self.df = pd.read_csv(self.filepaths["RawData"], dtype=str, usecols=self.ukbb_vars)
self.df.rename({k: v for k, v in zip(self.ukbb_vars, self.recoded_vars)}, axis=1, inplace=True)
self.df.dropna(axis=1, how="all", inplace=True)
def create_binary_variables(self, voi: str, patterns: dict):
cols = [col for col in self.df if col.startswith(voi)]
all_vars = list(patterns.keys())
new_vars = {var_name: [] for var_name in ["eid"] + all_vars}
for index, row in self.df[cols].iterrows():
new_vars["eid"].append(self.df["eid"][index])
for pat in patterns:
for value in row:
try:
if re.match(patterns[pat], value) is not None:
new_vars[pat].append(True)
break
except TypeError:
continue
if len(new_vars["eid"]) != len(new_vars[pat]):
new_vars[pat].append(False)
if not sum([len(x) for x in new_vars.values()]) == len(new_vars["eid"]) * len(new_vars.keys()):
raise ValueError(f"{sum([len(x) for x in new_vars.values()])} != {len(new_vars['eid']) * len(new_vars.keys())}")
new_df = pd.DataFrame(new_vars)
self.df = pd.merge(self.df, new_df, left_on="eid", right_on="eid")
def recode_diagnoses(self):
dx_cols = [col for col in self.df if col.startswith("diagnoses")]
all_dx = list(self.selected_diagnoses.keys())
new_vars = {var_name: [] for var_name in ["eid"] + all_dx}
for i in range(len(self.df)):
new_vars["eid"].append(self.df["eid"][i])
for col in dx_cols:
value = self.df[col][i]
if pd.isnull(value):
for dx in all_dx:
if len(new_vars[dx]) != len(new_vars["eid"]):
new_vars[dx].append(False)
break
for dx in self.selected_diagnoses:
if re.match(self.selected_diagnoses[dx], value) is not None:
if len(new_vars[dx]) != len(new_vars["eid"]):
new_vars[dx].append(True)
assert sum([len(x) for x in new_vars.values()]) == len(new_vars["eid"]) * len(new_vars.keys())
new_df = pd.DataFrame(new_vars)
self.df = pd.merge(self.df, new_df, left_on="eid", right_on="eid")
self.df.drop(dx_cols, axis=1, inplace=True)
def apply_inclusion_criteria(self, method: str):
if method == "AND":
for key in self.included_diagnoses:
self.df = self.df[self.df[key] == True]
elif method == "OR":
list_series = [self.df[key] == True for key in self.included_diagnoses]
included = | pd.concat(list_series, axis=1) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import matplotlib.pyplot as plt
import geopandas as gpd
import requests
# For storing png files in memory
import io
# For generating GIF
import imageio
###########################################################
########## Globals....
###########################################################
# Top value to use in scale, 0 = mean + 2 std devs
max_value = 0
# Bottom value to use in scale, should be zero
min_value = 0
# SANDAG API Link
sd_api = "https://opendata.arcgis.com/datasets/854d7e48e3dc451aa93b9daf82789089_0.geojson"
# Zipcode shape file link
zip_shape_full = "https://github.com/mulroony/State-zip-code-GeoJSON/raw/master/ca_california_zip_codes_geo.min.json"
# File to write gif to. Leave blank to just render inline
# Probably won't work in this script...
gif_path = "/tmp/SD_Covid_cases.gif"
#gif_path = ""
# Select ColorMap: https://matplotlib.org/3.2.1/tutorials/colors/colormaps.html
color_map = 'YlOrRd'
###########################################################
###########################################################
r = requests.get(sd_api)
rd = [ _r['properties'] for _r in r.json()['features'] ]
case_df = pd.DataFrame(rd)
# Cleanup, reduce mem
del [r, rd]
known_zips = list(case_df['ziptext'].unique())
print("Zipcodes in data: %s"%(len(known_zips)))
# Got API link from: https://sdgis-sandag.opendata.arcgis.com/datasets/covid-19-statistics-by-zip-code
r = requests.get(zip_shape_full)
rd = r.json()
zip_shape_known = {}
zip_shape_known['type'] = rd['type']
zip_shape_known['features'] = []
for i in rd['features']:
if i['properties']['ZCTA5CE10'] in known_zips:
zip_shape_known['features'].append(i)
print("Found %s matching zip codes in shape file"%(len(zip_shape_known['features'])))
del [r, rd]
gdf = gpd.GeoDataFrame.from_features(zip_shape_known)
gdf.rename(columns={'ZCTA5CE10': 'zipcode'}, inplace=True)
gdf.set_index('zipcode',inplace=True)
# Drop time from date, not useful
case_df['date'] = case_df['updatedate'].apply(lambda x: x.split(" ")[0])
# Drop unused fields
case_df.drop(inplace=True, columns=['FID',
'zipcode_zip',
'created_date',
'updatedate',
'created_user',
'last_edited_date',
'last_edited_user',
'globalid'])
# Missing data becomes zeros
case_df.fillna(0, inplace=True)
# Rename column
case_df.rename(columns={'ziptext': 'zipcode'}, inplace=True)
# Drop duplicates, have seen some
case_df.drop_duplicates(subset=['zipcode','date'], inplace=True)
# Ugly, but creates table I want
case_df = case_df.groupby(['zipcode', 'date']).sum().unstack().fillna(0)
# End up with nested column name, remove it
case_df.columns = case_df.columns.droplevel()
# Create super list of all case values so we can get stats if we are going to use it
if max_value == 0:
_tmp_case_list = []
# Not necessary, but can't hurt
dates = sorted(case_df.columns.values)
# subtract tomorrow from today, and set that as the value for today. repeat, skipping last day...
for i in range(len(dates)-1):
today = dates[i]
tomorrow = dates[i+1]
case_df[today] = case_df[tomorrow] - case_df[today]
# #Uncomment to find all negative values. Happens due to adjusting the numbers, we handle it
# #Good to do though
# _tmp_df = case_df[today].apply(lambda x: x if x < -1 else None).dropna()
# if _tmp_df.values.size > 0:
# print("%s"%(today))
# print(_tmp_df)
if max_value == 0:
_tmp_case_list += list(case_df[today].values)
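# Minimal self-contained demo (not part of the original pipeline) of the
# differencing step above: a running cumulative series indexed by date is
# turned into per-day new cases by subtracting each day from the next. The
# numbers are made up; the helper is never called by the script.
def _example_cumulative_to_daily():
    cumulative = pd.Series([1, 4, 4, 9], index=['06/01', '06/02', '06/03', '06/04'])
    daily = cumulative.shift(-1) - cumulative  # 3, 0, 5, NaN for the last day
    return daily[:-1]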
if max_value == 0:
_tmp_case_df = | pd.DataFrame(_tmp_case_list) | pandas.DataFrame |
import numpy as np
import pandas as pd
from functools import reduce
import seaborn as sns
from matplotlib import pyplot
def hap_load_and_process(url_or_path_to_csv_file, rename_dict,final_list):
# Method Chain 1 (Load data and deal with missing data)
df1 = (
pd.read_csv(url_or_path_to_csv_file)
.rename(columns=rename_dict)
#.dropna()
# etc...
)
# Method Chain 2 (Create new columns, drop others, and do processing)
df2 = (
df1
#.assign(status=lambda x: np.where((x.period > 2014), 1, 0))
.sort_values("country", ascending=True)
.reset_index(drop=True)
.loc[:, final_list]
)
# Make sure to return the latest dataframe
return df2
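# Hedged usage sketch (not part of the original module): the CSV path, the
# rename mapping and the final column list below are hypothetical placeholders
# showing how hap_load_and_process is meant to be called. Never called at
# import time.
def _example_hap_load():
    rename_dict = {'Country name': 'country', 'Ladder score': 'happiness_score'}
    final_list = ['country', 'happiness_score']
    return hap_load_and_process('../data/raw/happiness_2021.csv',
                                rename_dict, final_list)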
def ind_load_and_process(url_or_path_to_csv_file, ind):
# Method Chain 1 (Load data and deal with missing data)
df1 = (
| pd.read_csv(url_or_path_to_csv_file) | pandas.read_csv |
import itertools
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from xarray.core.missing import (
NumpyInterpolator,
ScipyInterpolator,
SplineInterpolator,
_get_nan_block_lengths,
get_clean_interp_index,
)
from xarray.core.pycompat import dask_array_type
from xarray.tests import (
assert_allclose,
assert_array_equal,
assert_equal,
raises_regex,
requires_bottleneck,
requires_cftime,
requires_dask,
requires_scipy,
)
from xarray.tests.test_cftime_offsets import _CFTIME_CALENDARS
@pytest.fixture
def da():
return xr.DataArray([0, np.nan, 1, 2, np.nan, 3, 4, 5, np.nan, 6, 7], dims="time")
@pytest.fixture
def cf_da():
def _cf_da(calendar, freq="1D"):
times = xr.cftime_range(
start="1970-01-01", freq=freq, periods=10, calendar=calendar
)
values = np.arange(10)
return xr.DataArray(values, dims=("time",), coords={"time": times})
return _cf_da
@pytest.fixture
def ds():
ds = xr.Dataset()
ds["var1"] = xr.DataArray(
[0, np.nan, 1, 2, np.nan, 3, 4, 5, np.nan, 6, 7], dims="time"
)
ds["var2"] = xr.DataArray(
[10, np.nan, 11, 12, np.nan, 13, 14, 15, np.nan, 16, 17], dims="x"
)
return ds
def make_interpolate_example_data(shape, frac_nan, seed=12345, non_uniform=False):
rs = np.random.RandomState(seed)
vals = rs.normal(size=shape)
if frac_nan == 1:
vals[:] = np.nan
elif frac_nan == 0:
pass
else:
n_missing = int(vals.size * frac_nan)
ys = np.arange(shape[0])
xs = np.arange(shape[1])
if n_missing:
np.random.shuffle(ys)
ys = ys[:n_missing]
np.random.shuffle(xs)
xs = xs[:n_missing]
vals[ys, xs] = np.nan
if non_uniform:
# construct a datetime index that has irregular spacing
deltas = pd.TimedeltaIndex(unit="d", data=rs.normal(size=shape[0], scale=10))
coords = {"time": (pd.Timestamp("2000-01-01") + deltas).sort_values()}
else:
coords = {"time": pd.date_range("2000-01-01", freq="D", periods=shape[0])}
da = xr.DataArray(vals, dims=("time", "x"), coords=coords)
df = da.to_pandas()
return da, df
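# Hedged usage sketch (not part of the original test module): the helper above
# returns a matching (DataArray, DataFrame) pair with NaNs scattered into the
# values (note the row/column sampling means the realised NaN count can be far
# below size * frac_nan). Shape and fraction follow the tests below; never
# collected or called by pytest.
def _example_interpolate_data():
    da, df = make_interpolate_example_data((8, 8), frac_nan=0.5, seed=0)
    return int(np.isnan(da.values).sum()), df.shape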
@requires_scipy
def test_interpolate_pd_compat():
shapes = [(8, 8), (1, 20), (20, 1), (100, 100)]
frac_nans = [0, 0.5, 1]
methods = [
"linear",
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
# "next",
# "previous",
]
for (shape, frac_nan, method) in itertools.product(shapes, frac_nans, methods):
da, df = make_interpolate_example_data(shape, frac_nan)
for dim in ["time", "x"]:
actual = da.interpolate_na(method=method, dim=dim, fill_value=np.nan)
expected = df.interpolate(
method=method, axis=da.get_axis_num(dim), fill_value=(np.nan, np.nan)
)
# Note, Pandas does some odd things with the left/right fill_value
# for the linear methods. This next line inforces the xarray
# fill_value convention on the pandas output. Therefore, this test
# only checks that interpolated values are the same (not nans)
expected.values[pd.isnull(actual.values)] = np.nan
np.testing.assert_allclose(actual.values, expected.values)
@requires_scipy
@pytest.mark.parametrize("method", ["barycentric", "krog", "pchip", "spline", "akima"])
def test_scipy_methods_function(method):
# Note: Pandas does some wacky things with these methods and the full
# integration tests wont work.
da, _ = make_interpolate_example_data((25, 25), 0.4, non_uniform=True)
actual = da.interpolate_na(method=method, dim="time")
assert (da.count("time") <= actual.count("time")).all()
@requires_scipy
def test_interpolate_pd_compat_non_uniform_index():
shapes = [(8, 8), (1, 20), (20, 1), (100, 100)]
frac_nans = [0, 0.5, 1]
methods = ["time", "index", "values"]
for (shape, frac_nan, method) in itertools.product(shapes, frac_nans, methods):
da, df = make_interpolate_example_data(shape, frac_nan, non_uniform=True)
for dim in ["time", "x"]:
if method == "time" and dim != "time":
continue
actual = da.interpolate_na(
method="linear", dim=dim, use_coordinate=True, fill_value=np.nan
)
expected = df.interpolate(
method=method, axis=da.get_axis_num(dim), fill_value=np.nan
)
            # Note: pandas does some odd things with the left/right fill_value
            # for the linear methods. The next line enforces the xarray
            # fill_value convention on the pandas output, so this test only
            # checks that the interpolated values match (not the NaNs).
expected.values[pd.isnull(actual.values)] = np.nan
np.testing.assert_allclose(actual.values, expected.values)
@requires_scipy
def test_interpolate_pd_compat_polynomial():
shapes = [(8, 8), (1, 20), (20, 1), (100, 100)]
frac_nans = [0, 0.5, 1]
orders = [1, 2, 3]
for (shape, frac_nan, order) in itertools.product(shapes, frac_nans, orders):
da, df = make_interpolate_example_data(shape, frac_nan)
for dim in ["time", "x"]:
actual = da.interpolate_na(
method="polynomial", order=order, dim=dim, use_coordinate=False
)
expected = df.interpolate(
method="polynomial", order=order, axis=da.get_axis_num(dim)
)
np.testing.assert_allclose(actual.values, expected.values)
@requires_scipy
def test_interpolate_unsorted_index_raises():
vals = np.array([1, 2, 3], dtype=np.float64)
expected = xr.DataArray(vals, dims="x", coords={"x": [2, 1, 3]})
with raises_regex(ValueError, "Index 'x' must be monotonically increasing"):
expected.interpolate_na(dim="x", method="index")
def test_interpolate_no_dim_raises():
da = xr.DataArray(np.array([1, 2, np.nan, 5], dtype=np.float64), dims="x")
with raises_regex(NotImplementedError, "dim is a required argument"):
da.interpolate_na(method="linear")
def test_interpolate_invalid_interpolator_raises():
da = xr.DataArray(np.array([1, 2, np.nan, 5], dtype=np.float64), dims="x")
with raises_regex(ValueError, "not a valid"):
da.interpolate_na(dim="x", method="foo")
def test_interpolate_duplicate_values_raises():
data = np.random.randn(2, 3)
da = xr.DataArray(data, coords=[("x", ["a", "a"]), ("y", [0, 1, 2])])
with raises_regex(ValueError, "Index 'x' has duplicate values"):
da.interpolate_na(dim="x", method="foo")
def test_interpolate_multiindex_raises():
data = np.random.randn(2, 3)
data[1, 1] = np.nan
da = xr.DataArray(data, coords=[("x", ["a", "b"]), ("y", [0, 1, 2])])
das = da.stack(z=("x", "y"))
with raises_regex(TypeError, "Index 'z' must be castable to float64"):
das.interpolate_na(dim="z")
def test_interpolate_2d_coord_raises():
coords = {
"x": xr.Variable(("a", "b"), np.arange(6).reshape(2, 3)),
"y": xr.Variable(("a", "b"), np.arange(6).reshape(2, 3)) * 2,
}
data = np.random.randn(2, 3)
data[1, 1] = np.nan
da = xr.DataArray(data, dims=("a", "b"), coords=coords)
with raises_regex(ValueError, "interpolation must be 1D"):
da.interpolate_na(dim="a", use_coordinate="x")
@requires_scipy
def test_interpolate_kwargs():
da = xr.DataArray(np.array([4, 5, np.nan], dtype=np.float64), dims="x")
expected = xr.DataArray(np.array([4, 5, 6], dtype=np.float64), dims="x")
actual = da.interpolate_na(dim="x", fill_value="extrapolate")
assert_equal(actual, expected)
expected = xr.DataArray(np.array([4, 5, -999], dtype=np.float64), dims="x")
actual = da.interpolate_na(dim="x", fill_value=-999)
assert_equal(actual, expected)
def test_interpolate_keep_attrs():
vals = np.array([1, 2, 3, 4, 5, 6], dtype=np.float64)
mvals = vals.copy()
mvals[2] = np.nan
missing = xr.DataArray(mvals, dims="x")
missing.attrs = {"test": "value"}
actual = missing.interpolate_na(dim="x", keep_attrs=True)
assert actual.attrs == {"test": "value"}
def test_interpolate():
vals = np.array([1, 2, 3, 4, 5, 6], dtype=np.float64)
expected = xr.DataArray(vals, dims="x")
mvals = vals.copy()
mvals[2] = np.nan
missing = xr.DataArray(mvals, dims="x")
actual = missing.interpolate_na(dim="x")
assert_equal(actual, expected)
def test_interpolate_nonans():
vals = np.array([1, 2, 3, 4, 5, 6], dtype=np.float64)
expected = xr.DataArray(vals, dims="x")
actual = expected.interpolate_na(dim="x")
assert_equal(actual, expected)
@requires_scipy
def test_interpolate_allnans():
vals = np.full(6, np.nan, dtype=np.float64)
expected = xr.DataArray(vals, dims="x")
actual = expected.interpolate_na(dim="x")
assert_equal(actual, expected)
@requires_bottleneck
def test_interpolate_limits():
da = xr.DataArray(
np.array([1, 2, np.nan, np.nan, np.nan, 6], dtype=np.float64), dims="x"
)
actual = da.interpolate_na(dim="x", limit=None)
assert actual.isnull().sum() == 0
actual = da.interpolate_na(dim="x", limit=2)
expected = xr.DataArray(
np.array([1, 2, 3, 4, np.nan, 6], dtype=np.float64), dims="x"
)
assert_equal(actual, expected)
@requires_scipy
def test_interpolate_methods():
for method in [
"linear",
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
"previous",
"next",
]:
kwargs = {}
da = xr.DataArray(
np.array([0, 1, 2, np.nan, np.nan, np.nan, 6, 7, 8], dtype=np.float64),
dims="x",
)
actual = da.interpolate_na("x", method=method, **kwargs)
assert actual.isnull().sum() == 0
actual = da.interpolate_na("x", method=method, limit=2, **kwargs)
assert actual.isnull().sum() == 1
@requires_scipy
def test_interpolators():
for method, interpolator in [
("linear", NumpyInterpolator),
("linear", ScipyInterpolator),
("spline", SplineInterpolator),
]:
xi = np.array([-1, 0, 1, 2, 5], dtype=np.float64)
yi = np.array([-10, 0, 10, 20, 50], dtype=np.float64)
x = np.array([3, 4], dtype=np.float64)
f = interpolator(xi, yi, method=method)
out = f(x)
assert pd.isnull(out).sum() == 0
def test_interpolate_use_coordinate():
xc = xr.Variable("x", [100, 200, 300, 400, 500, 600])
da = xr.DataArray(
np.array([1, 2, np.nan, np.nan, np.nan, 6], dtype=np.float64),
dims="x",
coords={"xc": xc},
)
    # use_coordinate == False is the same as using the default index
    actual = da.interpolate_na(dim="x", use_coordinate=False)
    expected = da.interpolate_na(dim="x")
    assert_equal(actual, expected)
    # possible to specify a non-index coordinate
actual = da.interpolate_na(dim="x", use_coordinate="xc")
expected = da.interpolate_na(dim="x")
assert_equal(actual, expected)
# possible to specify index coordinate by name
actual = da.interpolate_na(dim="x", use_coordinate="x")
expected = da.interpolate_na(dim="x")
assert_equal(actual, expected)
@requires_dask
def test_interpolate_dask():
da, _ = make_interpolate_example_data((40, 40), 0.5)
da = da.chunk({"x": 5})
actual = da.interpolate_na("time")
expected = da.load().interpolate_na("time")
assert isinstance(actual.data, dask_array_type)
assert_equal(actual.compute(), expected)
# with limit
da = da.chunk({"x": 5})
actual = da.interpolate_na("time", limit=3)
expected = da.load().interpolate_na("time", limit=3)
assert isinstance(actual.data, dask_array_type)
assert_equal(actual, expected)
@requires_dask
def test_interpolate_dask_raises_for_invalid_chunk_dim():
da, _ = make_interpolate_example_data((40, 40), 0.5)
da = da.chunk({"time": 5})
# this checks for ValueError in dask.array.apply_gufunc
with raises_regex(ValueError, "consists of multiple chunks"):
da.interpolate_na("time")
@requires_dask
@requires_scipy
@pytest.mark.parametrize("dtype, method", [(int, "linear"), (int, "nearest")])
def test_interpolate_dask_expected_dtype(dtype, method):
da = xr.DataArray(
data=np.array([0, 1], dtype=dtype),
dims=["time"],
coords=dict(time=np.array([0, 1])),
).chunk(dict(time=2))
da = da.interp(time=np.array([0, 0.5, 1, 2]), method=method)
assert da.dtype == da.compute().dtype
@requires_bottleneck
def test_ffill():
da = xr.DataArray(np.array([4, 5, np.nan], dtype=np.float64), dims="x")
expected = xr.DataArray(np.array([4, 5, 5], dtype=np.float64), dims="x")
actual = da.ffill("x")
assert_equal(actual, expected)
@requires_bottleneck
@requires_dask
def test_ffill_dask():
da, _ = make_interpolate_example_data((40, 40), 0.5)
da = da.chunk({"x": 5})
actual = da.ffill("time")
expected = da.load().ffill("time")
assert isinstance(actual.data, dask_array_type)
assert_equal(actual, expected)
# with limit
da = da.chunk({"x": 5})
actual = da.ffill("time", limit=3)
expected = da.load().ffill("time", limit=3)
assert isinstance(actual.data, dask_array_type)
assert_equal(actual, expected)
@requires_bottleneck
@requires_dask
def test_bfill_dask():
da, _ = make_interpolate_example_data((40, 40), 0.5)
da = da.chunk({"x": 5})
actual = da.bfill("time")
expected = da.load().bfill("time")
assert isinstance(actual.data, dask_array_type)
assert_equal(actual, expected)
# with limit
da = da.chunk({"x": 5})
actual = da.bfill("time", limit=3)
expected = da.load().bfill("time", limit=3)
assert isinstance(actual.data, dask_array_type)
assert_equal(actual, expected)
@requires_bottleneck
def test_ffill_bfill_nonans():
vals = np.array([1, 2, 3, 4, 5, 6], dtype=np.float64)
expected = xr.DataArray(vals, dims="x")
actual = expected.ffill(dim="x")
assert_equal(actual, expected)
actual = expected.bfill(dim="x")
assert_equal(actual, expected)
@requires_bottleneck
def test_ffill_bfill_allnans():
vals = np.full(6, np.nan, dtype=np.float64)
expected = xr.DataArray(vals, dims="x")
actual = expected.ffill(dim="x")
assert_equal(actual, expected)
actual = expected.bfill(dim="x")
assert_equal(actual, expected)
@requires_bottleneck
def test_ffill_functions(da):
result = da.ffill("time")
assert result.isnull().sum() == 0
@requires_bottleneck
def test_ffill_limit():
da = xr.DataArray(
[0, np.nan, np.nan, np.nan, np.nan, 3, 4, 5, np.nan, 6, 7], dims="time"
)
result = da.ffill("time")
expected = xr.DataArray([0, 0, 0, 0, 0, 3, 4, 5, 5, 6, 7], dims="time")
assert_array_equal(result, expected)
result = da.ffill("time", limit=1)
expected = xr.DataArray(
[0, 0, np.nan, np.nan, np.nan, 3, 4, 5, 5, 6, 7], dims="time"
)
assert_array_equal(result, expected)
def test_interpolate_dataset(ds):
actual = ds.interpolate_na(dim="time")
# no missing values in var1
assert actual["var1"].count("time") == actual.dims["time"]
# var2 should be the same as it was
assert_array_equal(actual["var2"], ds["var2"])
@requires_bottleneck
def test_ffill_dataset(ds):
ds.ffill(dim="time")
@requires_bottleneck
def test_bfill_dataset(ds):
    ds.bfill(dim="time")
@requires_bottleneck
@pytest.mark.parametrize(
"y, lengths",
[
[np.arange(9), [[3, 3, 3, 0, 3, 3, 0, 2, 2]]],
[np.arange(9) * 3, [[9, 9, 9, 0, 9, 9, 0, 6, 6]]],
[[0, 2, 5, 6, 7, 8, 10, 12, 14], [[6, 6, 6, 0, 4, 4, 0, 4, 4]]],
],
)
def test_interpolate_na_nan_block_lengths(y, lengths):
arr = [[np.nan, np.nan, np.nan, 1, np.nan, np.nan, 4, np.nan, np.nan]]
da = xr.DataArray(arr * 2, dims=["x", "y"], coords={"x": [0, 1], "y": y})
index = get_clean_interp_index(da, dim="y", use_coordinate=True)
actual = _get_nan_block_lengths(da, dim="y", index=index)
expected = da.copy(data=lengths * 2)
assert_equal(actual, expected)
@requires_cftime
@pytest.mark.parametrize("calendar", _CFTIME_CALENDARS)
def test_get_clean_interp_index_cf_calendar(cf_da, calendar):
"""The index for CFTimeIndex is in units of days. This means that if two series using a 360 and 365 days
calendar each have a trend of .01C/year, the linear regression coefficients will be different because they
have different number of days.
Another option would be to have an index in units of years, but this would likely create other difficulties.
"""
i = get_clean_interp_index(cf_da(calendar), dim="time")
np.testing.assert_array_equal(i, np.arange(10) * 1e9 * 86400)
@requires_cftime
@pytest.mark.parametrize(
("calendar", "freq"), zip(["gregorian", "proleptic_gregorian"], ["1D", "1M", "1Y"])
)
def test_get_clean_interp_index_dt(cf_da, calendar, freq):
"""In the gregorian case, the index should be proportional to normal datetimes."""
g = cf_da(calendar, freq=freq)
g["stime"] = xr.Variable(data=g.time.to_index().to_datetimeindex(), dims=("time",))
gi = get_clean_interp_index(g, "time")
si = get_clean_interp_index(g, "time", use_coordinate="stime")
np.testing.assert_array_equal(gi, si)
def test_get_clean_interp_index_potential_overflow():
da = xr.DataArray(
[0, 1, 2],
dims=("time",),
coords={"time": xr.cftime_range("0000-01-01", periods=3, calendar="360_day")},
)
get_clean_interp_index(da, "time")
@pytest.mark.parametrize("index", ([0, 2, 1], [0, 1, 1]))
def test_get_clean_interp_index_strict(index):
da = xr.DataArray([0, 1, 2], dims=("x",), coords={"x": index})
with pytest.raises(ValueError):
get_clean_interp_index(da, "x")
clean = get_clean_interp_index(da, "x", strict=False)
np.testing.assert_array_equal(index, clean)
assert clean.dtype == np.float64
@pytest.fixture
def da_time():
return xr.DataArray(
[np.nan, 1, 2, np.nan, np.nan, 5, np.nan, np.nan, np.nan, np.nan, 10],
dims=["t"],
)
def test_interpolate_na_max_gap_errors(da_time):
with raises_regex(
NotImplementedError, "max_gap not implemented for unlabeled coordinates"
):
da_time.interpolate_na("t", max_gap=1)
with raises_regex(ValueError, "max_gap must be a scalar."):
da_time.interpolate_na("t", max_gap=(1,))
da_time["t"] = | pd.date_range("2001-01-01", freq="H", periods=11) | pandas.date_range |
# This script uses OSMnx to generate the road network data for US municipalities
# Importing required modules
import pandas as pd
import osmnx as ox
import networkx as nx
import matplotlib.cm as cm
import matplotlib.colors as colors
# Defining username + directories
username = ''
direc = 'C:/Users/' + username + '/Documents/Data/road_networks/'
figs_direc = direc + 'figures2/'
# Defining the list of locations to include in this study
locations = ['Atlanta, Georgia', 'Austin, Texas', 'Baltimore, Maryland',
'Birmingham, Alabama', 'Boston, Massachusetts', 'Buffalo, New York',
'Charlotte, North Carolina', 'Chicago, Illinois', 'Cincinnati, Ohio',
# 'Cleveland, Ohio', 'Columbus, Ohio', 'Dallas, Texas', 'Denver, Colorado',
'Columbus, Ohio', 'Dallas, Texas', 'Denver, Colorado',
'Detroit, Michigan', 'Grand Rapids, Michigan', 'Hartford, Connecticut',
'Houston, Texas', 'Indianapolis, Indiana', 'Jacksonville, Florida',
'Kansas City, Missouri', 'Las Vegas, Nevada', 'Los Angeles, California',
'Louisville, Kentucky', 'Memphis, Tennessee', 'Miami, Florida',
'Milwaukee, Wisconsin', 'Minneapolis, Minnesota', 'Nashville, Tennessee',
             'New Orleans, Louisiana', 'New York, New York', 'Oklahoma City, Oklahoma',
'Orlando, Florida', 'Philadelphia, Pennsylvania', 'Phoenix, Arizona',
'Pittsburgh, Pennsylvania', 'Portland, Oregon', 'Providence, Rhode Island',
'Raleigh, North Carolina', 'Richmond, Virginia', 'Riverside, California',
'Rochester, New York', 'Sacramento, California', 'Salt Lake City, Utah',
'San Antonio, Texas', 'San Diego, California', 'San Francisco, California',
'San Jose, California', 'Seattle, Washington', 'St. Louis, Missouri',
'Tampa, Florida', 'Tucson, Arizona', 'Virginia Beach, Virginia', 'Washington, DC']
# Initializing a dataframe
df = pd.DataFrame()
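# Presumably the remainder of the script appends one row of road-network
# statistics per location to this dataframe.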
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
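    # setup_method builds self.index: a six-row MultiIndex named
    # ('first', 'second') over the levels ['foo', 'bar', 'baz', 'qux'] and
    # ['one', 'two'].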
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
        # scalar data should not be accepted; the error should demand list-like input
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
        # scalar data should not be accepted; the error should demand list-like input
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
        # scalar data should not be accepted; the error should demand list-like input
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
        # It doesn't matter which way the labels are copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
        # It doesn't matter which way the names are copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
        # the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
import logging
import os
import re
import shutil
from datetime import datetime
from itertools import combinations
from random import randint
import numpy as np
import pandas as pd
import psutil
import pytest
from dask import dataframe as dd
from distributed.utils_test import cluster
from tqdm import tqdm
import featuretools as ft
from featuretools import EntitySet, Timedelta, calculate_feature_matrix, dfs
from featuretools.computational_backends import utils
from featuretools.computational_backends.calculate_feature_matrix import (
FEATURE_CALCULATION_PERCENTAGE,
_chunk_dataframe_groups,
_handle_chunk_size,
scatter_warning
)
from featuretools.computational_backends.utils import (
bin_cutoff_times,
create_client_and_cluster,
n_jobs_to_workers
)
from featuretools.feature_base import (
AggregationFeature,
DirectFeature,
IdentityFeature
)
from featuretools.primitives import (
Count,
Max,
Min,
Percentile,
Sum,
TransformPrimitive
)
from featuretools.tests.testing_utils import (
backward_path,
get_mock_client_cluster,
to_pandas
)
from featuretools.utils.gen_utils import Library, import_or_none
ks = import_or_none('databricks.koalas')
def test_scatter_warning(caplog):
logger = logging.getLogger('featuretools')
match = "EntitySet was only scattered to {} out of {} workers"
warning_message = match.format(1, 2)
logger.propagate = True
scatter_warning(1, 2)
logger.propagate = False
assert warning_message in caplog.text
# TODO: final assert fails w/ Dask
def test_calc_feature_matrix(es):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('Distributed dataframe result not ordered')
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
instances = range(17)
cutoff_time = pd.DataFrame({'time': times, es['log'].index: instances})
labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
property_feature = ft.Feature(es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
verbose=True)
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
error_text = 'features must be a non-empty list of features'
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix('features', es, cutoff_time=cutoff_time)
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([], es, cutoff_time=cutoff_time)
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([1, 2, 3], es, cutoff_time=cutoff_time)
error_text = "cutoff_time times must be datetime type: try casting via "\
"pd\\.to_datetime\\(\\)"
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
instance_ids=range(17),
cutoff_time=17)
error_text = 'cutoff_time must be a single value or DataFrame'
with pytest.raises(TypeError, match=error_text):
calculate_feature_matrix([property_feature],
es,
instance_ids=range(17),
cutoff_time=times)
cutoff_times_dup = pd.DataFrame({'time': [datetime(2018, 3, 1),
datetime(2018, 3, 1)],
es['log'].index: [1, 1]})
error_text = 'Duplicated rows in cutoff time dataframe.'
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([property_feature],
entityset=es,
cutoff_time=cutoff_times_dup)
cutoff_reordered = cutoff_time.iloc[[-1, 10, 1]] # 3 ids not ordered by cutoff time
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_reordered,
verbose=True)
assert all(feature_matrix.index == cutoff_reordered["id"].values)
    # fails with Dask and Koalas entitysets: the cutoff time is not reordered, so out-of-order
    # results cannot be verified - all values are False, so a positional check can't tell
    # whether the order is wrong or merely different
def test_cfm_warns_dask_cutoff_time(es):
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
instances = range(17)
cutoff_time = pd.DataFrame({'time': times,
es['log'].index: instances})
cutoff_time = dd.from_pandas(cutoff_time, npartitions=4)
property_feature = ft.Feature(es['log']['value']) > 10
match = "cutoff_time should be a Pandas DataFrame: " \
"computing cutoff_time, this may take a while"
with pytest.warns(UserWarning, match=match):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
def test_cfm_compose(es, lt):
property_feature = ft.Feature(es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=lt,
verbose=True)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
assert (feature_matrix[property_feature.get_name()] ==
feature_matrix['label_func']).values.all()
def test_cfm_compose_approximate(es, lt):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('dask does not support approximate')
property_feature = ft.Feature(es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=lt,
approximate='1s',
verbose=True)
assert(type(feature_matrix) == pd.core.frame.DataFrame)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
assert (feature_matrix[property_feature.get_name()] ==
feature_matrix['label_func']).values.all()
def test_cfm_dask_compose(dask_es, lt):
property_feature = ft.Feature(dask_es['log']['value']) > 10
feature_matrix = calculate_feature_matrix([property_feature],
dask_es,
cutoff_time=lt,
verbose=True)
feature_matrix = feature_matrix.compute()
assert (feature_matrix[property_feature.get_name()] == feature_matrix['label_func']).values.all()
# tests approximate, skip for dask/koalas
def test_cfm_approximate_correct_ordering():
trips = {
'trip_id': [i for i in range(1000)],
'flight_time': [datetime(1998, 4, 2) for i in range(350)] + [datetime(1997, 4, 3) for i in range(650)],
'flight_id': [randint(1, 25) for i in range(1000)],
'trip_duration': [randint(1, 999) for i in range(1000)]
}
df = pd.DataFrame.from_dict(trips)
es = EntitySet('flights')
es.entity_from_dataframe("trips",
dataframe=df,
index="trip_id",
time_index='flight_time')
es.normalize_entity(base_entity_id="trips",
new_entity_id="flights",
index="flight_id",
make_time_index=True)
features = dfs(entityset=es, target_entity='trips', features_only=True)
flight_features = [feature for feature in features
if isinstance(feature, DirectFeature) and
isinstance(feature.base_features[0],
AggregationFeature)]
property_feature = IdentityFeature(es['trips']['trip_id'])
cutoff_time = pd.DataFrame.from_dict({'instance_id': df['trip_id'],
'time': df['flight_time']})
time_feature = IdentityFeature(es['trips']['flight_time'])
feature_matrix = calculate_feature_matrix(flight_features + [property_feature, time_feature],
es,
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
feature_matrix.index.names = ['instance', 'time']
assert(np.all(feature_matrix.reset_index('time').reset_index()[['instance', 'time']].values == feature_matrix[['trip_id', 'flight_time']].values))
feature_matrix_2 = calculate_feature_matrix(flight_features + [property_feature, time_feature],
es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
approximate=Timedelta(2, 'd'))
feature_matrix_2.index.names = ['instance', 'time']
assert(np.all(feature_matrix_2.reset_index('time').reset_index()[['instance', 'time']].values == feature_matrix_2[['trip_id', 'flight_time']].values))
for column in feature_matrix:
for x, y in zip(feature_matrix[column], feature_matrix_2[column]):
assert ((pd.isnull(x) and pd.isnull(y)) or (x == y))
# uses approximate, skip for dask/koalas entitysets
def test_cfm_no_cutoff_time_index(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat4 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat4, pd_es['sessions'])
cutoff_time = pd.DataFrame({
'time': [datetime(2013, 4, 9, 10, 31, 19), datetime(2013, 4, 9, 11, 0, 0)],
'instance_id': [0, 2]
})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
cutoff_time_in_index=False,
approximate=Timedelta(12, 's'),
cutoff_time=cutoff_time)
assert feature_matrix.index.name == 'id'
assert feature_matrix.index.values.tolist() == [0, 2]
assert feature_matrix[dfeat.get_name()].tolist() == [10, 10]
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
cutoff_time = pd.DataFrame({
'time': [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)],
'instance_id': [0, 2]
})
feature_matrix_2 = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
cutoff_time_in_index=False,
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
assert feature_matrix_2.index.name == 'id'
assert feature_matrix_2.index.tolist() == [0, 2]
assert feature_matrix_2[dfeat.get_name()].tolist() == [7, 10]
assert feature_matrix_2[agg_feat.get_name()].tolist() == [5, 1]
# TODO: fails with dask entitysets
# TODO: fails with koalas entitysets
def test_cfm_duplicated_index_in_cutoff_time(es):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('Distributed results not ordered, missing duplicates')
times = [datetime(2011, 4, 1), datetime(2011, 5, 1),
datetime(2011, 4, 1), datetime(2011, 5, 1)]
instances = [1, 1, 2, 2]
property_feature = ft.Feature(es['log']['value']) > 10
cutoff_time = pd.DataFrame({'id': instances, 'time': times},
index=[1, 1, 1, 1])
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
chunk_size=1)
assert (feature_matrix.shape[0] == cutoff_time.shape[0])
# TODO: fails with Dask, Koalas
def test_saveprogress(es, tmpdir):
if not all(isinstance(entity.df, pd.DataFrame) for entity in es.entities):
pytest.xfail('saveprogress fails with distributed entitysets')
times = list([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)] +
[datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)] +
[datetime(2011, 4, 9, 10, 40, 0)] +
[datetime(2011, 4, 10, 10, 40, i) for i in range(2)] +
[datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)] +
[datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)])
cutoff_time = pd.DataFrame({'time': times, 'instance_id': range(17)})
property_feature = ft.Feature(es['log']['value']) > 10
save_progress = str(tmpdir)
fm_save = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time,
save_progress=save_progress)
_, _, files = next(os.walk(save_progress))
files = [os.path.join(save_progress, file) for file in files]
# there are 17 datetime files created above
assert len(files) == 17
list_df = []
for file_ in files:
df = pd.read_csv(file_, index_col="id", header=0)
list_df.append(df)
merged_df = pd.concat(list_df)
merged_df.set_index(pd.DatetimeIndex(times), inplace=True, append=True)
fm_no_save = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
assert np.all((merged_df.sort_index().values) == (fm_save.sort_index().values))
assert np.all((fm_no_save.sort_index().values) == (fm_save.sort_index().values))
assert np.all((fm_no_save.sort_index().values) == (merged_df.sort_index().values))
shutil.rmtree(save_progress)
def test_cutoff_time_correctly(es):
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
times = [datetime(2011, 4, 10), datetime(2011, 4, 11), datetime(2011, 4, 7)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 1, 2]})
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
labels = [10, 5, 0]
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_cutoff_time_binning():
cutoff_time = pd.DataFrame({
'time': [
datetime(2011, 4, 9, 12, 31),
datetime(2011, 4, 10, 11),
datetime(2011, 4, 10, 13, 10, 1)
],
'instance_id': [1, 2, 3]
})
binned_cutoff_times = bin_cutoff_times(cutoff_time, Timedelta(4, 'h'))
labels = [datetime(2011, 4, 9, 12),
datetime(2011, 4, 10, 8),
datetime(2011, 4, 10, 12)]
for i in binned_cutoff_times.index:
assert binned_cutoff_times['time'][i] == labels[i]
binned_cutoff_times = bin_cutoff_times(cutoff_time, Timedelta(25, 'h'))
labels = [datetime(2011, 4, 8, 22),
datetime(2011, 4, 9, 23),
datetime(2011, 4, 9, 23)]
for i in binned_cutoff_times.index:
assert binned_cutoff_times['time'][i] == labels[i]
error_text = "Unit is relative"
with pytest.raises(ValueError, match=error_text):
binned_cutoff_times = bin_cutoff_times(cutoff_time, Timedelta(1, 'mo'))
def test_training_window_fails_dask(dask_es):
property_feature = ft.Feature(dask_es['log']['id'],
parent_entity=dask_es['customers'],
primitive=Count)
error_text = "Using training_window is not supported with Dask Entities"
with pytest.raises(ValueError, match=error_text):
calculate_feature_matrix([property_feature],
dask_es,
training_window='2 hours')
def test_cutoff_time_columns_order(es):
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
times = [datetime(2011, 4, 10), datetime(2011, 4, 11), datetime(2011, 4, 7)]
id_col_names = ['instance_id', es['customers'].index]
time_col_names = ['time', es['customers'].time_index]
for id_col in id_col_names:
for time_col in time_col_names:
cutoff_time = pd.DataFrame({'dummy_col_1': [1, 2, 3],
id_col: [0, 1, 2],
'dummy_col_2': [True, False, False],
time_col: times})
feature_matrix = calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
labels = [10, 5, 0]
feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)
assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_cutoff_time_df_redundant_column_names(es):
property_feature = ft.Feature(es['log']['id'], parent_entity=es['customers'], primitive=Count)
times = [datetime(2011, 4, 10), datetime(2011, 4, 11), datetime(2011, 4, 7)]
cutoff_time = pd.DataFrame({es['customers'].index: [0, 1, 2],
'instance_id': [0, 1, 2],
'dummy_col': [True, False, False],
'time': times})
err_msg = 'Cutoff time DataFrame cannot contain both a column named "instance_id" and a column' \
' with the same name as the target entity index'
with pytest.raises(AttributeError, match=err_msg):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
cutoff_time = pd.DataFrame({es['customers'].time_index: [0, 1, 2],
'instance_id': [0, 1, 2],
'dummy_col': [True, False, False],
'time': times})
err_msg = 'Cutoff time DataFrame cannot contain both a column named "time" and a column' \
' with the same name as the target entity time index'
with pytest.raises(AttributeError, match=err_msg):
calculate_feature_matrix([property_feature],
es,
cutoff_time=cutoff_time)
def test_training_window(pd_es):
property_feature = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['customers'], primitive=Count)
top_level_agg = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
    # make sure we use features that have a direct feature to a higher level agg
# so we have multiple "filter eids" in get_pandas_data_slice,
# and we go through the loop to pull data with a training_window param more than once
dagg = DirectFeature(top_level_agg, pd_es['customers'])
# for now, warns if last_time_index not present
times = [datetime(2011, 4, 9, 12, 31),
datetime(2011, 4, 10, 11),
datetime(2011, 4, 10, 13, 10)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 1, 2]})
warn_text = "Using training_window but last_time_index is not set on entity customers"
with pytest.warns(UserWarning, match=warn_text):
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours')
pd_es.add_last_time_indexes()
error_text = 'Training window cannot be in observations'
with pytest.raises(AssertionError, match=error_text):
feature_matrix = calculate_feature_matrix([property_feature],
pd_es,
cutoff_time=cutoff_time,
training_window=Timedelta(2, 'observations'))
# Case1. include_cutoff_time = True
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=True)
prop_values = [4, 5, 1]
dagg_values = [3, 2, 1]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case2. include_cutoff_time = False
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=False)
prop_values = [5, 5, 2]
dagg_values = [3, 2, 1]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case3. include_cutoff_time = False with single cutoff time value
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=pd.to_datetime("2011-04-09 10:40:00"),
training_window='9 minutes',
include_cutoff_time=False)
prop_values = [0, 4, 0]
dagg_values = [3, 3, 3]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case4. include_cutoff_time = True with single cutoff time value
feature_matrix = calculate_feature_matrix([property_feature, dagg],
pd_es,
cutoff_time=pd.to_datetime("2011-04-10 10:40:00"),
training_window='2 days',
include_cutoff_time=True)
prop_values = [0, 10, 1]
dagg_values = [3, 3, 3]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
def test_training_window_overlap(pd_es):
pd_es.add_last_time_indexes()
count_log = ft.Feature(
base=pd_es['log']['id'],
parent_entity=pd_es['customers'],
primitive=Count,
)
cutoff_time = pd.DataFrame({
'id': [0, 0],
'time': ['2011-04-09 10:30:00', '2011-04-09 10:40:00'],
}).astype({'time': 'datetime64[ns]'})
# Case1. include_cutoff_time = True
actual = calculate_feature_matrix(
features=[count_log],
entityset=pd_es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
training_window='10 minutes',
include_cutoff_time=True,
)['COUNT(log)']
np.testing.assert_array_equal(actual.values, [1, 9])
# Case2. include_cutoff_time = False
actual = calculate_feature_matrix(
features=[count_log],
entityset=pd_es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
training_window='10 minutes',
include_cutoff_time=False,
)['COUNT(log)']
np.testing.assert_array_equal(actual.values, [0, 9])
def test_include_cutoff_time_without_training_window(es):
es.add_last_time_indexes()
count_log = ft.Feature(
base=es['log']['id'],
parent_entity=es['customers'],
primitive=Count,
)
cutoff_time = pd.DataFrame({
'id': [0, 0],
'time': ['2011-04-09 10:30:00', '2011-04-09 10:31:00'],
}).astype({'time': 'datetime64[ns]'})
# Case1. include_cutoff_time = True
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
include_cutoff_time=True,
)['COUNT(log)']
actual = to_pandas(actual)
np.testing.assert_array_equal(actual.values, [1, 6])
# Case2. include_cutoff_time = False
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=cutoff_time,
cutoff_time_in_index=True,
include_cutoff_time=False,
)['COUNT(log)']
actual = to_pandas(actual)
np.testing.assert_array_equal(actual.values, [0, 5])
# Case3. include_cutoff_time = True with single cutoff time value
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=pd.to_datetime("2011-04-09 10:31:00"),
instance_ids=[0],
cutoff_time_in_index=True,
include_cutoff_time=True,
)['COUNT(log)']
actual = to_pandas(actual)
np.testing.assert_array_equal(actual.values, [6])
# Case4. include_cutoff_time = False with single cutoff time value
actual = calculate_feature_matrix(
features=[count_log],
entityset=es,
cutoff_time=pd.to_datetime("2011-04-09 10:31:00"),
instance_ids=[0],
cutoff_time_in_index=True,
include_cutoff_time=False,
)['COUNT(log)']
actual = to_pandas(actual)
np.testing.assert_array_equal(actual.values, [5])
def test_approximate_dfeat_of_agg_on_target_include_cutoff_time(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
cutoff_time = pd.DataFrame({'time': [datetime(2011, 4, 9, 10, 31, 19)], 'instance_id': [0]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat2, agg_feat],
pd_es,
approximate=Timedelta(20, 's'),
cutoff_time=cutoff_time,
include_cutoff_time=False)
# binned cutoff_time will be datetime(2011, 4, 9, 10, 31, 0) and
# log event 5 at datetime(2011, 4, 9, 10, 31, 0) will be
# excluded due to approximate cutoff time point
assert feature_matrix[dfeat.get_name()].tolist() == [5]
assert feature_matrix[agg_feat.get_name()].tolist() == [5]
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(20, 's'),
cutoff_time=cutoff_time,
include_cutoff_time=True)
# binned cutoff_time will be datetime(2011, 4, 9, 10, 31, 0) and
# log event 5 at datetime(2011, 4, 9, 10, 31, 0) will be
# included due to approximate cutoff time point
assert feature_matrix[dfeat.get_name()].tolist() == [6]
assert feature_matrix[agg_feat.get_name()].tolist() == [5]
def test_training_window_recent_time_index(pd_es):
# customer with no sessions
row = {
'id': [3],
'age': [73],
u'région_id': ['United States'],
'cohort': [1],
'cancel_reason': ["Lost interest"],
'loves_ice_cream': [True],
'favorite_quote': ["Don't look back. Something might be gaining on you."],
'signup_date': [datetime(2011, 4, 10)],
'upgrade_date': [datetime(2011, 4, 12)],
'cancel_date': [datetime(2011, 5, 13)],
'date_of_birth': [datetime(1938, 2, 1)],
'engagement_level': [2],
}
to_add_df = pd.DataFrame(row)
to_add_df.index = range(3, 4)
# have to convert category to int in order to concat
old_df = pd_es['customers'].df
old_df.index = old_df.index.astype("int")
old_df["id"] = old_df["id"].astype(int)
df = pd.concat([old_df, to_add_df], sort=True)
# convert back after
df.index = df.index.astype("category")
df["id"] = df["id"].astype("category")
pd_es['customers'].update_data(df=df, recalculate_last_time_indexes=False)
pd_es.add_last_time_indexes()
property_feature = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['customers'], primitive=Count)
top_level_agg = ft.Feature(pd_es['customers']['id'], parent_entity=pd_es[u'régions'], primitive=Count)
dagg = DirectFeature(top_level_agg, pd_es['customers'])
instance_ids = [0, 1, 2, 3]
times = [datetime(2011, 4, 9, 12, 31), datetime(2011, 4, 10, 11),
datetime(2011, 4, 10, 13, 10, 1), datetime(2011, 4, 10, 1, 59, 59)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': instance_ids})
# Case1. include_cutoff_time = True
feature_matrix = calculate_feature_matrix(
[property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=True,
)
prop_values = [4, 5, 1, 0]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
dagg_values = [3, 2, 1, 3]
feature_matrix.sort_index(inplace=True)
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# Case2. include_cutoff_time = False
feature_matrix = calculate_feature_matrix(
[property_feature, dagg],
pd_es,
cutoff_time=cutoff_time,
training_window='2 hours',
include_cutoff_time=False,
)
prop_values = [5, 5, 1, 0]
assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
dagg_values = [3, 2, 1, 3]
feature_matrix.sort_index(inplace=True)
assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# TODO: add test to fail w/ koalas
def test_approximate_fails_dask(dask_es):
agg_feat = ft.Feature(dask_es['log']['id'],
parent_entity=dask_es['sessions'],
primitive=Count)
error_text = "Using approximate is not supported with Dask Entities"
with pytest.raises(ValueError, match=error_text):
calculate_feature_matrix([agg_feat],
dask_es,
approximate=Timedelta(1, 'week'))
def test_approximate_multiple_instances_per_cutoff_time(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(1, 'week'),
cutoff_time=cutoff_time)
assert feature_matrix.shape[0] == 2
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
def test_approximate_with_multiple_paths(pd_diamond_es):
pd_es = pd_diamond_es
path = backward_path(pd_es, ['regions', 'customers', 'transactions'])
agg_feat = ft.AggregationFeature(pd_es['transactions']['id'],
parent_entity=pd_es['regions'],
relationship_path=path,
primitive=Count)
dfeat = DirectFeature(agg_feat, pd_es['customers'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat],
pd_es,
approximate=Timedelta(1, 'week'),
cutoff_time=cutoff_time)
assert feature_matrix[dfeat.get_name()].tolist() == [6, 2]
def test_approximate_dfeat_of_agg_on_target(pd_es):
agg_feat = ft.Feature(pd_es['log']['id'], parent_entity=pd_es['sessions'], primitive=Count)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
instance_ids=[0, 2],
approximate=Timedelta(10, 's'),
cutoff_time=cutoff_time)
assert feature_matrix[dfeat.get_name()].tolist() == [7, 10]
assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
def test_approximate_dfeat_of_need_all_values(pd_es):
p = ft.Feature(pd_es['log']['value'], primitive=Percentile)
agg_feat = ft.Feature(p, parent_entity=pd_es['sessions'], primitive=Sum)
agg_feat2 = ft.Feature(agg_feat, parent_entity=pd_es['customers'], primitive=Sum)
dfeat = DirectFeature(agg_feat2, pd_es['sessions'])
times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
cutoff_time = pd.DataFrame({'time': times, 'instance_id': [0, 2]})
feature_matrix = calculate_feature_matrix([dfeat, agg_feat],
pd_es,
approximate=Timedelta(10, 's'),
cutoff_time_in_index=True,
cutoff_time=cutoff_time)
log_df = pd_es['log'].df
instances = [0, 2]
cutoffs = [pd.Timestamp('2011-04-09 10:31:19'), | pd.Timestamp('2011-04-09 11:00:00') | pandas.Timestamp |
"""
Functions for implementing 'astype' methods according to pandas conventions,
particularly ones that differ from numpy.
"""
from __future__ import annotations
import inspect
from typing import (
TYPE_CHECKING,
cast,
overload,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._typing import (
ArrayLike,
DtypeObj,
)
from pandas.errors import IntCastingNaNError
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_object_dtype,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
ExtensionDtype,
PandasDtype,
)
from pandas.core.dtypes.missing import isna
if TYPE_CHECKING:
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
)
_dtype_obj = np.dtype(object)
@overload
def astype_nansafe(
arr: np.ndarray, dtype: np.dtype, copy: bool = ..., skipna: bool = ...
) -> np.ndarray:
...
@overload
def astype_nansafe(
arr: np.ndarray, dtype: ExtensionDtype, copy: bool = ..., skipna: bool = ...
) -> ExtensionArray:
...
def astype_nansafe(
arr: np.ndarray, dtype: DtypeObj, copy: bool = True, skipna: bool = False
) -> ArrayLike:
"""
    Cast the elements of an array to a given dtype in a nan-safe manner.
Parameters
----------
arr : ndarray
dtype : np.dtype or ExtensionDtype
copy : bool, default True
If False, a view will be attempted but may fail, if
e.g. the item sizes don't align.
    skipna : bool, default False
Whether or not we should skip NaN when casting as a string-type.
Raises
------
ValueError
The dtype was a datetime64/timedelta64 dtype, but it had no unit.
"""
if arr.ndim > 1:
flat = arr.ravel()
result = astype_nansafe(flat, dtype, copy=copy, skipna=skipna)
# error: Item "ExtensionArray" of "Union[ExtensionArray, ndarray]" has no
# attribute "reshape"
return result.reshape(arr.shape) # type: ignore[union-attr]
# We get here with 0-dim from sparse
arr = np.atleast_1d(arr)
# dispatch on extension dtype if needed
if isinstance(dtype, ExtensionDtype):
return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy)
elif not isinstance(dtype, np.dtype): # pragma: no cover
raise ValueError("dtype must be np.dtype or ExtensionDtype")
if arr.dtype.kind in ["m", "M"] and (
issubclass(dtype.type, str) or dtype == _dtype_obj
):
from pandas.core.construction import ensure_wrapped_if_datetimelike
arr = ensure_wrapped_if_datetimelike(arr)
return arr.astype(dtype, copy=copy)
if issubclass(dtype.type, str):
return lib.ensure_string_array(arr, skipna=skipna, convert_na_value=False)
elif is_datetime64_dtype(arr.dtype):
if dtype == np.int64:
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
# allow frequency conversions
if dtype.kind == "M":
return arr.astype(dtype)
raise TypeError(f"cannot astype a datetimelike from [{arr.dtype}] to [{dtype}]")
elif is_timedelta64_dtype(arr.dtype):
if dtype == np.int64:
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
elif dtype.kind == "m":
return astype_td64_unit_conversion(arr, dtype, copy=copy)
raise TypeError(f"cannot astype a timedelta from [{arr.dtype}] to [{dtype}]")
elif np.issubdtype(arr.dtype, np.floating) and np.issubdtype(dtype, np.integer):
return _astype_float_to_int_nansafe(arr, dtype, copy)
elif is_object_dtype(arr.dtype):
# work around NumPy brokenness, #1987
if np.issubdtype(dtype.type, np.integer):
return lib.astype_intsafe(arr, dtype)
# if we have a datetime/timedelta array of objects
# then coerce to a proper dtype and recall astype_nansafe
elif is_datetime64_dtype(dtype):
from pandas import to_datetime
return astype_nansafe(
to_datetime(arr).values,
dtype,
copy=copy,
)
elif is_timedelta64_dtype(dtype):
from pandas import to_timedelta
return astype_nansafe(to_timedelta(arr)._values, dtype, copy=copy)
if dtype.name in ("datetime64", "timedelta64"):
msg = (
f"The '{dtype.name}' dtype has no unit. Please pass in "
f"'{dtype.name}[ns]' instead."
)
raise ValueError(msg)
if copy or is_object_dtype(arr.dtype) or is_object_dtype(dtype):
# Explicit copy, or required since NumPy can't view from / to object.
return arr.astype(dtype, copy=True)
return arr.astype(dtype, copy=copy)
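# Illustrative sketch (not part of the pandas source): a minimal, hypothetical
# use of astype_nansafe showing the nan-safety described above. Defined as an
# unused helper so importing this module has no side effects.
def _astype_nansafe_example() -> np.ndarray:
    arr = np.array([1.0, 2.5, np.nan])
    # float -> float keeps NaN intact
    as_float32 = astype_nansafe(arr, np.dtype("float32"))
    try:
        # float -> int cannot represent NaN, so the nan-safe path raises
        astype_nansafe(arr, np.dtype("int64"))
    except IntCastingNaNError:
        pass
    return as_float32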
def _astype_float_to_int_nansafe(
values: np.ndarray, dtype: np.dtype, copy: bool
) -> np.ndarray:
"""
    astype with a check preventing converting NaN to a meaningless integer value.
"""
if not np.isfinite(values).all():
raise IntCastingNaNError(
"Cannot convert non-finite values (NA or inf) to integer"
)
if dtype.kind == "u":
# GH#45151
if not (values >= 0).all():
raise ValueError(f"Cannot losslessly cast from {values.dtype} to {dtype}")
return values.astype(dtype, copy=copy)
def astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool = False) -> ArrayLike:
"""
Cast array (ndarray or ExtensionArray) to the new dtype.
Parameters
----------
values : ndarray or ExtensionArray
dtype : dtype object
copy : bool, default False
copy if indicated
Returns
-------
ndarray or ExtensionArray
"""
if (
values.dtype.kind in ["m", "M"]
and dtype.kind in ["i", "u"]
and isinstance(dtype, np.dtype)
and dtype.itemsize != 8
):
# TODO(2.0) remove special case once deprecation on DTA/TDA is enforced
msg = rf"cannot astype a datetimelike from [{values.dtype}] to [{dtype}]"
raise TypeError(msg)
if is_datetime64tz_dtype(dtype) and is_datetime64_dtype(values.dtype):
return astype_dt64_to_dt64tz(values, dtype, copy, via_utc=True)
if is_dtype_equal(values.dtype, dtype):
if copy:
return values.copy()
return values
if not isinstance(values, np.ndarray):
# i.e. ExtensionArray
values = values.astype(dtype, copy=copy)
else:
values = astype_nansafe(values, dtype, copy=copy)
# in pandas we don't store numpy str dtypes, so convert to object
if isinstance(dtype, np.dtype) and issubclass(values.dtype.type, str):
values = np.array(values, dtype=object)
return values
def astype_array_safe(
values: ArrayLike, dtype, copy: bool = False, errors: str = "raise"
) -> ArrayLike:
"""
Cast array (ndarray or ExtensionArray) to the new dtype.
This basically is the implementation for DataFrame/Series.astype and
includes all custom logic for pandas (NaN-safety, converting str to object,
    not allowing an ExtensionDtype class in place of an instance)
Parameters
----------
values : ndarray or ExtensionArray
dtype : str, dtype convertible
copy : bool, default False
copy if indicated
errors : str, {'raise', 'ignore'}, default 'raise'
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
Returns
-------
ndarray or ExtensionArray
"""
errors_legal_values = ("raise", "ignore")
if errors not in errors_legal_values:
invalid_arg = (
"Expected value of kwarg 'errors' to be one of "
f"{list(errors_legal_values)}. Supplied value is '{errors}'"
)
raise ValueError(invalid_arg)
if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype):
msg = (
f"Expected an instance of {dtype.__name__}, "
"but got the class instead. Try instantiating 'dtype'."
)
raise TypeError(msg)
dtype = pandas_dtype(dtype)
if isinstance(dtype, PandasDtype):
# Ensure we don't end up with a PandasArray
dtype = dtype.numpy_dtype
try:
new_values = astype_array(values, dtype, copy=copy)
except (ValueError, TypeError):
# e.g. astype_nansafe can fail on object-dtype of strings
# trying to convert to float
if errors == "ignore":
new_values = values
else:
raise
return new_values
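# Illustrative sketch (not part of the pandas source): hypothetical calls showing
# the errors= handling and the ExtensionDtype-class check in astype_array_safe.
# Defined but never called.
def _astype_array_safe_example() -> ArrayLike:
    values = np.array(["a", "b", "1"], dtype=object)
    # errors="ignore" hands back the original values when the cast fails
    unchanged = astype_array_safe(values, np.dtype("float64"), errors="ignore")
    try:
        # passing an ExtensionDtype class (not an instance) raises TypeError
        astype_array_safe(values, DatetimeTZDtype)
    except TypeError:
        pass
    return unchanged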
def astype_td64_unit_conversion(
values: np.ndarray, dtype: np.dtype, copy: bool
) -> np.ndarray:
"""
    By pandas convention, converting to non-nano timedelta64
    returns a float64-dtyped array with values representing multiples
    of the desired timedelta unit (NaT becomes NaN). This is essentially division.
Parameters
----------
values : np.ndarray[timedelta64[ns]]
dtype : np.dtype
timedelta64 with unit not-necessarily nano
copy : bool
Returns
-------
np.ndarray
"""
if is_dtype_equal(values.dtype, dtype):
if copy:
return values.copy()
return values
# otherwise we are converting to non-nano
result = values.astype(dtype, copy=False) # avoid double-copying
result = result.astype(np.float64)
mask = isna(values)
np.putmask(result, mask, np.nan)
return result
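# Illustrative sketch (not part of the pandas source): the "essentially division"
# behavior described above -- 120 seconds of timedelta64[ns] becomes 2.0 when
# expressed as multiples of one minute, and NaT becomes NaN. Defined but never called.
def _astype_td64_unit_example() -> np.ndarray:
    values = np.array(
        [np.timedelta64(120, "s"), np.timedelta64("NaT")], dtype="timedelta64[ns]"
    )
    return astype_td64_unit_conversion(values, np.dtype("timedelta64[m]"), copy=False)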
def astype_dt64_to_dt64tz(
values: ArrayLike, dtype: DtypeObj, copy: bool, via_utc: bool = False
) -> DatetimeArray:
# GH#33401 we have inconsistent behaviors between
# Datetimeindex[naive].astype(tzaware)
# Series[dt64].astype(tzaware)
# This collects them in one place to prevent further fragmentation.
from pandas.core.construction import ensure_wrapped_if_datetimelike
values = ensure_wrapped_if_datetimelike(values)
values = cast("DatetimeArray", values)
aware = isinstance(dtype, DatetimeTZDtype)
if via_utc:
# Series.astype behavior
# caller is responsible for checking this
assert values.tz is None and aware
dtype = cast(DatetimeTZDtype, dtype)
if copy:
# this should be the only copy
values = values.copy()
warnings.warn(
"Using .astype to convert from timezone-naive dtype to "
"timezone-aware dtype is deprecated and will raise in a "
"future version. Use ser.dt.tz_localize instead.",
FutureWarning,
stacklevel= | find_stack_level() | pandas.util._exceptions.find_stack_level |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 5 11:35:21 2021
@author: mariaolaru
"""
import numpy as np
import pandas as pd
from preproc.preprocess_funcs import *
from plts.plot_funcs import *
from proc.process_funcs import *
parent_dir = "/Users/mariaolaru/Box/RC-S_Studies_Regulatory_and_Data/Patient In-Clinic Data/RCS10/study_visits/v07_gamma_entrainment/SCBS/RCS10L/"
#modify funcs to get and set
[msc, gp] = preprocess_settings(parent_dir)
md = preprocess_data(parent_dir, msc, gp) #separate fn b/c can take much longer time to process data
#Step 1: manually subset data into timechunks > 45s w/ X amp X freq
phs_final = pd.DataFrame()
psd_final = pd.DataFrame()
buffer = 15*1000 #add 15s buffer time to beginning
def proc_tsofint(ts_min, ts_max, buffer, md, msc, gp, phs_final, psd_final, out_name_custom):
ts_min = ts_min + buffer
ts_max = ts_max - 1
ts_range = [ts_min, ts_max]
mscs = subset_msc(msc, ts_range)
mscs = mscs.head(1).reset_index()
fs = int(mscs['ch1_sr'].iloc[0])
[mds, tt] = subset_md(md, mscs, fs, ts_range)
#Make file name to save outputs
out_name = name_file(mscs, gp) + '_testing'
#Create PSD dfs in specified time intervals
step_size = 30
mdsm = melt_mds(mds, fs, out_name, step_size)
df_psd = convert_psd(mdsm, fs, out_name)
#Create PHS dfs
freq_thresh = np.array([60, 90])
df_phs = compute_phs(df_psd, fs, freq_thresh, out_name)
#Plot phs
out_name = 'RCS10L_phs_' + out_name_custom
plot_title = make_plot_title(out_name, step_size, mscs, tt)
plot_phs(df_psd, df_phs, out_name, plot_title, gp)
#add additional info for final table
df_phs['stim_amp'] = mscs.loc[0, 'amplitude_ma']
df_phs['stim_freq'] = mscs.loc[0, 'stimfrequency_hz']
df_psd['stim_amp'] = mscs.loc[0, 'amplitude_ma']
df_psd['stim_freq'] = mscs.loc[0, 'stimfrequency_hz']
phs_final = | pd.concat([phs_final, df_phs]) | pandas.concat |
import pandas as pd
from string import punctuation
import nltk
from IPython.core.display import display
nltk.download('tagsets')
from nltk.data import load
nltk.download('averaged_perceptron_tagger')
from nltk import pos_tag
from nltk import word_tokenize
from collections import Counter
def get_tagsets():
tagdict = load('help/tagsets/upenn_tagset.pickle')
return list(tagdict.keys())
tag_list = get_tagsets()
print(tag_list)
# This method counts the occurrence of POS tags in each sentence.
def get_pos_occurrence_freq(data, tag_list):
# Get list of sentences in text_list
text_list = data.text
# create empty dataframe
feature_df = pd.DataFrame(columns=tag_list)
for text_line in text_list:
# get pos tags of each word.
pos_tags = [j for i, j in pos_tag(word_tokenize(text_line))]
# create a dict of pos tags and their frequency in given sentence.
row = dict(Counter(pos_tags))
feature_df = feature_df.append(row, ignore_index=True)
feature_df.fillna(0, inplace=True)
return feature_df
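# Illustrative sketch (hypothetical sentence, not from the dataset): for a single
# line of text, the tag counting above reduces to the expression below; compare
# the test at the bottom of this file, where "this is a cat" yields DT=2, VBZ=1, NN=1.
_example_tag_counts = dict(Counter(tag for _, tag in pos_tag(word_tokenize("this is a cat"))))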
data = pd.read_csv('../data/data.csv', header=0)
feature_df = get_pos_occurrence_freq(data, tag_list)
display(feature_df.head())
def add_punctuation_count(feature_df, data):
# The below code line will find the intersection of set
# of punctuations in text and punctuation set
# imported from string module of python and find the length of
# intersection set in each row and add it to column `num_of_unique_punctuations`
# of data frame.
feature_df['num_of_unique_punctuations'] = data['text'].apply(lambda x: len(set(x).intersection(set(punctuation))))
return feature_df
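# Illustrative sketch (hypothetical string): the set intersection above counts
# *unique* punctuation marks, e.g. "Hello, world!!" contains only ',' and '!'.
_example_punct_count = len(set("Hello, world!!").intersection(set(punctuation)))  # -> 2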
feature_df = add_punctuation_count(feature_df, data)
display(feature_df['num_of_unique_punctuations'].head())
def get_capitalized_word_count(feature_df, data):
# The below code line will tokenize text in every row and
    # create a list of capitalized words, then find the length of
    # this list and add it to the column `number_of_capital_words`
# of dataframe.
feature_df['number_of_capital_words'] = data['text'].apply(
lambda x: len([word for word in word_tokenize(str(x)) if word[0].isupper()]))
return feature_df
feature_df = get_capitalized_word_count(feature_df, data)
display(feature_df['number_of_capital_words'].head())
def get_small_word_count(feature_df, data):
# The below code line will tokenize text in every row and
    # create a list of lowercase (small) words, then find the length of
    # this list and add it to the column `number_of_small_words`
# of dataframe.
feature_df['number_of_small_words'] = data['text'].apply(
lambda x: len([word for word in word_tokenize(str(x)) if word[0].islower()]))
return feature_df
feature_df = get_small_word_count(feature_df, data)
display(feature_df['number_of_small_words'].head())
def get_number_of_alphabets(feature_df, data):
# The below code line will break the text line in a list of
# characters in each row and add the count of that list into
# the columns `number_of_alphabets`
feature_df['number_of_alphabets'] = data['text'].apply(lambda x: len([ch for ch in str(x) if ch.isalpha()]))
return feature_df
feature_df = get_number_of_alphabets(feature_df, data)
display(feature_df['number_of_alphabets'].head())
def get_number_of_digit_count(feature_df, data):
# The below code line will break the text line in a list of
# digits in each row and add the count of that list into
# the columns `number_of_digits`
feature_df['number_of_digits'] = data['text'].apply(lambda x: len([ch for ch in str(x) if ch.isdigit()]))
return feature_df
feature_df = get_number_of_digit_count(feature_df, data)
display(feature_df['number_of_digits'].head())
def get_number_of_words(feature_df, data):
    # The below code line will break the text line into a list of
    # words in each row and add the count of that list into
    # the column `number_of_words`
    feature_df['number_of_words'] = data['text'].apply(lambda x: len(word_tokenize(str(x))))
return feature_df
feature_df = get_number_of_words(feature_df, data)
display(feature_df['number_of_words'].head())
def get_number_of_whitespaces(feature_df, data):
# The below code line will generate list of white spaces
# in each row and add the length of that list into
# the columns `number_of_white_spaces`
feature_df['number_of_white_spaces'] = data['text'].apply(lambda x: len([ch for ch in str(x) if ch.isspace()]))
return feature_df
feature_df = get_number_of_whitespaces(feature_df, data)
display(feature_df['number_of_white_spaces'].head())
display(feature_df.head())
# Create a test data frame
test_df = pd.DataFrame([{"text": "this is a cat"}, {"text": "why are you so happy"}])
tag_list = ['JJR', 'CC', 'VBN', 'CD', 'NNS']
# 1) TEST get_pos_occurrence_freq()
test_feature_df = get_pos_occurrence_freq(test_df, tag_list)
result_df = pd.DataFrame([{"JJR": 0, "CC": 0, "VBN": 0, "CD": 0, "NNS": 0,
"DT": 2.0, "NN": 1.0, "VBZ": 1.0, "JJ": 0.0, "PRP": 0.0, "RB": 0.0, "VBP": 0.0, "WRB": 0.0},
{"JJR": 0, "CC": 0, "VBN": 0, "CD": 0, "NNS": 0,
"DT": 0.0, "NN": 0.0, "VBZ": 0.0, "JJ": 1.0, "PRP": 1.0, "RB": 1.0, "VBP": 1.0, "WRB": 1.0}])
# Assert equality of the results and expected data frames
| pd.testing.assert_frame_equal(test_feature_df, result_df, check_names=False, check_like=True) | pandas.testing.assert_frame_equal |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# %%
DATA_ROOT = '../../data/raw'
# %% [markdown]
# ## LOADING DATA
# %%
print('Loading raw datasets...', flush=True)
GIT_COMMITS_PATH = f"{DATA_ROOT}/GIT_COMMITS.csv"
GIT_COMMITS_CHANGES = f"{DATA_ROOT}/GIT_COMMITS_CHANGES.csv"
SONAR_MEASURES_PATH = f"{DATA_ROOT}/SONAR_MEASURES.csv"
SZZ_FAULT_INDUCING_COMMITS = f"{DATA_ROOT}/SZZ_FAULT_INDUCING_COMMITS.csv"
JIRA_ISSUES = f"{DATA_ROOT}/JIRA_ISSUES.csv"
# %%
git_commits = pd.read_csv(GIT_COMMITS_PATH)
git_commits_changes = pd.read_csv(GIT_COMMITS_CHANGES)
sonar_measures = pd.read_csv(SONAR_MEASURES_PATH)
szz_fault_inducing_commits = pd.read_csv(SZZ_FAULT_INDUCING_COMMITS)
jira_issues = pd.read_csv(JIRA_ISSUES)
# %%
git_commits_changes[git_commits_changes['linesAdded'].isna()]
# %%
len(git_commits_changes.commitHash.unique())
# %% [markdown]
# ## FILTERING COLUMNS
print('Filtering columns...', flush=True)
# %% [markdown]
# -------------------------------------------------------------------------------------------------------------------------------
# %%
git_dates = git_commits[['commitHash','committerDate']]
# %%
agg = {
'linesAdded': ['sum'],
'linesRemoved': ['sum'],
'projectID': ['count'],
}
gcg_by_commit = git_commits_changes.groupby(['projectID', 'commitHash']).agg(agg)
# %%
len(gcg_by_commit)
# %%
gcg_by_commit = gcg_by_commit.reset_index()
# %%
gcg_by_commit.columns = ['projectID', 'commitHash', 'lines_added', 'lines_removed', 'entropylike']
# %%
gcg_by_commit = pd.merge(gcg_by_commit, git_dates, on='commitHash', how='inner')
# %%
gcg_by_commit = gcg_by_commit.sort_values(by=['projectID', 'committerDate'])
# %%
print('Computing metrics...', flush=True)
total_lines = []
project = 'accumulo'
la_counter = 0
lr_counter = 0
for i, row in gcg_by_commit.iterrows():
if project!=row['projectID']:
project=row['projectID']
la_counter = 0
lr_counter = 0
la_counter+=row['lines_added']
lr_counter+=row['lines_removed']
total_lines.append(la_counter-lr_counter)
gcg_by_commit['total_lines'] = total_lines
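# %%
# Illustrative sketch (hypothetical numbers): the loop above keeps a per-project
# running balance of lines added minus lines removed, equivalent to a cumulative sum.
_example_added, _example_removed = np.array([10, 5]), np.array([2, 1])
_example_totals = list(np.cumsum(_example_added - _example_removed))  # -> [8, 12]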
# %%
gcg_by_commit = gcg_by_commit[gcg_by_commit['total_lines']>=0] # to avoid 2 lines of wrong data in the commons-cli project
# %%
gcg_by_commit['added/total_lines'] = gcg_by_commit['lines_added']/gcg_by_commit['total_lines']
# %%
gcg_by_commit = gcg_by_commit[gcg_by_commit['added/total_lines']<=1] # to avoid 1 line of wrong data in the commons-cli project
# %%
gcg_by_commit = gcg_by_commit[['commitHash', 'entropylike', 'added/total_lines']]
# %%
jira_bugs = jira_issues[jira_issues['type'] == 'Bug']
jira_bugs = jira_bugs[['key', 'priority']]
# %%
print('Merging datasets...', flush=True)
szz_fault_inducing_commits = szz_fault_inducing_commits[['faultInducingCommitHash', 'key']]
szz_fault_inducing_commits = szz_fault_inducing_commits.rename(columns={'faultInducingCommitHash':'commitHash'})
szz_fault_inducing_commits.head()
# %%
Y = pd.merge(szz_fault_inducing_commits, jira_bugs, on='key')
# %%
def priorityToCategory(p: str):
"""
"""
if p == 'No bug': return 0
if p == 'Trivial': return 1
if p == 'Minor': return 2
if p == 'Blocker': return 3
if p == 'Major': return 4
if p == 'Critical': return 5
Y['priority'] = Y['priority'].apply(lambda p: priorityToCategory(p))
# %%
Y = Y[['commitHash', 'priority']]
# %%
multitarget = True #in case we are predicting multiple bugs for each commit
# %%
if not multitarget:
Y = Y.sort_values(by='commitHash')
Y = Y.groupby('commitHash').max().reset_index() #otherwise, we predict the one with highest priority
# %%
git_commits = git_commits[['commitHash', 'inMainBranch', 'merge']]
# %%
sonar_measures.drop(['projectID', 'SQAnalysisDate', 'functionComplexityDistribution', 'fileComplexityDistribution', 'lastCommitDate', 'nclocLanguageDistribution', 'alertStatus', 'qualityGateDetails', 'qualityProfiles', 'files'], axis=1, inplace=True)
# %%
X = pd.merge(git_commits, sonar_measures, how='inner', on='commitHash')
"""
.. _twitter:
Twitter Data API
================
"""
import logging
from functools import wraps
from twython import Twython
import pandas as pd
from pandas.io.json import json_normalize
TWITTER_LOG_FMT = ('%(asctime)s | %(levelname)s | %(filename)s:%(lineno)d '
'| %(funcName)s | %(message)s')
logging.basicConfig(format=TWITTER_LOG_FMT)
# Functions that rely on 'previous_cursor' and 'next_cursor' to paginate
# through large result sets (a minimal pagination sketch follows the list).
CURSORED_FUNCTIONS = [
'get_followers_ids',
'get_followers_list',
'get_friends_ids',
'get_friends_list',
'get_list_members',
'get_list_memberships',
'get_list_subscribers',
'get_list_subscriptions',
'get_retweeters_ids',
'show_owned_lists',
]
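# A minimal sketch (not part of this module) of how the cursored endpoints
# listed above are typically paginated with Twython: each response carries a
# ``next_cursor`` that is passed back until it comes back as 0. The helper
# name and parameters below are illustrative.
def _cursored_pages_sketch(client, func_name, **params):
    """Yield successive pages from a cursored Twython endpoint."""
    cursor = -1  # Twitter's convention for "start at the beginning"
    while cursor != 0:
        page = getattr(client, func_name)(cursor=cursor, **params)
        yield page
        cursor = page.get('next_cursor', 0)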
# Responses whose payload sits under a special key (mapped here to that
# key's name); the data has to be extracted through that key, unlike
# responses that can be passed to DataFrame directly.
SPECIAL_KEY_FUNCS = {
'search': 'statuses',
'get_followers_list': 'users',
'get_friends_list': 'users',
'get_list_members': 'users',
'get_list_subscribers': 'users',
'get_list_memberships': 'lists',
'get_list_subscriptions': 'lists',
'show_owned_lists': 'lists',
}
# Functions whose responses embed a ``user`` key carrying 40+ attributes
# of the user tweeting, being listed, retweeting, etc.
USER_DATA_EMBEDDED = {
'get_favorites': 'tweet_',
'get_home_timeline': 'tweet_',
'get_list_memberships': 'list_',
'get_list_statuses': 'tweet_',
'get_list_subscriptions': '',
'get_mentions_timeline': 'tweet_',
'get_retweets': 'tweet_',
'get_user_timeline': 'tweet_',
'lookup_status': 'tweet_',
'retweeted_of_me': 'tweet_',
'search': 'tweet_',
'show_lists': 'list_',
'show_owned_lists': 'list_',
}
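# Illustration only (hypothetical helper, not used by this module): the
# prefixes above mark where an embedded ``user`` object ends up once a
# response is flattened, e.g. a tweet's author attributes become a set of
# prefixed ``..._user_*`` columns.
def _flatten_user_sketch(response, prefix):
    """Flatten the embedded ``user`` dict of one response into prefixed keys."""
    user = response.get('user', {})
    return {prefix + 'user_' + key: val for key, val in user.items()}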
DEFAULT_COUNTS = {
'get_favorites': 200,
'get_followers_ids': 5000,
'get_followers_list': 200,
'get_friends_ids': 5000,
'get_friends_list': 200,
'get_home_timeline': 200,
'get_list_members': 5000,
'get_list_memberships': 1000,
'get_list_statuses': 100,
'get_list_subscribers': 5000,
'get_list_subscriptions': 1000,
'get_mentions_timeline': 200,
'get_retweeters_ids': 100,
'get_retweets': 100,
'get_user_timeline': 200,
'lookup_status': 100,
'lookup_user': 100,
'retweeted_of_me': 100,
'search': 100,
'search_users': 20,
'show_lists': 100,
'show_owned_lists': 1000
}
def _expand_entities(df):
if 'tweet_entities' in df:
colnames = ['tweet_entities_' + x for x in ['mentions', 'hashtags',
'urls', 'symbols',
'media']]
        entities_df = json_normalize(df['tweet_entities'])
import pandas as pd
from rake_nltk import Rake
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer
pd.set_option('display.max_columns', 100)
df = pd.read_csv('movie_metadata.csv')
print(df.head())
print(df.shape)
list(df.columns.values)
df = df[['director_name', 'actor_1_name', 'actor_2_name', 'actor_3_name', 'plot_keywords', 'genres', 'movie_title']]
# Combine the three actor columns into one field. Note that Series.empty only
# checks for zero-length columns, so this guard is effectively always True here.
if not df['actor_1_name'].empty or not df['actor_2_name'].empty or not df['actor_3_name'].empty:
    df['actors'] = df['actor_1_name'] + "," + df['actor_2_name'] + "," + df['actor_3_name']
df = df[['director_name', 'plot_keywords', 'genres', 'movie_title', 'actors']]
print(df.head())
# Quick look at missing values, with NaNs marked by a placeholder.
df1 = df.where(pd.notnull(df), 'REMOVE')
print(df1.head())
# Normalise literal "NaN" strings to real missing values, then drop incomplete rows.
df.replace(["NaN"], np.nan, inplace=True)
df = df.dropna()
print(df.head())
for index, row in df.iterrows():
    # write back with df.at; relying on iterrows() returning a writable view is fragile
    # process actors: lowercase, strip spaces inside names, separate with spaces
    df.at[index, 'actors'] = row['actors'].lower().replace(' ', '').replace(',', ' ')
    # process director_name: lowercase, strip spaces
    df.at[index, 'director_name'] = row['director_name'].lower().replace(' ', '')
    # process genres: lowercase, split on '|'
    df.at[index, 'genres'] = row['genres'].lower().replace('|', ' ')
    # process plot_keywords: lowercase, split on '|'
    df.at[index, 'plot_keywords'] = row['plot_keywords'].lower().replace('|', ' ')
print(df.head())
df.set_index('movie_title', inplace=True)
print(df.head())
df['bag_of_words'] = ''
columns = df.columns
for index, row in df.iterrows():
    words = ''
    for col in columns:
        words = words + row[col] + ' '
    # write back with df.at instead of mutating the iterrows() row
    df.at[index, 'bag_of_words'] = words
df.drop(columns=[col for col in df.columns if col != 'bag_of_words'], inplace=True)
print(df.head())
# instantiating and generating the count matrix
count = CountVectorizer()
count_matrix = count.fit_transform(df['bag_of_words'])
# creating a Series for the movie titles so they are associated to an ordered numerical
# list I will use later to match the indexes
indices = pd.Series(df.index)
print(indices[:5])
# generating the cosine similarity matrix
cosine_sim = cosine_similarity(count_matrix, count_matrix)
print(cosine_sim)
# function that takes in movie title as input and returns the top 10 recommended movies
def recommendations(title, cosine_sim=cosine_sim):
recommended_movies = []
idx = -1
    # getting the index of the movie that matches the title
for i in range(0, indices.size):
if indices[i] == title:
idx = i
break
# creating a Series with the similarity scores in descending order
    score_series = pd.Series(cosine_sim[idx])
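# A standalone sketch of the usual remaining steps of such a recommender
# (the helper below is illustrative and separate from the function above):
# sort the similarity scores for the query movie and map the top matches
# back to their titles.
def top_n_similar(title, n=10):
    match = indices[indices == title]
    if match.empty:
        return []
    movie_pos = match.index[0]
    scores = pd.Series(cosine_sim[movie_pos]).sort_values(ascending=False)
    top_positions = scores.iloc[1:n + 1].index  # position 0 is the movie itself
    return [indices[pos] for pos in top_positions]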
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import dash
import pandas
import dash_html_components as html
from app import app
import cfg
tableColors = ['rgb(255, 255 ,255)', 'rgb(220, 220, 220)']
@app.callback(
dash.dependencies.Output('detailMainDiv', component_property = 'children'),
[dash.dependencies.Input('geneDrop', 'value')]
)
def showDetails(name):
""" Create tabular view of additional data
Positional arguments:
name -- Gene name for initialization.
"""
if cfg.advancedDesc is not None:
try:
df = cfg.advancedDesc[cfg.advancedDesc['gene_ids'].str.contains(name)]
except TypeError:
df = pandas.DataFrame()
else:
        df = pandas.DataFrame()
from __future__ import division
import pytest
import numpy as np
from pandas import (Interval, IntervalIndex, Index, isna,
interval_range, Timestamp, Timedelta,
compat)
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self):
return IntervalIndex.from_breaks(np.arange(10))
def test_constructors(self):
expected = self.index
actual = IntervalIndex.from_breaks(np.arange(3), closed='right')
assert expected.equals(actual)
alternate = IntervalIndex.from_breaks(np.arange(3), closed='left')
assert not expected.equals(alternate)
actual = IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex.from_arrays(np.arange(2), np.arange(2) + 1,
closed='right')
assert expected.equals(actual)
actual = Index([Interval(0, 1), Interval(1, 2)])
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
actual = Index(expected)
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
def test_constructors_other(self):
# all-nan
result = IntervalIndex.from_intervals([np.nan])
expected = np.array([np.nan], dtype=object)
tm.assert_numpy_array_equal(result.values, expected)
# empty
result = IntervalIndex.from_intervals([])
expected = np.array([], dtype=object)
tm.assert_numpy_array_equal(result.values, expected)
def test_constructors_errors(self):
# scalar
with pytest.raises(TypeError):
IntervalIndex(5)
# not an interval
with pytest.raises(TypeError):
IntervalIndex([0, 1])
with pytest.raises(TypeError):
IntervalIndex.from_intervals([0, 1])
# invalid closed
with pytest.raises(ValueError):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed
with pytest.raises(ValueError):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with pytest.raises(ValueError):
IntervalIndex.from_arrays([0, 10], [3, 5])
with pytest.raises(ValueError):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# no point in nesting periods in an IntervalIndex
with pytest.raises(ValueError):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
def test_constructors_datetimelike(self):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx)
expected = IntervalIndex.from_breaks(idx.values)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self):
index = self.index
assert len(index) == 2
assert index.size == 2
assert index.shape == (2, )
tm.assert_index_equal(index.left, Index([0, 1]))
        tm.assert_index_equal(index.right, Index([1, 2]))
import os
import pandas
import numpy
import tensorflow
from tensorflow import Tensor
from typing import Tuple
from src.variants.variant import Variant
from src.structs import DistanceStruct
class SSIMVariant(Variant):
name = "Structural Similarity Index Measure"
def __init__(self, fasta_file: str, sequence_type: str, image_folder: str):
super().__init__(fasta_file, sequence_type)
self._image_folder = image_folder
def _call_alg(self, image: Tensor, other: Tensor) -> numpy.ndarray:
return tensorflow.image.ssim(
image,
other,
max_val=255, filter_size=11,
filter_sigma=1.5, k1=0.01, k2=0.03)[0].numpy()
def _read_image(self, img_name: str) -> Tensor:
return tensorflow.expand_dims(
tensorflow.image.decode_image(
tensorflow.io.read_file(
os.path.join(
self._image_folder, img_name))), axis=0)
    def _upscale_images(self, image: Tensor, other: Tensor) -> Tuple[Tensor, Tensor]:
max_x = image.shape[1] if image.shape[1] > other.shape[1] else other.shape[1]
max_y = image.shape[2] if image.shape[2] > other.shape[2] else other.shape[2]
return (
tensorflow.image.resize(image, (max_x, max_y), tensorflow.image.ResizeMethod.BICUBIC),
tensorflow.image.resize(other, (max_x, max_y), tensorflow.image.ResizeMethod.BICUBIC)
)
def build_matrix(self) -> DistanceStruct:
files = os.listdir(self._image_folder)
indexes = {".".join(img.split('.')[:-1]): img.split('.')[-1] for img in files}
diff = set(self._names).difference(set(indexes.keys()))
if diff:
raise IOError(f"Sequences without image created: {diff}")
files = []
for i in self._names:
if indexes.get(i):
files.append(f"{i}.{indexes.get(i)}")
indexes = self._names
        df = pandas.DataFrame(index=indexes, columns=indexes)
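# A standalone sketch (illustrative only, independent of the Variant and
# DistanceStruct classes) of the pairwise comparison set up above: read two
# images, upscale both to the larger common shape, then compute their SSIM.
def ssim_pair_sketch(path_a: str, path_b: str) -> float:
    img_a = tensorflow.expand_dims(
        tensorflow.image.decode_image(tensorflow.io.read_file(path_a)), axis=0)
    img_b = tensorflow.expand_dims(
        tensorflow.image.decode_image(tensorflow.io.read_file(path_b)), axis=0)
    max_x = max(img_a.shape[1], img_b.shape[1])
    max_y = max(img_a.shape[2], img_b.shape[2])
    img_a = tensorflow.image.resize(img_a, (max_x, max_y), tensorflow.image.ResizeMethod.BICUBIC)
    img_b = tensorflow.image.resize(img_b, (max_x, max_y), tensorflow.image.ResizeMethod.BICUBIC)
    return tensorflow.image.ssim(img_a, img_b, max_val=255)[0].numpy()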
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
        # Labels don't matter which way they were copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
        # Names don't matter which way they were copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
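    # full tuples, bare level-0 labels, and None endpoints are all accepted
    # by slice_locs on a sorted index.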
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
        # the fact that this works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
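    # truncate keeps entries whose level-0 label lies within [before, after]
    # and raises ValueError when after < before.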
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
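    # exact, pad/ffill and backfill/bfill alignment between overlapping
    # MultiIndexes, plus the error raised when the calling index is not unique.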
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
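    # method='nearest' and tolerance are not implemented for MultiIndex.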
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
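    # smoke tests: format() should not raise on populated or empty indexes.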
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
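    # to_frame materializes the levels as DataFrame columns, optionally keeps
    # the MultiIndex as the result's index, and carries over level names.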
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
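    # to_hierarchical(n_repeat, n_shuffle) repeats each label n_repeat times
    # and interleaves the repeated blocks when n_shuffle > 1.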
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
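    # smoke test: accessing the private _bounds attribute should not raise.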
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
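    # identical() also compares metadata such as names and the index type
    # (e.g. tupleize_cols), whereas equals() only compares the values.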
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
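    # the union of two overlapping slices rebuilds the full sorted index;
    # union with self or with an empty index returns self unchanged.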
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
# raise Exception called with non-MultiIndex
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
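    # sortlevel sorts by the requested level, using the remaining levels as
    # tie-breakers; ascending=False reverses the order.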
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
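    # drop accepts full tuples, bare level-0 labels, mixed lists of both, and
    # errors='ignore' for keys that are missing.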
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
        # errors='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
        # mixed partial / full drop / errors='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
            levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],