import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Put a softlink into the HDF file and re-read it
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
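# consolidate the internal blocks and (via the private _convert helper) coerce
# datetime-like object columns before storing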
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
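# each expected entry maps a group path to (set of child groups, set of pandas leaves)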
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# private attributes, not stored objects
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if it's not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if it's not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
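# stack the remaining column into a Series (adding a third index level),
# then drop that stacked level to leave a 2-level MultiIndex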
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like an invalid
# combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully named
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
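# map each business day's weekday number (0-6) to its name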
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
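# Sketch of the timedelta query forms exercised above (assumes an open store
# holding a table whose timedelta64[ns] data column is named "C"):
#   store.select("df", where="C < pd.Timedelta('-3D')")  # Timedelta built inside the expression
#   store.select("df", "C < '-3D'")                      # plain string parsed as a Timedelta
#   store.select("df", "C < '-500000s'")                 # second-based offsets work as well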
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
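# The where string is evaluated with access to the caller's namespace, so a bare
# name like "datetime" means whatever is in scope at call time (the module or the
# class imported above); both spellings resolve to the same cutoff here.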
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failer on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# a NaN not in the first position: float columns with NaN are ok too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
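# Where clauses may reference in-scope Python variables directly: lists and
# ranges ("users=selector", "B=selector") as well as Index objects
# ("ts=selector") are expanded into isin-style filters on the data column.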
def test_select_iterator(self, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
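# iterator=True and chunksize both return chunk-wise readers over a 'table'
# node; fixed-format nodes (plain to_hdf without format="table") raise
# TypeError when either option is requested, as checked above.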
def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '{end_dt}'".format(end_dt=end_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = "index > '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self, setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be 1, is 10
assert len(results) == 1
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print(True)` never prints
# True.
where = "index <= '{beg_dt}' & index >= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes(self, setup_path):
# GH 3499, losing frequency info on index recreation
df = DataFrame(
dict(A=Series(range(3), index=date_range("2000-1-1", periods=3, freq="H")))
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "data")
store.put("data", df, format="table")
result = store.get("data")
tm.assert_frame_equal(df, result)
for attr in ["freq", "tz", "name"]:
for idx in ["index", "columns"]:
assert getattr(getattr(df, idx), attr, None) == getattr(
getattr(result, idx), attr, None
)
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("data", df2)
assert store.get_storer("data").info["index"]["freq"] is None
# this is ok
_maybe_remove(store, "df2")
df2 = DataFrame(
dict(
A=Series(
range(3),
index=[
Timestamp("20010101"),
Timestamp("20010102"),
Timestamp("20020101"),
],
)
)
)
store.append("df2", df2)
df3 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("df2", df3)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes2(self, setup_path):
with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
df = DataFrame(
dict(
A=Series(
range(3), index=date_range("2000-1-1", periods=3, freq="H")
)
)
)
df.to_hdf(path, "data", mode="w", append=True)
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
df2.to_hdf(path, "data", append=True)
idx = date_range("2000-1-1", periods=3, freq="H")
idx.name = "foo"
df = DataFrame(dict(A=Series(range(3), index=idx)))
df.to_hdf(path, "data", mode="w", append=True)
assert read_hdf(path, "data").index.name == "foo"
with catch_warnings(record=True):
idx2 = date_range("2001-1-1", periods=3, freq="H")
idx2.name = "bar"
df2 = DataFrame(dict(A=Series(range(3), index=idx2)))
df2.to_hdf(path, "data", append=True)
assert read_hdf(path, "data").index.name is None
def test_frame_select(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
with pytest.raises(ValueError):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(self, setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", "(index>df.index[3] & " 'index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
with pytest.raises(NotImplementedError):
store.select("df", '~(string="bar")')
# invert ok for filters
result = store.select("df", "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(["A", "B"])]
tm.assert_frame_equal(result, expected)
# in
result = store.select("df", "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self, setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({"A": [1, 1, 2, 2, 3]})
parms.to_hdf(pp, "df", mode="w", format="table", data_columns=["A"])
selection = read_hdf(pp, "df", where="A=[2,3]")
hist = DataFrame(
np.random.randn(25, 1),
columns=["data"],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
),
)
hist.to_hdf(hh, "df", mode="w", format="table")
expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
# scope with list like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select("df", where="l1=l")
tm.assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, "df", where="l1=l")
tm.assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, "df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select("df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self, setup_path):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
# not implemented
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A'] | columns=['B']")
# in theory we could deal with this
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A','B'] & columns=['C']")
def test_string_select(self, setup_path):
# GH 2973
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df["x"] = "none"
df.loc[2:7, "x"] = ""
store.append("df", df, data_columns=["x"])
result = store.select("df", "x=none")
expected = df[df.x == "none"]
tm.assert_frame_equal(result, expected)
result = store.select("df", "x!=none")
expected = df[df.x != "none"]
tm.assert_frame_equal(result, expected)
df2 = df.copy()
df2.loc[df2.x == "", "x"] = np.nan
store.append("df2", df2, data_columns=["x"])
result = store.select("df2", "x!=none")
expected = df2[isna(df2.x)]
tm.assert_frame_equal(result, expected)
# int ==/!=
df["int"] = 1
df.loc[2:7, "int"] = 2
store.append("df3", df, data_columns=["int"])
result = store.select("df3", "int=2")
expected = df[df.int == 2]
tm.assert_frame_equal(result, expected)
result = store.select("df3", "int!=2")
expected = df[df.int != 2]
tm.assert_frame_equal(result, expected)
def test_read_column(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# GH 17912
# HDFStore.select_column should raise a KeyError
# exception if the key is not a valid store
with pytest.raises(KeyError, match="No object named df in the file"):
store.select_column("df", "index")
store.append("df", df)
# error
with pytest.raises(
KeyError, match=re.escape("'column [foo] not found in the table'")
):
store.select_column("df", "foo")
with pytest.raises(Exception):
store.select_column("df", "index", where=["index>5"])
# valid
result = store.select_column("df", "index")
tm.assert_almost_equal(result.values, Series(df.index).values)
assert isinstance(result, Series)
# not a data indexable column
with pytest.raises(ValueError):
store.select_column("df", "values_block_0")
# a data column
df2 = df.copy()
df2["string"] = "foo"
store.append("df2", df2, data_columns=["string"])
result = store.select_column("df2", "string")
tm.assert_almost_equal(result.values, df2["string"].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3["string"] = "foo"
df3.loc[4:6, "string"] = np.nan
store.append("df3", df3, data_columns=["string"])
result = store.select_column("df3", "string")
tm.assert_almost_equal(result.values, df3["string"].values)
# start/stop
result = store.select_column("df3", "string", start=2)
tm.assert_almost_equal(result.values, df3["string"].values[2:])
result = store.select_column("df3", "string", start=-2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:])
result = store.select_column("df3", "string", stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[:2])
result = store.select_column("df3", "string", stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[:-2])
result = store.select_column("df3", "string", start=2, stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[2:-2])
result = store.select_column("df3", "string", start=-2, stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({"A": np.random.randn(10), "B": "foo"})
store.append("df4", df4, data_columns=True)
expected = df4["B"]
result = store.select_column("df4", "B")
tm.assert_series_equal(result, expected)
def test_coordinates(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append("df", df)
# all
c = store.select_as_coordinates("df")
assert (c.values == np.arange(len(df.index))).all()
# get coordinates back & test vs frame
_maybe_remove(store, "df")
df = DataFrame(dict(A=range(5), B=range(5)))
store.append("df", df)
c = store.select_as_coordinates("df", ["index<3"])
assert (c.values == np.arange(3)).all()
result = store.select("df", where=c)
expected = df.loc[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates("df", ["index>=3", "index<=4"])
assert (c.values == np.arange(2) + 3).all()
result = store.select("df", where=c)
expected = df.loc[3:4, :]
tm.assert_frame_equal(result, expected)
assert isinstance(c, Index)
# multiple tables
_maybe_remove(store, "df1")
_maybe_remove(store, "df2")
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
c = store.select_as_coordinates("df1", ["A>0", "B>0"])
df1_result = store.select("df1", c)
df2_result = store.select("df2", c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
with ensure_clean_store(setup_path) as store:
df = DataFrame(
np.random.randn(1000, 2), index=date_range("20000101", periods=1000)
)
store.append("df", df)
c = store.select_column("df", "index")
where = c[DatetimeIndex(c).month == 5].index
expected = df.iloc[where]
# locations
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# boolean
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# invalid
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df), dtype="float64"))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df) + 1))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5)
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5, stop=10)
# selection with filter
selection = date_range("20000101", periods=500)
result = store.select("df", where="index in selection")
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result, expected)
# list
df = DataFrame(np.random.randn(10, 2))
store.append("df2", df)
result = store.select("df2", where=[0, 3, 5])
expected = df.iloc[[0, 3, 5]]
tm.assert_frame_equal(result, expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select("df2", where=where)
expected = df.loc[where]
tm.assert_frame_equal(result, expected)
# start/stop
result = store.select("df2", start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result, expected)
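# Besides query strings, where= accepts precomputed row coordinates
# (select_as_coordinates returns an Index), an integer list of row locations,
# or a full-length boolean mask; float arrays, wrong-length masks, or mixing an
# explicit coordinate array with start/stop raise ValueError.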
def test_append_to_multiple(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# exceptions
with pytest.raises(ValueError):
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df3"
)
with pytest.raises(ValueError):
store.append_to_multiple({"df1": None, "df2": None}, df, selector="df3")
with pytest.raises(ValueError):
store.append_to_multiple("df1", df, "df1")
# regular operation
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df1"
)
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple_dropna(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df1", dropna=True
)
result = store.select_as_multiple(["df1", "df2"])
expected = df.dropna()
tm.assert_frame_equal(result, expected)
tm.assert_index_equal(store.select("df1").index, store.select("df2").index)
@pytest.mark.xfail(
run=False, reason="append_to_multiple_dropna_false is not raising as failed"
)
def test_append_to_multiple_dropna_false(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
{"df1a": ["A", "B"], "df2a": None}, df, selector="df1a", dropna=False
)
with pytest.raises(ValueError):
store.select_as_multiple(["df1a", "df2a"])
assert not store.select("df1a").index.equals(store.select("df2a").index)
def test_select_as_multiple(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
with ensure_clean_store(setup_path) as store:
# no tables stored
with pytest.raises(Exception):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
# exceptions
with pytest.raises(Exception):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
with pytest.raises(Exception):
store.select_as_multiple([None], where=["A>0", "B>0"], selector="df1")
msg = "'No object named df3 in the file'"
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(["df3"], where=["A>0", "B>0"], selector="df1")
with pytest.raises(KeyError, match="'No object named df4 in the file'"):
store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df4"
)
# default select
result = store.select("df1", ["A>0", "B>0"])
expected = store.select_as_multiple(
["df1"], where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple(
"df1", where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# multiple (diff selector)
result = store.select_as_multiple(
["df1", "df2"], where="index>df2.index[4]", selector="df2"
)
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
# test exception for diff rows
store.append("df3", tm.makeTimeDataFrame(nper=50))
with pytest.raises(ValueError):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
@pytest.mark.skipif(
LooseVersion(tables.__version__) < LooseVersion("3.1.0"),
reason=("tables version does not support fix for nan selection bug: GH 4858"),
)
def test_nan_selection_bug_4858(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(cols=range(6), values=range(6)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[0] = np.nan
expected = DataFrame(
dict(cols=["13.0", "14.0", "15.0"], values=[3.0, 4.0, 5.0]),
index=[3, 4, 5],
)
# write w/o the index on that particular column
store.append("df", df, data_columns=True, index=["cols"])
result = store.select("df", where="values>2.0")
tm.assert_frame_equal(result, expected)
def test_start_stop_table(self, setup_path):
with ensure_clean_store(setup_path) as store:
# table
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
store.append("df", df)
result = store.select("df", "columns=['A']", start=0, stop=5)
expected = df.loc[0:4, ["A"]]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", "columns=['A']", start=30, stop=40)
assert len(result) == 0
expected = df.loc[30:40, ["A"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_multiple(self, setup_path):
# GH 16209
with ensure_clean_store(setup_path) as store:
df = DataFrame({"foo": [1, 2], "bar": [1, 2]})
store.append_to_multiple(
{"selector": ["foo"], "data": None}, df, selector="selector"
)
result = store.select_as_multiple(
["selector", "data"], selector="selector", start=0, stop=1
)
expected = df.loc[[0], ["foo", "bar"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_fixed(self, setup_path):
with ensure_clean_store(setup_path) as store:
# fixed, GH 8287
df = DataFrame(
dict(A=np.random.rand(20), B=np.random.rand(20)),
index=pd.date_range("20130101", periods=20),
)
store.put("df", df)
result = store.select("df", start=0, stop=5)
expected = df.iloc[0:5, :]
tm.assert_frame_equal(result, expected)
result = store.select("df", start=5, stop=10)
expected = df.iloc[5:10, :]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", start=30, stop=40)
expected = df.iloc[30:40, :]
tm.assert_frame_equal(result, expected)
# series
s = df.A
store.put("s", s)
result = store.select("s", start=0, stop=5)
expected = s.iloc[0:5]
tm.assert_series_equal(result, expected)
result = store.select("s", start=5, stop=10)
expected = s.iloc[5:10]
tm.assert_series_equal(result, expected)
# sparse; not implemented
df = tm.makeDataFrame()
df.iloc[3:5, 1:3] = np.nan
df.iloc[8:10, -2] = np.nan
def test_select_filter_corner(self, setup_path):
df = DataFrame(np.random.randn(50, 100))
df.index = ["{c:3d}".format(c=c) for c in df.index]
df.columns = ["{c:3d}".format(c=c) for c in df.columns]
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
crit = "columns=df.columns[:75]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75]])
crit = "columns=df.columns[:75:2]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]])
def test_path_pathlib(self, setup_path):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: pd.read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize("start, stop", [(0, 2), (1, 2), (None, None)])
def test_contiguous_mixed_data_table(self, start, stop, setup_path):
# GH 17021
# ValueError when reading a contiguous mixed-data table ft. VLArray
df = DataFrame(
{
"a": Series([20111010, 20111011, 20111012]),
"b": Series(["ab", "cd", "ab"]),
}
)
with ensure_clean_store(setup_path) as store:
store.append("test_dataset", df)
result = store.select("test_dataset", start=start, stop=stop)
tm.assert_frame_equal(df[start:stop], result)
def test_path_pathlib_hdfstore(self, setup_path):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, "df")
result = tm.round_trip_pathlib(writer, reader)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath(self, setup_path):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: pd.read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
def test_path_localpath_hdfstore(self, setup_path):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, "df")
result = tm.round_trip_localpath(writer, reader)
tm.assert_frame_equal(df, result)
def _check_roundtrip(self, obj, comparator, path, compression=False, **kwargs):
options = {}
if compression:
options["complib"] = _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store["obj"] = obj
retrieved = store["obj"]
comparator(retrieved, obj, **kwargs)
def _check_double_roundtrip(
self, obj, comparator, path, compression=False, **kwargs
):
options = {}
if compression:
options["complib"] = compression or _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store["obj"] = obj
retrieved = store["obj"]
comparator(retrieved, obj, **kwargs)
store["obj"] = retrieved
again = store["obj"]
comparator(again, obj, **kwargs)
def _check_roundtrip_table(self, obj, comparator, path, compression=False):
options = {}
if compression:
options["complib"] = _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store.put("obj", obj, format="table")
retrieved = store["obj"]
comparator(retrieved, obj)
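# The _check_* helpers above write the object under a single "obj" key
# (store["obj"] = obj for fixed format, put(..., format="table") for the table
# variant), read it back, and delegate the comparison to the supplied
# comparator; the compression flag just populates the complib option.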
def test_multiple_open_close(self, setup_path):
# gh-4409: open & close multiple times
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
# single
store = HDFStore(path)
assert "CLOSED" not in store.info()
assert store.is_open
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
with ensure_clean_path(setup_path) as path:
if pytables._table_file_open_policy_is_strict:
# multiples
store1 = HDFStore(path)
with pytest.raises(ValueError):
HDFStore(path)
store1.close()
else:
# multiples
store1 = HDFStore(path)
store2 = HDFStore(path)
assert "CLOSED" not in store1.info()
assert "CLOSED" not in store2.info()
assert store1.is_open
assert store2.is_open
store1.close()
assert "CLOSED" in store1.info()
assert not store1.is_open
assert "CLOSED" not in store2.info()
assert store2.is_open
store2.close()
assert "CLOSED" in store1.info()
assert "CLOSED" in store2.info()
assert not store1.is_open
assert not store2.is_open
# nested close
store = HDFStore(path, mode="w")
store.append("df", df)
store2 = HDFStore(path)
store2.append("df2", df)
store2.close()
assert "CLOSED" in store2.info()
assert not store2.is_open
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
# double closing
store = HDFStore(path, mode="w")
store.append("df", df)
store2 = HDFStore(path)
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
store2.close()
assert "CLOSED" in store2.info()
assert not store2.is_open
# ops on a closed store
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
store = HDFStore(path)
store.close()
with pytest.raises(ClosedFileError):
store.keys()
with pytest.raises(ClosedFileError):
"df" in store
with pytest.raises(ClosedFileError):
len(store)
with pytest.raises(ClosedFileError):
store["df"]
with pytest.raises(AttributeError):
store.df
with pytest.raises(ClosedFileError):
store.select("df")
with pytest.raises(ClosedFileError):
store.get("df")
with pytest.raises(ClosedFileError):
store.append("df2", df)
with pytest.raises(ClosedFileError):
store.put("df3", df)
with pytest.raises(ClosedFileError):
store.get_storer("df2")
with pytest.raises(ClosedFileError):
store.remove("df2")
with pytest.raises(ClosedFileError, match="file is not open"):
store.select("df")
def test_pytables_native_read(self, datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf/pytables_native.h5"), mode="r"
) as store:
d2 = store["detector/readout"]
assert isinstance(d2, DataFrame)
@pytest.mark.skipif(
is_platform_windows(), reason="native2 read fails oddly on windows"
)
def test_pytables_native2_read(self, datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "pytables_native2.h5"), mode="r"
) as store:
str(store)
d1 = store["detector"]
assert isinstance(d1, DataFrame)
@td.xfail_non_writeable
def test_legacy_table_fixed_format_read_py2(self, datapath, setup_path):
# GH 24510
# legacy table with fixed format written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_fixed_py2.h5"), mode="r"
) as store:
result = store.select("df")
expected = pd.DataFrame(
[[1, 2, 3, "D"]],
columns=["A", "B", "C", "D"],
index=pd.Index(["ABC"], name="INDEX_NAME"),
)
tm.assert_frame_equal(expected, result)
def test_legacy_table_read_py2(self, datapath, setup_path):
# issue: 24925
# legacy table written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_py2.h5"), mode="r"
) as store:
result = store.select("table")
expected = pd.DataFrame({"a": ["a", "b"], "b": [2, 3]})
tm.assert_frame_equal(expected, result)
def test_copy(self, setup_path):
with catch_warnings(record=True):
def do_copy(f, new_f=None, keys=None, propindexes=True, **kwargs):
try:
store = HDFStore(f, "r")
if new_f is None:
import tempfile
fd, new_f = tempfile.mkstemp()
tstore = store.copy(
new_f, keys=keys, propindexes=propindexes, **kwargs
)
# check keys
if keys is None:
keys = store.keys()
assert set(keys) == set(tstore.keys())
# check indices & nrows
for k in tstore.keys():
if tstore.get_storer(k).is_table:
new_t = tstore.get_storer(k)
orig_t = store.get_storer(k)
assert orig_t.nrows == new_t.nrows
# check propindexes
if propindexes:
for a in orig_t.axes:
if a.is_indexed:
assert new_t[a.name].is_indexed
finally:
safe_close(store)
safe_close(tstore)
try:
os.close(fd)
except (OSError, ValueError):
pass
safe_remove(new_f)
# new table
df = tm.makeDataFrame()
try:
path = create_tempfile(setup_path)
st = HDFStore(path)
st.append("df", df, data_columns=["A"])
st.close()
do_copy(f=path)
do_copy(f=path, propindexes=False)
finally:
safe_remove(path)
def test_store_datetime_fractional_secs(self, setup_path):
with ensure_clean_store(setup_path) as store:
dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
series = Series([0], [dt])
store["a"] = series
assert store["a"].index[0] == dt
def test_tseries_indices_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
idx = tm.makeDateIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store["a"] = ser
result = store["a"]
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
idx = tm.makePeriodIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store["a"] = ser
result = store["a"]
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
def test_tseries_indices_frame(self, setup_path):
with ensure_clean_store(setup_path) as store:
idx = tm.makeDateIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
store["a"] = df
result = store["a"]
tm.assert_frame_equal(result, df)
import sys
import os
import logging
import datetime
import pandas as pd
from job import Job, Trace
from policies import ShortestJobFirst, FirstInFirstOut, ShortestRemainingTimeFirst, QuasiShortestServiceFirst
sys.path.append('..')
def simulate_vc(trace, vc, placement, log_dir, policy, logger, start_ts, *args):
if policy == 'sjf':
scheduler = ShortestJobFirst(
trace, vc, placement, log_dir, logger, start_ts)
elif policy == 'fifo':
scheduler = FirstInFirstOut(
trace, vc, placement, log_dir, logger, start_ts)
elif policy == 'srtf':
scheduler = ShortestRemainingTimeFirst(
trace, vc, placement, log_dir, logger, start_ts)
elif policy == 'qssf':
scheduler = QuasiShortestServiceFirst(
trace, vc, placement, log_dir, logger, start_ts, args[0])
scheduler.simulate()
logger.info(f'Finish {vc.vc_name}')
return True
def get_available_schedulers():
return ['fifo', 'sjf', 'srtf', 'qssf']
def get_available_placers():
return ['random', 'consolidate', 'consolidateFirst']
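# Minimal usage sketch for the dispatcher above (illustrative only: the trace,
# vc, logger and start_ts objects are assumed to already exist and are not
# defined in this module; the qssf estimator name is likewise an assumption):
#   ok = simulate_vc(trace, vc, 'consolidate', './log', 'fifo', logger, start_ts)
#   ok = simulate_vc(trace, vc, 'consolidate', './log', 'qssf', logger, start_ts, estimator)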
def trace_process(dir, date_range):
start = '2020-04-01 00:00:00'
df = pd.read_csv(dir+'/cluster_log.csv', parse_dates=['submit_time'], usecols=['job_id', 'user', 'vc', 'jobname', 'gpu_num',
'cpu_num', 'state', 'submit_time', 'duration'])
# Consider gpu jobs only
df = df[df['gpu_num'] > 0]
# VC filter
vc_dict = pd.read_pickle(dir+'/vc_dict_homo.pkl')
vc_list = vc_dict.keys()
df = df[df['vc'].isin(vc_list)]
df = df[df['submit_time'] >= pd.Timestamp(start)]
df['submit_time'] = df['submit_time'].apply(
lambda x: int(datetime.datetime.timestamp(pd.Timestamp(x))))
# Normalizing
df['submit_time'] = df['submit_time'] - df.iloc[0]['submit_time']
df['remain'] = df['duration']
df[['start_time', 'end_time']] = sys.maxsize
df[['ckpt_times', 'queue', 'jct']] = 0
df['status'] = None
# Slicing simulation part
begin = (pd.Timestamp(date_range[0])-pd.Timestamp(start)).total_seconds()
end = (pd.Timestamp(date_range[1])-pd.Timestamp(start)).total_seconds()
df = df[(df['submit_time'] >= begin) & (df['submit_time'] <= end)]
df.sort_values(by='submit_time', inplace=True)
df.reset_index(inplace=True, drop=True)
return df, begin
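# trace_process converts submit_time to epoch seconds, shifts it so the first
# retained job starts at t=0, then keeps only jobs submitted within
# [date_range[0], date_range[1]] measured from the fixed start timestamp;
# it returns the sliced, submit-time-sorted frame plus that begin offset.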
def trace_philly_process(dir, date_range):
start = '2017-10-01 00:00:00'
df = pd.read_csv(dir+'/cluster_log.csv', parse_dates=['submit_time'], usecols=['user', 'vc', 'jobname', 'gpu_num',
'state', 'submit_time', 'duration'])
# Consider gpu jobs only
df = df[df['gpu_num'] > 0]
# VC filter
vc_dict = pd.read_pickle(dir+'/vc_dict_homo.pkl')
vc_list = vc_dict.keys()
df = df[df['vc'].isin(vc_list)]
df = df[df['submit_time'] >= pd.Timestamp(start)]
df['submit_time'] = df['submit_time'].apply(
lambda x: int(datetime.datetime.timestamp(pd.Timestamp(x))))
df['state'] = df['state'].replace('Pass', 'COMPLETED')
df['state'] = df['state'].replace('Failed', 'FAILED')
df['state'] = df['state'].replace('Killed', 'CANCELLED')
# Normalizing
df['submit_time'] = df['submit_time'] - df.iloc[0]['submit_time']
df['remain'] = df['duration']
df[['start_time', 'end_time']] = sys.maxsize
df[['ckpt_times', 'queue', 'jct']] = 0
df['status'] = None
# Slicing simulation part
begin = (pd.Timestamp(date_range[0])-pd.Timestamp(start)).total_seconds()
    end = (pd.Timestamp(date_range[1])-pd.Timestamp(start)).total_seconds()
# settings.configure()
# import os
# import django
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
# django.setup()
# from . import models
import email
import pandas
from datetime import time
import random
from django.core.management.base import BaseCommand
from django.conf import settings
from med.models import *
class Command(BaseCommand):
def returnTimes(self, medics=None, dayOnRussian=None, index=0):
if dayOnRussian and not medics.empty:
if not pandas.isna(medics.iloc[index][dayOnRussian]):
_ = medics.iloc[index][dayOnRussian].split('-')
tmp = [time(int(_[0].split(':')[0]), int(_[0].split(':')[1]), 0), time(int(_[1].split(':')[0]), int(_[1].split(':')[1]), 0)]
return tmp
else:
tmp = [None, None]
return tmp
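    # Example (assumption about the spreadsheet format): a cell such as "09:00-17:00" in the
    # weekday column is split on '-' and returned as [time(9, 0, 0), time(17, 0, 0)];
    # an empty (NaN) cell yields [None, None].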
def parseScheduleAndDoctors (self, path=None):
#medics = pandas.read_excel(path)
medics = pandas.read_csv(path, encoding='windows-1251')
        medics['Срок действия'] = pandas.to_datetime(medics['Срок действия'])
from context import dero
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas import Timestamp
from numpy import nan
import numpy
class DataFrameTest:
df = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_duplicate_row = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/3/2000', 1.03), #this is a duplicated row
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_weight = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1),
(10516, 'a', '1/4/2000', 1.04, 0),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 1),
(10516, 'b', '1/4/2000', 1.08, 1),
(10517, 'a', '1/1/2000', 1.09, 0),
(10517, 'a', '1/2/2000', 1.1, 0),
(10517, 'a', '1/3/2000', 1.11, 0),
(10517, 'a', '1/4/2000', 1.12, 1),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight'])
df_nan_byvar = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', 3),
('b', 4),
], columns = ['byvar', 'val'])
df_nan_byvar_and_val = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', nan),
('b', 4),
], columns = ['byvar', 'val'])
single_ticker_df = pd.DataFrame(data = [
('a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['byvar', 'Date', 'TICKER'])
df_datetime = df.copy()
df_datetime['Date'] = pd.to_datetime(df_datetime['Date'])
df_datetime_no_ret = df_datetime.copy()
df_datetime_no_ret.drop('RET', axis=1, inplace=True)
df_gvkey_str = pd.DataFrame([
('001076','3/1/1995'),
('001076','4/1/1995'),
('001722','1/1/2012'),
('001722','7/1/2012'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str['Date'] = pd.to_datetime(df_gvkey_str['Date'])
df_gvkey_num = df_gvkey_str.copy()
df_gvkey_num['GVKEY'] = df_gvkey_num['GVKEY'].astype('float64')
df_gvkey_str2 = pd.DataFrame([
('001076','2/1/1995'),
('001076','3/2/1995'),
('001722','11/1/2011'),
('001722','10/1/2011'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str2['Date'] = pd.to_datetime(df_gvkey_str2['Date'])
df_fill_data = pd.DataFrame(
data=[
(4, 'c', nan, 'a'),
(1, 'd', 3, 'a'),
(10, 'e', 100, 'a'),
(2, nan, 6, 'b'),
(5, 'f', 8, 'b'),
(11, 'g', 150, 'b'),
],
columns=['y', 'x1', 'x2', 'group']
)
class TestCumulate(DataFrameTest):
expect_between_1_3 = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.1, 1.1),
(10517, 'a', '1/3/2000', 1.11, 1.2210000000000003),
(10517, 'a', '1/4/2000', 1.12, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'cum_RET'])
expect_first = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.092624),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.224936),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.10, 1.10),
(10517, 'a', '1/3/2000', 1.11, 1.221),
(10517, 'a', '1/4/2000', 1.12, 1.36752),
], columns = ['PERMNO','byvar','Date', 'RET', 'cum_RET'])
def test_method_between_1_3(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[1,3])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_between_m2_0(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
#Actually same result as [1,3]
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_shifted_index(self):
df = self.df.copy()
df.index = df.index + 10
cum_df = dero.pandas.cumulate(df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_first(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'])
assert_frame_equal(self.expect_first, cum_df, check_dtype=False)
def test_grossify(self):
df = self.df.copy() #don't overwrite original
df['RET'] -= 1 #ungrossify
expect_first_grossify = self.expect_first.copy()
expect_first_grossify['cum_RET'] -= 1
expect_first_grossify['RET'] -= 1
cum_df = dero.pandas.cumulate(df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'], grossify=True)
assert_frame_equal(expect_first_grossify, cum_df, check_dtype=False)
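    # Illustrative check of the fixture above: with method 'between' and time=[1, 3], the
    # cumulated value at 1/3/2000 for PERMNO 10516 / byvar 'a' is the gross product
    # 1.02 * 1.03 = 1.0506, which is the figure recorded in expect_between_1_3.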
class TestGroupbyMerge(DataFrameTest):
def test_subset_max(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'max', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 1.04),
(10516, 'a', '1/2/2000', 1.02, 1.04),
(10516, 'a', '1/3/2000', 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.08),
(10516, 'b', '1/2/2000', 1.06, 1.08),
(10516, 'b', '1/3/2000', 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.12),
(10517, 'a', '1/2/2000', 1.10, 1.12),
(10517, 'a', '1/3/2000', 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.12, 1.12)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_max'])
assert_frame_equal(expect_df, out)
def test_subset_std(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'std', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 0.012909944487358068),
(10516, 'a', '1/2/2000', 1.02, 0.012909944487358068),
(10516, 'a', '1/3/2000', 1.03, 0.012909944487358068),
(10516, 'a', '1/4/2000', 1.04, 0.012909944487358068),
(10516, 'b', '1/1/2000', 1.05, 0.012909944487358068),
(10516, 'b', '1/2/2000', 1.06, 0.012909944487358068),
(10516, 'b', '1/3/2000', 1.07, 0.012909944487358068),
(10516, 'b', '1/4/2000', 1.08, 0.012909944487358068),
(10517, 'a', '1/1/2000', 1.09, 0.012909944487358068),
(10517, 'a', '1/2/2000', 1.10, 0.012909944487358068),
(10517, 'a', '1/3/2000', 1.11, 0.012909944487358068),
(10517, 'a', '1/4/2000', 1.12, 0.012909944487358068)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_std'])
assert_frame_equal(expect_df, out)
def test_nan_byvar_transform(self):
expect_df = self.df_nan_byvar.copy()
expect_df['val_transform'] = expect_df['val']
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'transform', (lambda x: x))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_transform_numeric(self):
non_standard_index = self.df_nan_byvar_and_val.copy()
non_standard_index.index = [5,6,7,8]
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
expect_df.index = [5,6,7,8]
out = dero.pandas.groupby_merge(non_standard_index, 'byvar', 'transform', (lambda x: x + 1))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_and_nonstandard_index_transform_numeric(self):
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
def test_nan_byvar_sum(self):
expect_df = pd.DataFrame(data = [
('a', 1, 1.0),
(nan, 2, nan),
('b', 3, 7.0),
('b', 4, 7.0),
], columns = ['byvar', 'val', 'val_sum'])
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'sum')
assert_frame_equal(expect_df, out)
class TestLongToWide:
expect_df_with_colindex = pd.DataFrame(data = [
(10516, 'a', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar',
'RET1/1/2000', 'RET1/2/2000',
'RET1/3/2000', 'RET1/4/2000'])
expect_df_no_colindex = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/2/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/3/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/2/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/3/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/2/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/3/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET0',
'RET1', 'RET2', 'RET3'])
input_data = DataFrameTest()
ltw_no_dup_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_dup_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_no_dup_no_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET')
ltw_dup_no_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET')
df_list = [ltw_no_dup_colindex, ltw_dup_colindex,
ltw_no_dup_no_colindex, ltw_dup_no_colindex]
def test_no_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_no_dup_colindex)
def test_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_dup_colindex)
def test_no_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_no_dup_no_colindex)
def test_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_dup_no_colindex)
def test_no_extra_vars(self):
for df in self.df_list:
assert ('__idx__','__key__') not in df.columns
class TestPortfolioAverages:
input_data = DataFrameTest()
expect_avgs_no_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001),
(1, 'b', 1.0550000000000002),
(2, 'a', 1.1050000000000002),
(2, 'b', 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET'])
expect_avgs_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001, 1.025),
(1, 'b', 1.0550000000000002, 1.0550000000000002),
(2, 'a', 1.1050000000000002, 1.12),
(2, 'b', 1.0750000000000002, 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET', 'RET_wavg'])
expect_ports = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0, 1),
(10516, 'a', '1/2/2000', 1.02, 1, 1),
(10516, 'a', '1/3/2000', 1.03, 1, 1),
(10516, 'a', '1/4/2000', 1.04, 0, 1),
(10516, 'b', '1/1/2000', 1.05, 1, 1),
(10516, 'b', '1/2/2000', 1.06, 1, 1),
(10516, 'b', '1/3/2000', 1.07, 1, 2),
(10516, 'b', '1/4/2000', 1.08, 1, 2),
(10517, 'a', '1/1/2000', 1.09, 0, 2),
(10517, 'a', '1/2/2000', 1.1, 0, 2),
(10517, 'a', '1/3/2000', 1.11, 0, 2),
(10517, 'a', '1/4/2000', 1.12, 1, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight', 'portfolio'])
avgs, ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar')
w_avgs, w_ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar', wtvar='weight')
def test_simple_averages(self):
assert_frame_equal(self.expect_avgs_no_wt, self.avgs, check_dtype=False)
def test_weighted_averages(self):
assert_frame_equal(self.expect_avgs_wt, self.w_avgs, check_dtype=False)
def test_portfolio_construction(self):
print(self.ports)
assert_frame_equal(self.expect_ports, self.ports, check_dtype=False)
assert_frame_equal(self.expect_ports, self.w_ports, check_dtype=False)
class TestWinsorize(DataFrameTest):
def test_winsor_40_subset_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.022624),
(10516, 'a', '1/2/2000', 1.022624),
(10516, 'a', '1/3/2000', 1.02672),
(10516, 'a', '1/4/2000', 1.02672),
(10516, 'b', '1/1/2000', 1.062624),
(10516, 'b', '1/2/2000', 1.062624),
(10516, 'b', '1/3/2000', 1.06672),
(10516, 'b', '1/4/2000', 1.06672),
(10517, 'a', '1/1/2000', 1.102624),
(10517, 'a', '1/2/2000', 1.102624),
(10517, 'a', '1/3/2000', 1.10672),
(10517, 'a', '1/4/2000', 1.10672),
], columns = ['PERMNO', 'byvar', 'Date', 'RET'])
wins = dero.pandas.winsorize(self.df, .4, subset='RET', byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, wins, check_less_precise=True)
class TestRegBy(DataFrameTest):
def create_indf(self):
indf = self.df_weight.copy()
indf['key'] = indf['PERMNO'].astype(str) + '_' + indf['byvar']
return indf
def test_regby_nocons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.48774684748988806, '10516_a'),
(0.9388636664168903, '10516_b'),
(0.22929206076239614, '10517_a'),
], columns = ['coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key', cons=False)
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(-32.89999999999997, 29.999999999999982, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons_low_obs(self):
indf = self.create_indf().loc[:8,:] #makes it so that one byvar only has one obs
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(nan, nan, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
class TestExpandMonths(DataFrameTest):
def test_expand_months_tradedays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
            (Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
import argparse
import datetime
import numpy as np
import pandas as pd
import pysam
def convert_table_to_vcf(genotypes_filename, calls_filename, reference_filename, vcf_filename):
# Load genotypes in long format.
    genotypes = pd.read_table(genotypes_filename)
from utils.model import Perceptron
from utils.all_utils import prepare_data, save_plot, save_model
import pandas as pd
import logging
import os
logging_str = "[%(asctime)s: %(levelname)s: %(module)s] %(message)s"
log_dir = "logs"
os.makedirs(log_dir, exist_ok=True)
logging.basicConfig(filename= os.path.join(log_dir,"running_logs.log"),level=logging.INFO, format=logging_str, filemode="a")
def main(data, modelName, plotName, eta, epochs):
    df = pd.DataFrame(data)
import os
import configparser
import pandas as pd
import numpy as np
import psycopg2
import psycopg2.extras
# Set up GCP API
from google.cloud import bigquery
# Construct a BigQuery client object.
client = bigquery.Client()
import sql_queries as sql_q
def convert_int_zipcode_to_str(df, col):
"""
Converts integer zipcode column into 0-padded str column.
df - pandas dataframe with zipcode int column
col - string; name of column with zipcodes
"""
df[col] = df[col].astype('str')
df[col] = df[col].apply(lambda x: x.zfill(5))
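# Example: an integer zipcode 501 becomes the zero-padded string "00501" after the
# astype('str') and zfill(5) steps above.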
def remove_bad_zipcodes(zip_df, df, col):
"""
Removes bad zipcodes from data (i.e. erroneous zipcodes that are
not valid US zipcodes).
zip_df - pandas dataframe with valid US zipcodes
df - pandas dataframe to be cleaned
col - string; column name of zipcode column in df
"""
zip_set = set(zip_df['Zipcode'].unique())
return df[df[col].isin(zip_set)]
def load_lbnl_data(zip_df, replace_nans=True, short_zips=True):
"""
Loads LBNL solar survey data.
zip_df - pandas dataframe with zipcode data for cleaning bad zipcodes
replace_nans - boolean; if True, replaces -9999 missing value placeholders with np.nan
short_zips - boolean; if True, makes sure all zip codes are 5-digit
"""
df1 = pd.read_csv('../data/TTS_LBNL_public_file_10-Dec-2019_p1.csv', encoding='latin-1', low_memory=False)
df2 = pd.read_csv('../data/TTS_LBNL_public_file_10-Dec-2019_p2.csv', encoding='latin-1', low_memory=False)
lbnl_df = pd.concat([df1, df2], axis=0)
if replace_nans:
lbnl_df.replace(-9999, np.nan, inplace=True)
lbnl_df.replace('-9999', np.nan, inplace=True)
if short_zips:
lbnl_df['Zip Code'] = lbnl_df['Zip Code'].apply(lambda x: x.strip()[:5])
# a few zip codes with only 4 digits
lbnl_df['Zip Code'] = lbnl_df['Zip Code'].apply(lambda x: x.zfill(5))
lbnl_df = remove_bad_zipcodes(zip_df, lbnl_df, 'Zip Code')
return lbnl_df
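# Typical usage (hedged sketch; the zipcode reference path is hypothetical):
#   zip_df = pd.read_csv('../data/us_zipcodes.csv', dtype={'Zipcode': str})
#   lbnl_df = load_lbnl_data(zip_df)   # cleaned LBNL Tracking the Sun records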
def load_eia_zipcode_data(zip_df):
"""
Loads EIA dataset with zipcodes and energy providers.
zip_df - pandas dataframe with zipcode data for cleaning bad zipcodes
"""
iou_df = pd.read_csv('../data/iouzipcodes2017.csv')
noniou_df = pd.read_csv('../data/noniouzipcodes2017.csv')
eia_zipcode_df = pd.concat([iou_df, noniou_df], axis=0)
# zip codes are ints without zero padding
convert_int_zipcode_to_str(eia_zipcode_df, 'zip')
eia_zipcode_df = remove_bad_zipcodes(zip_df, eia_zipcode_df, 'zip')
return eia_zipcode_df
def extract_lbnl_data(zip_df):
"""
Gets data from LBNL dataset for the installer table and main metrics table.
zip_df - pandas dataframe with zipcode data for cleaning bad zipcodes
"""
lbnl_df = load_lbnl_data(zip_df, replace_nans=False)
# get mode of module manufacturer #1 for each install company
# doesn't seem to work when -9999 values are replaced with NaNs
manufacturer_modes = lbnl_df[['Installer Name', 'Module Manufacturer #1']].groupby('Installer Name').agg(lambda x: x.value_counts().index[0])
manufacturer_modes.reset_index(inplace=True)
# dictionary of installer name to ID
id_install_dict = {}
for i, r in manufacturer_modes.iterrows():
id_install_dict[r['Installer Name']] = i
# get primary installers by zipcode
installer_modes = lbnl_df[['Installer Name', 'Zip Code']].groupby('Zip Code').agg(lambda x: x.value_counts().index[0])
lbnl_zip_data = lbnl_df[['Battery System', 'Feed-in Tariff (Annual Payment)', 'Zip Code']].copy()
lbnl_zip_data.replace(-9999, 0, inplace=True)
lbnl_zip_groups = lbnl_zip_data.groupby('Zip Code').mean()
# merge with most common installer by zip codes
lbnl_zip_groups = lbnl_zip_groups.merge(installer_modes, left_index=True, right_index=True)
lbnl_zip_groups = lbnl_zip_groups[~(lbnl_zip_groups.index == '-9999')]
lbnl_zip_groups.reset_index(inplace=True)
lbnl_zip_groups['Installer ID'] = lbnl_zip_groups['Installer Name'].replace(id_install_dict)
lbnl_zip_groups['Installer ID'] = lbnl_zip_groups['Installer ID'].astype('int')
return manufacturer_modes.reset_index(), lbnl_zip_groups
def extract_eia_data(zip_df):
"""
Extracts data from EIA for main metrics table and utility table.
zip_df - pandas dataframe with zipcode data for cleaning bad zipcodes
Note: several utilities serve the same zip codes.
"""
# load zipcode to eiaid/util number data
eia_zip_df = load_eia_zipcode_data(zip_df)
# eia861 report loading
eia861_df = pd.read_excel('../data/Sales_Ult_Cust_2018.xlsx', header=[0, 1, 2])
    # util number here is eiaid in the IOU data
# get relevant columns from multiindex dataframe
utility_number = eia861_df['Utility Characteristics', 'Unnamed: 1_level_1', 'Utility Number']
utility_name = eia861_df['Utility Characteristics', 'Unnamed: 2_level_1', 'Utility Name']
service_type = eia861_df['Utility Characteristics', 'Unnamed: 4_level_1', 'Service Type']
ownership = eia861_df['Utility Characteristics', 'Unnamed: 7_level_1', 'Ownership']
eia_utility_data = pd.concat([utility_number, utility_name, service_type, ownership], axis=1)
eia_utility_data.columns = eia_utility_data.columns.droplevel(0).droplevel(0)
# get residential cost and kwh usage data
res_data = eia861_df['RESIDENTIAL'].copy()
# drop uppermost level
res_data.columns = res_data.columns.droplevel(0)
# missing data seems to be a period
res_data.replace('.', np.nan, inplace=True)
for c in res_data.columns:
res_data[c] = res_data[c].astype('float')
util_number_data = pd.DataFrame(utility_number)
util_number_data.columns = util_number_data.columns.droplevel(0).droplevel(0)
res_data = pd.concat([res_data, utility_number], axis=1)
res_data.columns = ['Thousand Dollars', 'Megawatthours', 'Count', 'Utility Number']
# first join with zipcode data to group by zip
res_data_zip = res_data.merge(eia_zip_df, left_on='Utility Number', right_on='eiaid')
# group by zip and get sums of revenues, MWh, and customer count
res_data_zip = res_data_zip.groupby('zip').sum()
# convert revenues to yearly bill and MWh to kWh
# thousand dollars of revenue divided by customer count
res_data_zip['average_yearly_bill'] = res_data_zip['Thousand Dollars'] * 1000 / res_data_zip['Count']
# kwh divided by customer count
res_data_zip['average_yearly_kwh'] = (res_data_zip['Megawatthours'] * 1000) / res_data_zip['Count']
res_columns = ['average_yearly_bill', 'average_yearly_kwh']
res_data_zip = res_data_zip[res_columns]
# combine residential and utility info data
# eia_861_data = pd.concat([res_data[res_columns], eia_utility_data], axis=1)
# combine zipcodes with EIA861 utility data
eia_util_zipcode = eia_utility_data.merge(eia_zip_df, left_on='Utility Number', right_on='eiaid')
# get most-common utility name, service type, and ownership by zipcode
common_util = eia_util_zipcode[['zip', 'Utility Name', 'Service Type', 'Ownership']].groupby('zip').agg(lambda x: x.value_counts().index[0])
eia_861_summary = res_data_zip.merge(common_util, left_index=True, right_index=True)
# change zip back to a column
eia_861_summary.reset_index(inplace=True)
eia_861_summary = remove_bad_zipcodes(zip_df, eia_861_summary, 'zip')
return eia_861_summary
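# Worked example for the derived metrics above (illustrative numbers): a zip code with
# $1,200 thousand of residential revenue, 8,000 MWh of sales and 1,000 customers gives
# average_yearly_bill = 1200 * 1000 / 1000 = $1,200 and
# average_yearly_kwh  = 8000 * 1000 / 1000 = 8,000 kWh per customer.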
def extract_acs_data(zip_df, load_csv=True, save_csv=True):
"""
Extracts ACS US census data from Google BigQuery.
zip_df - pandas dataframe with zipcode data for cleaning bad zipcodes
load_csv - boolean; if True, tries to load data from csv
save_csv - boolean; if True, will save data to csv if downloading anew
"""
# ACS US census data
ACS_DB = '`bigquery-public-data`.census_bureau_acs'
ACS_TABLE = 'zip_codes_2017_5yr'
filename = '../data/acs_data.csv'
if load_csv and os.path.exists(filename):
acs_df = pd.read_csv(filename)
convert_int_zipcode_to_str(acs_df, 'geo_id')
return acs_df
acs_data_query = f"""SELECT geo_id,
median_age,
housing_units,
median_income,
owner_occupied_housing_units,
occupied_housing_units,
dwellings_1_units_detached + dwellings_1_units_attached + dwellings_2_units + dwellings_3_to_4_units AS family_homes,
bachelors_degree_2,
different_house_year_ago_different_city + different_house_year_ago_same_city AS moved_recently
FROM {ACS_DB}.{ACS_TABLE}"""
acs_data = pd.read_gbq(acs_data_query)
acs_data = remove_bad_zipcodes(zip_df, acs_data, 'geo_id')
if save_csv:
acs_data.to_csv(filename, index=False)
return acs_data
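# Note (added): pd.read_gbq requires the pandas-gbq package and authenticated Google Cloud
# credentials; when load_csv=True and the cached CSV already exists, the BigQuery query is
# skipped entirely.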
def extract_psr_data(zip_df, load_csv=True, save_csv=True):
"""
    Extracts Project Sunroof data from Google BigQuery.
zip_df - pandas dataframe with zipcode data for cleaning bad zipcodes
load_csv - boolean; if True, tries to load data from csv
save_csv - boolean; if True, will save data to csv if downloading anew
"""
PSR_DB = '`bigquery-public-data`.sunroof_solar'
PSR_TABLE = 'solar_potential_by_postal_code'
filename = '../data/psr_data.csv'
if load_csv and os.path.exists(filename):
        df = pd.read_csv(filename)
from brightics.common.report import ReportBuilder, strip_margin, plt2MD, \
pandasDF2MD, keyValues2MD
from brightics.function.utils import _model_dict
from brightics.common.utils import check_required_parameters
import numpy as np
import pandas as pd
import math
from math import sqrt
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
from scipy.stats import t
from scipy import mean
from statsmodels.stats.weightstats import ttest_ind
def one_sample_ttest(table, input_cols, alternatives, hypothesized_mean=0, conf_level=0.95):
n = len(table)
degree = n - 1
alpha = 1.0 - conf_level
out_table = pd.DataFrame()
# statistics
statistics = "t statistic, t distribution with %d degrees of freedom under the null hypothesis." % degree
# Print model
rb = ReportBuilder()
rb.addMD(strip_margin("""
    ## One Sample T Test Result
| - Statistics = {s}
| - Hypothesized mean = {h}
| - Confidence level = {cl}
""".format(s=statistics, h=hypothesized_mean, cl=conf_level)))
for input_col in input_cols:
# model
alter_list = []
p_list = []
CI_list = []
# data
data = input_col
# estimates
result = stats.ttest_1samp(table[input_col], hypothesized_mean)
estimates = result[0]
cols = ['data', 'alternative_hypothesis', 'statistics', 'estimates', 'p_value', 'confidence_level', 'lower_confidence_interval', 'upper_confidence_interval']
for i in alternatives:
if (i == 'Greater'):
# alternative hypothesis
alternative_hypothesis = "true mean >" + str(hypothesized_mean)
# p-values
p_value = 1.0 - t.cdf(estimates, degree)
# confidence interval - greater
critical_val = t.ppf(1.0 - alpha, degree)
width = critical_val * np.std(table[input_col]) / math.sqrt(n - 1)
lower_conf_interval = np.mean(table[input_col]) - width
upper_conf_interval = math.inf
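                # Clarifying note (added): as implemented above, this is the one-sided interval
                # [mean - t_{1-alpha, n-1} * s / sqrt(n-1), +inf) used for the
                # "true mean > hypothesized_mean" alternative.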
# model
alter = 'true mean > {hypothesized_mean}'.format(hypothesized_mean=hypothesized_mean)
alter_list.append(alter)
p_list.append(p_value)
conf_interval = '({lower_conf_interval}, {upper_conf_interval})'.format(lower_conf_interval=lower_conf_interval, upper_conf_interval=upper_conf_interval)
CI_list.append(conf_interval)
# out_table
list = []
list.append([data, alternative_hypothesis, statistics, estimates, p_value, conf_level, lower_conf_interval, upper_conf_interval])
                out_table = out_table.append(pd.DataFrame(list, columns=cols))
# A collection of helper functions that are used throughout. This file is aimed to avoid replication of code.
import pandas as pd
def read_in_NNDSS(date_string, apply_delay_at_read=False, apply_inc_at_read=False, running_epyreff=False):
"""
A general function to read in the NNDSS data. Alternatively this can be manually set to read in the linelist instead.
Args:
date_string: (str) a string of the date of the data file.
Returns:
A dataframe of all NNDSS data.
"""
import numpy as np
from datetime import timedelta
import glob
from params import use_linelist, assume_local_cases_if_unknown
from params import scale_inc_omicron, shape_inc_omicron, scale_inc, shape_inc, scale_rd, shape_rd, offset_rd, offset_inc, omicron_dominance_date
if not use_linelist:
# On occasion the date string in NNDSS will be missing the leading 0 (e.g. 2Aug2021 vs 02Aug2021). In this case manually add the zero.
        case_file_date = pd.to_datetime(date_string)
from typing import List
import numpy as np
import pandas as pd
import stockstats
import talib
import copy
class BasicProcessor:
def __init__(self, data_source: str, start_date, end_date, time_interval, **kwargs):
assert data_source in {
"alpaca",
"baostock",
"ccxt",
"binance",
"iexcloud",
"joinquant",
"quandl",
"quantconnect",
"ricequant",
"wrds",
"yahoofinance",
"tusharepro",
}, "Data source input is NOT supported yet."
self.data_source: str = data_source
self.start_date: str = start_date
self.end_date: str = end_date
self.time_interval: str = time_interval # standard time_interval
# transferred_time_interval will be supported in the future.
# self.nonstandard_time_interval: str = self.calc_nonstandard_time_interval() # transferred time_interval of this processor
self.time_zone: str = ""
self.dataframe: pd.DataFrame = pd.DataFrame()
self.dictnumpy: dict = {} # e.g., self.dictnumpy["open"] = np.array([1, 2, 3]), self.dictnumpy["close"] = np.array([1, 2, 3])
def download_data(self, ticker_list: List[str]):
pass
def clean_data(self):
if "date" in self.dataframe.columns.values.tolist():
self.dataframe.rename(columns={'date': 'time'}, inplace=True)
if "datetime" in self.dataframe.columns.values.tolist():
self.dataframe.rename(columns={'datetime': 'time'}, inplace=True)
if self.data_source == "ccxt":
self.dataframe.rename(columns={'index': 'time'}, inplace=True)
if self.data_source == 'ricequant':
''' RiceQuant data is already cleaned, we only need to transform data format here.
No need for filling NaN data'''
self.dataframe.rename(columns={'order_book_id': 'tic'}, inplace=True)
# raw df uses multi-index (tic,time), reset it to single index (time)
self.dataframe.reset_index(level=[0, 1], inplace=True)
# check if there is NaN values
assert not self.dataframe.isnull().values.any()
elif self.data_source == 'baostock':
self.dataframe.rename(columns={'code': 'tic'}, inplace=True)
self.dataframe.dropna(inplace=True)
# adj_close: adjusted close price
if 'adj_close' not in self.dataframe.columns.values.tolist():
self.dataframe['adj_close'] = self.dataframe['close']
self.dataframe.sort_values(by=['time', 'tic'], inplace=True)
self.dataframe = self.dataframe[['tic', 'time', 'open', 'high', 'low', 'close', 'adj_close', 'volume']]
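    # Illustrative workflow (hedged; tickers, dates and indicator names are placeholders,
    # and a concrete subclass must implement download_data for the chosen data source):
    #   p = SomeYahooProcessor('yahoofinance', '2020-01-01', '2021-01-01', '1D')
    #   p.download_data(['AAPL', 'MSFT'])
    #   p.clean_data()
    #   p.add_technical_indicator(['macd', 'rsi_30'], use_stockstats_or_talib=0)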
def get_trading_days(self, start: str, end: str) -> List[str]:
if self.data_source in ["binance", "ccxt", "quantconnect", "ricequant", "tusharepro"]:
print(f"Calculate get_trading_days not supported for {self.data_source} yet.")
return None
# use_stockstats_or_talib: 0 (stockstats, default), or 1 (use talib). Users can choose the method.
def add_technical_indicator(self, tech_indicator_list: List[str], use_stockstats_or_talib: int = 0):
"""
        Calculate technical indicators.
        Uses the stockstats or talib package to add technical indicator columns to self.dataframe.
        :param tech_indicator_list: (List[str]) names of the indicators to compute
        :param use_stockstats_or_talib: (int) 0 to use stockstats (default), 1 to use talib
"""
if "date" in self.dataframe.columns.values.tolist():
self.dataframe.rename(columns={'date': 'time'}, inplace=True)
if self.data_source == "ccxt":
self.dataframe.rename(columns={'index': 'time'}, inplace=True)
self.dataframe.reset_index(drop=False, inplace=True)
if "level_1" in self.dataframe.columns:
self.dataframe.drop(columns=["level_1"], inplace=True)
if "level_0" in self.dataframe.columns and "tic" not in self.dataframe.columns:
self.dataframe.rename(columns={"level_0": "tic"}, inplace=True)
assert use_stockstats_or_talib in {0, 1}
print("tech_indicator_list: ", tech_indicator_list)
if use_stockstats_or_talib == 0: # use stockstats
stock = stockstats.StockDataFrame.retype(self.dataframe)
unique_ticker = stock.tic.unique()
for indicator in tech_indicator_list:
print("indicator: ", indicator)
indicator_df = pd.DataFrame()
for i in range(len(unique_ticker)):
try:
temp_indicator = stock[stock.tic == unique_ticker[i]][indicator]
temp_indicator = pd.DataFrame(temp_indicator)
temp_indicator["tic"] = unique_ticker[i]
temp_indicator["time"] = self.dataframe[self.dataframe.tic == unique_ticker[i]][
"time"
].to_list()
indicator_df = indicator_df.append(
temp_indicator, ignore_index=True
)
except Exception as e:
print(e)
if not indicator_df.empty:
self.dataframe = self.dataframe.merge(
indicator_df[["tic", "time", indicator]], on=["tic", "time"], how="left"
)
else: # use talib
            final_df = pd.DataFrame()
from itertools import chain
import operator
import numpy as np
import pytest
from pandas.core.dtypes.common import is_number
from pandas import (
DataFrame,
Index,
Series,
)
import pandas._testing as tm
from pandas.core.groupby.base import maybe_normalize_deprecated_kernels
from pandas.tests.apply.common import (
frame_transform_kernels,
series_transform_kernels,
)
@pytest.mark.parametrize("func", ["sum", "mean", "min", "max", "std"])
@pytest.mark.parametrize(
"args,kwds",
[
pytest.param([], {}, id="no_args_or_kwds"),
pytest.param([1], {}, id="axis_from_args"),
pytest.param([], {"axis": 1}, id="axis_from_kwds"),
pytest.param([], {"numeric_only": True}, id="optional_kwds"),
pytest.param([1, True], {"numeric_only": True}, id="args_and_kwds"),
],
)
@pytest.mark.parametrize("how", ["agg", "apply"])
def test_apply_with_string_funcs(request, float_frame, func, args, kwds, how):
if len(args) > 1 and how == "agg":
request.node.add_marker(
pytest.mark.xfail(
raises=TypeError,
reason="agg/apply signature mismatch - agg passes 2nd "
"argument to func",
)
)
result = getattr(float_frame, how)(func, *args, **kwds)
expected = getattr(float_frame, func)(*args, **kwds)
tm.assert_series_equal(result, expected)
def test_with_string_args(datetime_series):
for arg in ["sum", "mean", "min", "max", "std"]:
result = datetime_series.apply(arg)
expected = getattr(datetime_series, arg)()
assert result == expected
@pytest.mark.parametrize("op", ["mean", "median", "std", "var"])
@pytest.mark.parametrize("how", ["agg", "apply"])
def test_apply_np_reducer(float_frame, op, how):
# GH 39116
float_frame = DataFrame({"a": [1, 2], "b": [3, 4]})
result = getattr(float_frame, how)(op)
# pandas ddof defaults to 1, numpy to 0
kwargs = {"ddof": 1} if op in ("std", "var") else {}
expected = Series(
getattr(np, op)(float_frame, axis=0, **kwargs), index=float_frame.columns
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"op", ["abs", "ceil", "cos", "cumsum", "exp", "log", "sqrt", "square"]
)
@pytest.mark.parametrize("how", ["transform", "apply"])
def test_apply_np_transformer(float_frame, op, how):
# GH 39116
# float_frame will _usually_ have negative values, which will
# trigger the warning here, but let's put one in just to be sure
float_frame.iloc[0, 0] = -1.0
warn = None
if op in ["log", "sqrt"]:
warn = RuntimeWarning
with tm.assert_produces_warning(warn):
result = getattr(float_frame, how)(op)
expected = getattr(np, op)(float_frame)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"series, func, expected",
chain(
tm.get_cython_table_params(
Series(dtype=np.float64),
[
("sum", 0),
("max", np.nan),
("min", np.nan),
("all", True),
("any", False),
("mean", np.nan),
("prod", 1),
("std", np.nan),
("var", np.nan),
("median", np.nan),
],
),
tm.get_cython_table_params(
Series([np.nan, 1, 2, 3]),
[
("sum", 6),
("max", 3),
("min", 1),
("all", True),
("any", True),
("mean", 2),
("prod", 6),
("std", 1),
("var", 1),
("median", 2),
],
),
tm.get_cython_table_params(
Series("a b c".split()),
[
("sum", "abc"),
("max", "c"),
("min", "a"),
("all", True),
("any", True),
],
),
),
)
def test_agg_cython_table_series(series, func, expected):
# GH21224
# test reducing functions in
# pandas.core.base.SelectionMixin._cython_table
result = series.agg(func)
if is_number(expected):
assert np.isclose(result, expected, equal_nan=True)
else:
assert result == expected
@pytest.mark.parametrize(
"series, func, expected",
chain(
tm.get_cython_table_params(
Series(dtype=np.float64),
[
("cumprod", Series([], Index([]), dtype=np.float64)),
("cumsum", Series([], Index([]), dtype=np.float64)),
],
),
tm.get_cython_table_params(
Series([np.nan, 1, 2, 3]),
[
("cumprod", Series([np.nan, 1, 2, 6])),
("cumsum", Series([np.nan, 1, 3, 6])),
],
),
tm.get_cython_table_params(
Series("a b c".split()), [("cumsum", Series(["a", "ab", "abc"]))]
),
),
)
def test_agg_cython_table_transform_series(series, func, expected):
# GH21224
# test transforming functions in
# pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
result = series.agg(func)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"df, func, expected",
chain(
tm.get_cython_table_params(
DataFrame(),
[
("sum", Series(dtype="float64")),
("max", Series(dtype="float64")),
("min", Series(dtype="float64")),
("all", Series(dtype=bool)),
("any", Series(dtype=bool)),
("mean", Series(dtype="float64")),
("prod", Series(dtype="float64")),
("std", Series(dtype="float64")),
("var", | Series(dtype="float64") | pandas.Series |
import pandas as pd
import pytest
import woodwork as ww
from woodwork.logical_types import Boolean, Double, Integer
from rayml.exceptions import MethodPropertyNotFoundError
from rayml.pipelines.components import (
ComponentBase,
FeatureSelector,
RFClassifierSelectFromModel,
RFRegressorSelectFromModel,
)
def make_rf_feature_selectors():
rf_classifier = RFClassifierSelectFromModel(
number_features=5,
n_estimators=10,
max_depth=7,
percent_features=0.5,
threshold=0,
)
rf_regressor = RFRegressorSelectFromModel(
number_features=5,
n_estimators=10,
max_depth=7,
percent_features=0.5,
threshold=0,
)
return rf_classifier, rf_regressor
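# Note (illustrative, not authoritative): both selectors keep roughly half of the input
# features (percent_features=0.5 with threshold=0), ranked by random-forest importance,
# which is why the woodwork test below expects 2 of the 4 columns to survive.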
def test_init():
rf_classifier, rf_regressor = make_rf_feature_selectors()
assert rf_classifier.name == "RF Classifier Select From Model"
assert rf_regressor.name == "RF Regressor Select From Model"
def test_component_fit(X_y_binary, X_y_multi, X_y_regression):
X_binary, y_binary = X_y_binary
X_multi, y_multi = X_y_multi
X_reg, y_reg = X_y_regression
rf_classifier, rf_regressor = make_rf_feature_selectors()
assert isinstance(rf_classifier.fit(X_binary, y_binary), ComponentBase)
assert isinstance(rf_classifier.fit(X_multi, y_multi), ComponentBase)
assert isinstance(rf_regressor.fit(X_reg, y_reg), ComponentBase)
def test_feature_selector_missing_component_obj():
class MockFeatureSelector(FeatureSelector):
name = "Mock Feature Selector"
def fit(self, X, y):
return self
mock_feature_selector = MockFeatureSelector()
mock_feature_selector.fit(pd.DataFrame(), pd.Series())
with pytest.raises(
MethodPropertyNotFoundError,
match="Feature selector requires a transform method or a component_obj that implements transform",
):
mock_feature_selector.transform(pd.DataFrame())
with pytest.raises(
MethodPropertyNotFoundError,
match="Feature selector requires a transform method or a component_obj that implements transform",
):
mock_feature_selector.fit_transform(pd.DataFrame())
def test_feature_selector_component_obj_missing_transform():
class MockFeatureSelector(FeatureSelector):
name = "Mock Feature Selector"
def __init__(self):
self._component_obj = None
def fit(self, X, y):
return self
mock_feature_selector = MockFeatureSelector()
mock_feature_selector.fit(pd.DataFrame(), pd.Series())
with pytest.raises(
MethodPropertyNotFoundError,
match="Feature selector requires a transform method or a component_obj that implements transform",
):
mock_feature_selector.transform(pd.DataFrame())
with pytest.raises(
MethodPropertyNotFoundError,
match="Feature selector requires a transform method or a component_obj that implements transform",
):
mock_feature_selector.fit_transform(pd.DataFrame())
def test_feature_selectors_drop_columns_maintains_woodwork():
X = pd.DataFrame({"a": [1, 2, 3], "b": [2, 4, 6], "c": [1, 2, 3], "d": [1, 2, 3]})
X.ww.init(logical_types={"a": "double", "b": "categorical"})
y = pd.Series([0, 1, 1])
rf_classifier, rf_regressor = make_rf_feature_selectors()
rf_classifier.fit(X, y)
X_t = rf_classifier.transform(X, y)
assert len(X_t.columns) == 2
rf_regressor.fit(X, y)
X_t = rf_regressor.transform(X, y)
assert len(X_t.columns) == 2
@pytest.mark.parametrize(
"X_df",
[
pd.DataFrame(
pd.to_datetime(["20190902", "20200519", "20190607"], format="%Y%m%d")
),
pd.DataFrame(pd.Series([1, 2, 3], dtype="Int64")),
pd.DataFrame(pd.Series([1.0, 2.0, 3.0], dtype="float")),
pd.DataFrame(pd.Series(["a", "b", "a"], dtype="category")),
pd.DataFrame(pd.Series([True, False, True], dtype="boolean")),
pd.DataFrame(
pd.Series(
["this will be a natural language column because length", "yay", "hay"],
dtype="string",
)
),
],
)
def test_feature_selectors_woodwork_custom_overrides_returned_by_components(X_df):
rf_classifier, rf_regressor = make_rf_feature_selectors()
y = pd.Series([1, 2, 1])
X_df["another column"] = | pd.Series([1.0, 2.0, 3.0], dtype="float") | pandas.Series |
from bs4 import BeautifulSoup
import requests
import pandas as pd
import datetime
from selenium import webdriver
page_link = 'http://lefthandditchcompany.com/SystemStatus.aspx'
page_response = requests.get(page_link, timeout=60, verify=False)
body = BeautifulSoup(page_response.content, 'lxml')
Creekflow = body.find("span", id="ctl00_MainContentPlaceHolder_CreekFlowAFLabel").get_text()
CFS = body.find("span", id="ctl00_MainContentPlaceHolder_CreekFlowCFSLabel").get_text()
Issues = body.find("span", id ="ctl00_MainContentPlaceHolder_CreekFlowIssueCFSPerShareLabel").get_text()
Current_Gold = body.find("span", id='ctl00_MainContentPlaceHolder_GoldAFLabel').get_text()
Current_Isabelle = body.find("span", id='ctl00_MainContentPlaceHolder_IsabelleAFLabel').get_text()
Current_LHP = body.find("span", id="ctl00_MainContentPlaceHolder_LHParkAFLabel").get_text()
Current_LHV = body.find("span", id="ctl00_MainContentPlaceHolder_LHValleyAFLabel").get_text()
Current_Allens = body.find("span", id='ctl00_MainContentPlaceHolder_AllensAFLabel').get_text()
Current_Total = body.find("span", id='ctl00_MainContentPlaceHolder_TotalAFLabel').get_text()
Empty_Gold = body.find("span", id="ctl00_MainContentPlaceHolder_GoldEmptyAFLabel").get_text()
Empty_Isabelle = body.find("span", id="ctl00_MainContentPlaceHolder_IsabelleEmptyAFLabel").get_text()
Empty_LHP = body.find("span",id="ctl00_MainContentPlaceHolder_LHParkEmptyAFLabel").get_text()
Empty_LHV = body.find("span", id="ctl00_MainContentPlaceHolder_LHValleyEmptyAFLabel").get_text()
Empty_Allens = body.find("span", id="ctl00_MainContentPlaceHolder_AllensEmptyAFLabel").get_text()
Full_Gold = body.find("span", id="ctl00_MainContentPlaceHolder_GoldFullAFLabel").get_text()
Full_Isabelle = body.find("span", id="ctl00_MainContentPlaceHolder_IsabelleFullAFLabel").get_text()
Full_LHP = body.find("span", id="ctl00_MainContentPlaceHolder_LHParkFullAFLabel").get_text()
Full_LHV = body.find("span", id='ctl00_MainContentPlaceHolder_LHValleyFullAFLabel').get_text()
Full_Allens = body.find("span", id="ctl00_MainContentPlaceHolder_AllensFullAFLabel").get_text()
dictionary = {'Creekflow': Creekflow, 'CFS': CFS, 'Issues': Issues,
'CurrentGold': Current_Gold, 'CurrentIsabelle': Current_Isabelle, 'CurrentLHP': Current_LHP,
'CurrentLHV': Current_LHV, 'CurrentAllens': Current_Allens, 'CurrentTotal': Current_Total,
'EmptyGold': Empty_Gold, 'EmptyIsabelle': Empty_Isabelle, 'EmptyLHP': Empty_LHP,
'EmptyLHV': Empty_LHV, 'EmptyAllens': Empty_Allens, 'FullGold': Full_Gold,
'FullIsabelle': Full_Isabelle, 'FullLHP': Full_LHP, 'FullLHV': Full_LHV, 'FullAllens': Full_Allens}
df = pd.DataFrame(dictionary, index=[0])
df.CFS = df.CFS.str.replace('(','')
df.CFS = df.CFS.str.replace(')','')
df.CFS = df.CFS.str.replace('CFS','')
df.Issues = df.Issues.str.replace('(','')
df.Issues = df.Issues.str.replace(')','')
df.Issues = df.Issues.str.replace('Issue:','')
df.Issues = df.Issues.str.replace('CFS / Share','')
df.CurrentLHV = df.CurrentLHV.str.replace(',','')
df.CurrentTotal = df.CurrentTotal.str.replace(',','')
df.FullLHP = df.FullLHP.str.replace(',','')
df.FullLHV = df.FullLHV.str.replace(',','')
df.FullAllens = df.FullAllens.str.replace(',','')
df.Creekflow = pd.to_numeric(df.Creekflow)
df.CFS = pd.to_numeric(df.CFS)
df.Issues = pd.to_numeric(df.Issues)
import logging, os, sys, pickle, json, time, yaml, glob
from datetime import datetime as dt
import warnings
warnings.filterwarnings('ignore')
import subprocess
from itertools import chain
from tqdm import tqdm
import networkx as nx
import pandas as pd
from math import pi
import numpy as np
from kedro.io import DataCatalog
from ffsc.flow.simplex import network_simplex
from ffsc.interdiction.gp import *
from ffsc.interdiction.dijkstra_methods import differential_fill
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
N_WORKERS=14
# total cost overflowing max int: 2147483647
import multiprocessing as mp
def sds_demand_counterfactual(iso2, df_sds, df_nodes, node_iso2, edge_df, params, dijkstra_min_adj):
"""
Make flow df for SDS case
"""
## turn the dijkstra results into flow dfs
if 'COALMINE' in df_nodes['NODETYPE'].unique():
carrier = 'coal'
source_types = ['COALMINE']
elif 'LNGTERMINAL' in df_nodes['NODETYPE'].unique():
carrier= 'gas'
source_types = ['OILFIELD','OILWELL']
else:
carrier='oil'
source_types = ['OILFIELD','OILWELL']
logger = logging.getLogger(f'SDS_counterfact_{carrier}')
node_iso2 = node_iso2.groupby('NODE').nth(0).reset_index()
# merge iso2 -> correct for bad geometries
df_nodes = pd.merge(df_nodes, node_iso2, how='left', left_on='NODE',right_on='NODE')
df_nodes.loc[df_nodes['NODE'].isin(['CITY_218','CITY_219','CITY_220']),'iso2']='AR'
df_nodes.loc[df_nodes['NODE'].isin(['CITY_3565','POWERSTATION_8344']), 'iso2']='DK'
df_nodes.loc[df_nodes['NODE'].isin(['CITY_4635','CITY_4636','POWERSTATION_10692']), 'iso2'] = 'GA'
df_nodes.loc[df_nodes['NODE'].isin(['POWERSTATION_27208','POWERSTATION_26808']), 'iso2'] = 'US'
df_nodes.loc[df_nodes['NODE'].isin(['POWERSTATION_13119']), 'iso2'] = 'IE'
df_nodes.loc[df_nodes['NODE'].isin(['POWERSTATION_5117']), 'iso2'] = 'CN'
df_nodes.loc[df_nodes['NODE'].isin(['POWERSTATION_7847']),'iso2'] = 'CY'
df_nodes.loc[df_nodes['NODE'].isin(['POWERSTATION_14316', 'POWERSTATION_14317', 'POWERSTATION_14321']),'iso2'] = "'NA"
# merge region
df_nodes = pd.merge(df_nodes, iso2[['iso2','region_weo2019']], how='left', left_on='iso2',right_on='iso2')
# get reduction in final and power energy
df_sds['REDUCTION-FIN'] = (df_sds[f'2040SDS-TPED-{carrier.upper()}']-df_sds[f'2040SDS-POWER-{carrier.upper()}'])/(df_sds[f'2018-TPED-{carrier.upper()}'] - df_sds[f'2018-POWER-{carrier.upper()}'])
df_sds['REDUCTION-POWER'] = df_sds[f'2040SDS-POWER-{carrier.upper()}'] / df_sds[f'2018-POWER-{carrier.upper()}']
## merge reduction onto nodes
df_nodes = pd.merge(df_nodes,df_sds[['REGION','REDUCTION-FIN','REDUCTION-POWER']], how='left',left_on='region_weo2019',right_on='REGION')
# take reduction out of demand
df_nodes.loc[df_nodes['NODETYPE']=='CITY','D'] = df_nodes.loc[df_nodes['NODETYPE']=='CITY','D']* df_nodes.loc[df_nodes['NODETYPE']=='CITY','REDUCTION-FIN']
df_nodes.loc[df_nodes['NODETYPE']=='POWERSTATION','D'] = df_nodes.loc[df_nodes['NODETYPE']=='POWERSTATION','D'] * df_nodes.loc[df_nodes['NODETYPE']=='POWERSTATION','REDUCTION-POWER']
    # accommodate Antarctica
df_nodes.loc[df_nodes['iso2']=='AQ','D'] = 0
print (df_nodes)
print (df_nodes[df_nodes['D'].isin([np.nan, np.inf, -np.inf])])
# round and int demand
df_nodes['D'] = np.round(df_nodes['D'],0).astype(int)
logger.info('loading run data')
run_data = pickle.load(open(params['flowfill_run'][carrier],'rb'))
SCALE_FACTOR = run_data['SCALE_FACTOR']
STEP_INI = run_data['STEP_INI']
GAMMA = run_data['GAMMA']
df_alpha = run_data['ALPHA']
logger.info('running fill algo')
ii_w, df_flow, df_z = differential_fill(df_nodes, dijkstra_min_adj, df_alpha, STEP_INI, GAMMA,SCALE_FACTOR, params, logging.getLogger('ini_diff_fill'), None)
print ('df flow')
print (df_flow)
logger.info('prepping dfs')
sources = df_flow.sum()[df_flow.sum()>0].index.tolist()
df_nodes = df_nodes.reset_index().rename(columns={'index':'idx'}).set_index('NODE')
edge_df['flow']=0
edge_df = pd.merge(edge_df, df_nodes[['idx']], how='left',left_on='source',right_index=True).rename(columns={'idx':'source_idx'})
edge_df = pd.merge(edge_df, df_nodes[['idx']], how='left',left_on='target',right_index=True).rename(columns={'idx':'target_idx'})
edge_df = edge_df.set_index(['source_idx','target_idx'])
logger.info('filling flow paths')
for source in tqdm(sources, desc='adding flow'):
source_idx = df_nodes.at[source,'idx']
if carrier=='oil':
paths = pickle.load(open(f'/paths/{carrier}_join/{source_idx}.pkl','rb'))
else:
paths = pickle.load(open(f'/paths/{carrier}/{source_idx}.pkl','rb'))
for dest in df_flow.loc[df_flow[source]>0,source].index.values.tolist():
dest_idx = df_nodes.at[dest,'idx']
add_flow = df_flow.at[dest,source]
#print('add flow',add_flow)
node_path_list = paths[dest_idx]
node_path_tups = list(zip(node_path_list[0:-1],node_path_list[1:]))
if carrier=='oil':
# make sure the idxs arent the same
node_path_tups = [nn for nn in node_path_tups if nn[0]!=nn[1]]
#print ('node_path_tups',node_path_tups)
edge_df.loc[node_path_tups,'flow']+=add_flow
return edge_df.reset_index().drop(columns=['source_idx','target_idx'])
def interdict_supply_worker(ii_w, carrier, params):
if ii_w==None:
worker_logger=logging.getLogger(f'baseline_{carrier}')
else:
worker_logger=logging.getLogger(f'{ii_w}_{carrier}')
worker_logger.info('Loading catalog data')
catalog = yaml.load(open(os.path.join(os.getcwd(),'conf','base','catalog.yml'),'r'),Loader=yaml.SafeLoader)
kedro_catalog = DataCatalog.from_config(catalog)
node_df = kedro_catalog.load(f'community_{carrier}_nodes')
dijkstra_min_adj = kedro_catalog.load(f'{carrier}_dijkstra_mincost_adj')
worker_logger.info('loading run data')
run_data = pickle.load(open(params['flowfill_run'][carrier],'rb'))
SCALE_FACTOR = run_data['SCALE_FACTOR']
STEP_INI = run_data['STEP_INI']
GAMMA = run_data['GAMMA']
df_alpha = run_data['ALPHA']
if ii_w!=None:
worker_logger.info('applying interdiction')
df_alpha.iloc[ii_w] = df_alpha.iloc[ii_w]*2
worker_logger.info('running fill algo')
_, df_flow, df_z = differential_fill(node_df, dijkstra_min_adj, df_alpha, STEP_INI, GAMMA,SCALE_FACTOR, params, worker_logger, None)
worker_logger.info('got result, pickling')
# don't keep impedance, can calculate after.
if ii_w==None:
pickle.dump(df_flow, open(os.path.join(os.getcwd(),'results','interdiction','supply',carrier,f'flow_baseline.pkl'),'wb'))
#pickle.dump(df_z, open(os.path.join(os.getcwd(),'results','interdiction','supply',carrier,f'z_baseline.pkl'),'wb'))
else:
pickle.dump(df_flow, open(os.path.join(os.getcwd(),'results','interdiction','supply',carrier,f'flow_{ii_w}.pkl'),'wb'))
#pickle.dump(df_z, open(os.path.join(os.getcwd(),'results','interdiction','supply',carrier,f'z_{ii_w}.pkl'),'wb'))
return 1
def post_interdict_supply(df_nodes, params, dijkstra_adj):
"""
Post-process the supply interdictions
"""
## turn the dijkstra results into flow dfs
if 'COALMINE' in df_nodes['NODETYPE'].unique():
carrier = 'coal'
interdict_path = '/paths/supply/coal/flow_*.pkl'
elif 'LNGTERMINAL' in df_nodes['NODETYPE'].unique():
carrier= 'gas'
interdict_path = os.path.join(os.getcwd(),'results','interdiction','supply','gas','flow_*.pkl')
else:
carrier='oil'
interdict_path = os.path.join(os.getcwd(),'results','interdiction','supply','oil','flow_*.pkl')
run_data = pickle.load(open(params['flowfill_run'][carrier],'rb'))
SCALE_FACTOR = run_data['SCALE_FACTOR']
STEP_INI = run_data['STEP_INI']
GAMMA = run_data['GAMMA']
df_alpha = run_data['ALPHA']
print ('df_alpha')
print (df_alpha)
# for all the supplies do records then parse to a df
supply_pickles = glob.glob(interdict_path)
bl_path = [f for f in supply_pickles if 'baseline' in f][0]
interdict_paths = [f for f in supply_pickles if 'baseline' not in f]
# do the baseline stuff
bl_flow = pickle.load(open(bl_path,'rb'))
bl_transmission_cost = bl_flow*dijkstra_adj
    supply_marginal_cost = bl_flow.sum()*df_alpha # correct
bl_supply_cost = bl_flow * supply_marginal_cost
bl_total_cost = bl_transmission_cost + bl_supply_cost
demand_cost = pd.DataFrame(bl_total_cost.sum(axis=1)).rename(columns={0:'baseline'})
print (demand_cost)
print ('bl',bl_total_cost.sum().sum())
# get the total cost
records = []
for f in tqdm(interdict_paths):
        id_flow = pickle.load(open(f,'rb'))
idx = int(f.split('_')[-1][:-4])
id_alpha = df_alpha.copy()
id_alpha.iloc[idx] = id_alpha.iloc[idx]*2
id_transmission_cost = id_flow*dijkstra_adj
        supply_marginal_cost = id_flow.sum()*id_alpha # correct
id_supply_cost = id_flow * supply_marginal_cost
id_total_cost = id_transmission_cost + id_supply_cost
demand_cost['id'] = id_total_cost.sum(axis=1)
demand_cost['diff'] = demand_cost['id'] - demand_cost['baseline']
demand_cost['increase'] = demand_cost['diff']/demand_cost['baseline']
record = {
'idx':idx,
'NODE':id_alpha.index[idx],
'total_cost':id_total_cost.sum().sum(),
'top_5':demand_cost.reset_index().sort_values('diff').iloc[-5:, [demand_cost.reset_index().columns.get_loc(c) for c in ['index','diff','increase']]].values.tolist()
}
records.append(record)
# records to dataframe
results_df = pd.DataFrame(records)
pickle.dump(results_df, open(os.path.join(os.getcwd(),f'{carrier}_supply_interdict.pkl'),'wb'))
return []
def interdict_supply(node_df, params, dijkstra_min_adj):
"""
For all supply nodes, try doubling the quadratic term
"""
## turn the dijkstra results into flow dfs
if 'COALMINE' in node_df['NODETYPE'].unique():
carrier = 'coal'
source_types = ['COALMINE']
elif 'LNGTERMINAL' in node_df['NODETYPE'].unique():
carrier= 'gas'
source_types = ['OILFIELD','OILWELL']
else:
carrier='oil'
source_types = ['OILFIELD','OILWELL']
logger = logging.getLogger(f'Dijkstra_post_{carrier}')
run_data = pickle.load(open(params['flowfill_run'][carrier],'rb'))
df_alpha = run_data['ALPHA']
idxs = [i for i,x in enumerate(df_alpha.index.isin(dijkstra_min_adj.columns).tolist()) if x]
print (idxs)
logger.info(f'Running {len(idxs)} fills with {N_WORKERS} workers')
### check what needs to be run
flow_pickles = glob.glob(os.path.join(os.getcwd(),'results','interdiction','supply','gas','flow*.pkl'))
done_idx = [f.split('_')[-1][:-4] for f in flow_pickles]
idxs = [ii for ii in idxs if str(ii) not in done_idx]
if 'baseline' not in done_idx:
idxs = [None]+idxs
logger.info(f'{len(idxs)} left to run')
pool = mp.Pool(N_WORKERS)
# redo just [None,0]
map_params = list(
zip(
idxs,
[carrier]*(len(idxs)),
[params]*(len(idxs)),
)
)
results = pool.starmap(interdict_supply_worker, map_params)
print ('results')
print (results)
return []
def call_pypy_dijkstra(call_params):
call_params = json.loads(call_params)
print (call_params)
process = subprocess.Popen([str(r) for r in call_params],shell=True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
for line in process.stdout:
print(line.decode(), end='')
process.stdout.close()
return_code = process.wait()
return True
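# Example (hedged): call_params is expected to be a JSON-encoded list of command tokens,
# e.g. json.dumps(["pypy", "dijkstra_worker.py", "--carrier", "gas"]); the worker script
# name is a hypothetical placeholder. Worker stdout is streamed back into this process.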
def dijkstra_post_oil(df_nodes):
#### first: generate an adjacent costs matrix for oil.
logger = logging.getLogger('Post_oil')
# load all the cost pickles
logger.info('Load all the cost pickles')
picklefiles = glob.glob(os.path.join(os.getcwd(),'results','interdiction','dijkstra_paths','oil','costs','*.pkl'))
pkl_data = {int(os.path.splitext(f.split('/')[-1])[0]):pickle.load(open(f,'rb')) for f in picklefiles}
print (pkl_data.keys())
df_nodes = df_nodes.reset_index().rename(columns={'index':'idx'})
target_nodes = df_nodes.loc[df_nodes['NODETYPE'].isin(['CITY','POWERSTATION']),'idx'].values.tolist()
source_nodes = df_nodes.loc[df_nodes['NODETYPE'].isin(['OILWELL','OILFIELD']) & df_nodes['idx'].isin(pkl_data.keys()),'idx'].values.tolist()
pkl_data = {(kk1,kk2):cost for kk1,vv in pkl_data.items() for kk2, cost in vv.items() }
#print (pkl_data.keys())
start_nodes = list(set([kk[0] for kk in list(pkl_data.keys()) if kk[0] in source_nodes]))
# make them into an nx digraph
G = nx.DiGraph()
G.add_edges_from([(kk_tup[0],kk_tup[1], {'z':cost}) for kk_tup, cost in pkl_data.items()])
#print('nodes')
#print(G.nodes)
# get supply and target nodes on graph, and call dijkstra for all supply nodes
dijkstra_results = {}
for sn in tqdm(start_nodes):
costs,paths = nx.single_source_dijkstra(G, sn, target=None, cutoff=None, weight='z')
#print (sn)
#print (costs, paths)
dijkstra_results[sn] = {'costs':costs,'paths':paths}
# parse results into adjacency matrix
df_adj = pd.DataFrame(
{sn:{tn:dijkstra_results[sn]['costs'][tn] for tn in target_nodes if tn in dijkstra_results[sn]['costs'].keys()}
for sn in start_nodes})
idx_mapper = {r[0]:r[1] for r in df_nodes[['idx','NODE']].values.tolist()}
df_adj.index = df_adj.index.astype(int).map(idx_mapper)
df_adj.columns = df_adj.columns.astype(int).map(idx_mapper)
print (df_adj)
### second: combine paths
for sn in start_nodes:
crude_paths = pickle.load(open(f'/paths/oil/{sn}.pkl','rb'))
for tn in dijkstra_results[sn]['paths'].keys():
master_path = dijkstra_results[sn]['paths'][tn]
inter_idx = master_path[1]
product_paths = pickle.load(open(f'/paths/oil/{inter_idx}.pkl','rb'))
print ('crude path')
print (crude_paths[inter_idx])
print ('product_path')
print (product_paths[tn])
exit()
# for each source:
## for each target:
### combine paths into new dict
# return adjacency matrix
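# Hedged sketch of the "combine paths" step outlined in the comments above
# (illustrative only; not called by dijkstra_post_oil). The crude leg runs
# source -> intermediate node and the product leg runs intermediate node ->
# target, so the joined path drops the duplicated junction node, e.g.
# combine_crude_product_paths(crude_paths[inter_idx], product_paths[tn]).
def combine_crude_product_paths(crude_path, product_path):
    return list(crude_path) + list(product_path)[1:]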
def dijkstra_post_parse(community_nodes):
if 'COALMINE' in community_nodes['NODETYPE'].unique():
carrier = 'coal'
source_types = ['COALMINE']
elif 'LNGTERMINAL' in community_nodes['NODETYPE'].unique():
carrier= 'gas'
source_types = ['OILFIELD','OILWELL']
else:
carrier='oil'
source_types = ['OILFIELD','OILWELL']
logger=logging.getLogger(f'{carrier} parse dijkstra')
community_nodes = community_nodes.reset_index().rename(columns={'index':'idx'})
cost_pkl_fs = glob.glob(os.path.join(os.getcwd(),'results','interdiction','dijkstra_paths',carrier,'costs','*'))
logger.info(f'found {len(cost_pkl_fs)} pickle files')
logger.info('Loading pickles...')
cost_pkls = {os.path.splitext(os.path.split(el)[1])[0]:pickle.load(open(el,'rb')) for el in cost_pkl_fs}
logger.info('Parsing to df')
df = pd.DataFrame()
import numpy as np
import pandas as pd
import joblib, os, pickle
from Fuzzy_clustering.version3.project_manager.PredictModelManager.Clusterer import clusterer
from Fuzzy_clustering.version3.project_manager.PredictModelManager.ClusterPredictManager import ClusterPredict
class FullClusterPredictManager(object):
def __init__(self, path_model, static_data):
self.path_model = path_model
self.static_data = static_data
self.thres_split = static_data['clustering']['thres_split']
self.thres_act = static_data['clustering']['thres_act']
self.n_clusters = static_data['clustering']['n_clusters']
self.rated = static_data['rated']
self.var_imp = static_data['clustering']['var_imp']
self.var_lin = static_data['clustering']['var_lin']
self.var_nonreg = static_data['clustering']['var_nonreg']
try:
self.load()
except:
pass
def load_data(self):
data_path = self.static_data['path_data']
X = pd.read_csv(os.path.join(data_path, 'dataset_X.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)
y = pd.read_csv(os.path.join(data_path, 'dataset_y.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)
if os.path.exists(os.path.join(data_path, 'dataset_cnn.pickle')):
X_cnn = joblib.load(os.path.join(data_path, 'dataset_cnn.pickle'))
X_cnn = X_cnn.transpose([0, 2, 3, 1])
else:
X_cnn = np.array([])
index = X.index
index_all = X.index
if self.static_data['type'] == 'pv' and self.static_data['NWP_model'] == 'skiron':
index = np.where(X['flux'] > 1e-8)[0]
X = X.iloc[index]
y_reduced = y.iloc[index]
if X_cnn.shape[0]>0:
X_cnn = X_cnn[index]
else:
y_reduced = y
if os.path.exists(os.path.join(data_path, 'dataset_lstm.pickle')):
X_lstm = joblib.load(os.path.join(data_path, 'dataset_lstm.pickle'))
else:
X_lstm = np.array([])
return X, y, y_reduced, X_cnn, X_lstm, index, index_all
def load_test_data(self):
X, y, y_reduced, X_cnn, X_lstm, index, index_all = self.load_data()
test_ind = np.where(X.index >= self.split_test)[0]
test_ind_all = np.where(index_all >= self.split_test)[0]
index = index[index >= self.split_test]
index_all = index_all[index_all >= self.split_test]
indices_test = test_ind
X_test = X.iloc[test_ind]
y_test = y.iloc[test_ind_all]
y_test_reduced = y_reduced.iloc[test_ind]
if len(X_cnn.shape) > 1:
X_cnn_test = X_cnn[indices_test]
else:
X_cnn_test = np.array([])
if len(X_lstm.shape) > 1:
X_lstm_test = X_lstm[indices_test]
else:
X_lstm_test = np.array([])
return X_test, y_test, y_test_reduced, X_cnn_test, X_lstm_test, index, index_all
def check_if_all_nans(self, activations):
if activations.isna().all(axis=1).any():
indices = activations.index[activations.isna().all(axis=1).to_numpy().ravel()]
if indices.shape[0] > 50:
raise RuntimeError('Too many nans. Please check your model')
for ind in indices:
act = activations.loc[ind]
clust = act.idxmax()
activations.loc[ind, clust] = 0.1
return activations
def predict_clusters(self, X_test = pd.DataFrame([]), y_test = pd.DataFrame([]), X_cnn_test = np.array([]), X_lstm_test = np.array([]), test = True):
if X_test.shape[0]==0:
offline = True
else:
offline = False
if offline:
if test:
X_test, y_test_all, y_test, X_cnn_test, X_lstm_test, index, index_all = self.load_test_data()
else:
X_test, y_test_all, y_test, X_cnn_test, X_lstm_test, index, index_all = self.load_data()
else:
index = X_test.index
index_all = X_test.index
y_test_all = y_test
sc = joblib.load(os.path.join(self.static_data['path_data'], 'X_scaler.pickle'))
scale_y = joblib.load(os.path.join(self.static_data['path_data'], 'Y_scaler.pickle'))
pred_cluster = dict()
X_test = pd.DataFrame(sc.transform(X_test.values), columns=X_test.columns, index=X_test.index)
if y_test.shape[0]>0:
y_test = pd.DataFrame(scale_y.transform(y_test.values), columns=y_test.columns, index=y_test.index)
if not hasattr(self, 'clusterer'):
self.clusterer = clusterer(self.static_data['path_fuzzy_models'])
act_test = self.clusterer.compute_activations(X_test)
act_test = self.check_if_all_nans(act_test)
for clust in self.clusters.keys():
predict_module = ClusterPredict(self.static_data, self.clusters[clust])
if clust == 'global':
if len(self.clusters[clust].methods) > 0:
pred_cluster[clust] = predict_module.predict(X_test.values, X_cnn=X_cnn_test, X_lstm=X_lstm_test)
if y_test.shape[0] > 0:
pred_cluster[clust]['metrics'] = predict_module.evaluate(pred_cluster['global'], y_test.values)
pred_cluster[clust]['dates'] = X_test.index
pred_cluster[clust]['index'] = np.arange(0, X_test.shape[0])
else:
dates = X_test.index[act_test[clust] >= self.thres_act]
nind = np.where(act_test[clust] >= self.thres_act)[0]
nind.sort()
x = X_test.loc[dates]
if y_test.shape[0] > 0:
targ = y_test.loc[dates].values
if len(X_cnn_test.shape) > 1:
x_cnn = X_cnn_test[nind]
else:
x_cnn = np.array([])
if len(X_lstm_test.shape) > 1:
x_lstm = X_lstm_test[nind]
else:
x_lstm = np.array([])
pred_cluster[clust] = predict_module.predict(x.values, X_cnn=x_cnn, X_lstm=x_lstm)
if y_test.shape[0] > 0:
pred_cluster[clust]['metrics'] = predict_module.evaluate(pred_cluster[clust], targ)
pred_cluster[clust]['dates'] = dates
pred_cluster[clust]['index'] = nind
predictions = dict()
result_clust = pd.DataFrame()
import anemoi as an
import pandas as pd
import numpy as np
import scipy as sp
import statsmodels.api as sm
import scipy.odr.odrpack as odrpack
import warnings
def compare_sorted_df_columns(cols_1, cols_2):
return sorted(cols_1) == sorted(cols_2)
def valid_ws_correlation_data(data, ref_ws_col='ref', site_ws_col='site'):
'''Perform checks on wind speed correlation data.
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref_ws_col and site_ws_col
ref_ws_col: string, default 'ref'
Reference anemometer data column to use.
site_ws_col: string, default 'site'
Site anemometer data column to use.
'''
if ref_ws_col == site_ws_col:
raise ValueError("Error: Reference and site wind speed columns cannot have the same name.")
return False
if not compare_sorted_df_columns(data.columns.tolist(), [ref_ws_col, site_ws_col]):
raise ValueError("Error: the correlation data don't match the expected format.")
return False
if not data.shape[0] > 6:
warnings.warn("Warning: trying to correlate using six or fewer data points.")
return False
if (data.loc[:,ref_ws_col] == data.loc[:,site_ws_col]).sum() == data.shape[0]:
warnings.warn("Warning: it seems you are trying to correlate a single mast against itself.")
return False
return True
def return_correlation_results_frame(ref_label='ref', site_label='site'):
results = pd.DataFrame(columns=['slope', 'offset' , 'R2', 'uncert', 'points'],
index=pd.MultiIndex.from_tuples([(ref_label, site_label)],
names=['ref', 'site'])
)
return results
def return_correlation_data_from_masts(ref_mast, site_mast):
'''Return a DataFrame of reference and site data for correlations.
Will be extracted from each MetMast object using the primary anemometers and wind vanes.
:Parameters:
ref_mast: MetMast
Anemoi MetMast object
site_mast: MetMast
Anemoi MetMast object
:Returns:
out: DataFrame with columns ref, site, and dir
'''
ref_data = ref_mast.return_primary_ano_vane_data()
ref_data.columns = ['ref', 'dir']
site_data = site_mast.return_primary_ano_vane_data()
site_data.columns = ['site', 'site_dir']
data = pd.concat([ref_data, site_data.site], axis=1).dropna()
data = data.loc[:, ['ref', 'site', 'dir']]
if not valid_ws_correlation_data(data=data, ref_ws_col='ref', site_ws_col='site'):
warning_string = "Warning: {} and {} don't seem to have valid concurrent data for a correlation.".format(ref_mast.name, site_mast.name)
warnings.warn(warning_string)
return data
### CORRELATION METHODS ###
def calculate_R2(data, ref_ws_col='ref', site_ws_col='site'):
'''Return a single R2 between two wind speed columns
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref_ws_col and site_ws_col
ref_ws_col: string, default 'ref'
Reference anemometer data column to use.
site_ws_col: string, default 'site'
Site anemometer data column to use.
'''
data = data.loc[:,[ref_ws_col, site_ws_col]].dropna()
if not valid_ws_correlation_data(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
return np.nan
r2 = data[ref_ws_col].corr(data[site_ws_col])**2
return r2
def calculate_IEC_uncertainty(data, ref_ws_col='ref', site_ws_col='site'):
'''Calculate the IEC correlation uncertainty between two wind speed columns
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref_ws_col and site_ws_col
ref_ws_col: string, default 'ref'
Reference anemometer data column to use.
site_ws_col: string, default 'site'
Site anemometer data column to use.
'''
data = data.loc[:,[ref_ws_col, site_ws_col]].dropna()
if not valid_ws_correlation_data(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
return np.nan
X = data.loc[:,ref_ws_col].values
Y = data.loc[:,site_ws_col].values
uncert = np.std(Y/X)*100/len(X)
return uncert*100.0
def calculate_EDF_uncertainty(data, ref_ws_col='ref', site_ws_col='site'):
'''Calculate the EDF estimated correlation uncertainty between two wind speed columns.
Assumes a correlation forced through the origin
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref_ws_col and site_ws_col
ref_ws_col: string, default 'ref'
Reference anemometer data column to use.
site_ws_col: string, default 'site'
Site anemometer data column to use.
'''
data = data.loc[:,[ref_ws_col, site_ws_col]].dropna()
if not valid_ws_correlation_data(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
return np.nan
X = data.loc[:,ref_ws_col].values
Y = data.loc[:,site_ws_col].values
Sxx = np.sum(X**2)
Syy = np.sum(Y**2)
Sxy = np.sum(X*Y)
B = 0.5*(Sxx - Syy)/Sxy
SU = -B + np.sqrt(B**2 + 1)
e2 = np.sum((Y - SU*X)**2)/(1 + SU**2)
Xsi2 = e2/(data.shape[0] - 1)
uncert = np.sqrt((Xsi2*SU**2)*(Sxx*Sxy**2 + 0.25*((Sxx - Syy)**2)*Sxx)/((B**2 + 1.0)*Sxy**4))
return uncert*100.0
def ws_correlation_least_squares_model(data, ref_ws_col='ref', site_ws_col='site', force_through_origin=False):
'''Calculate the slope and offset between two wind speed columns using ordinary least squares regression.
https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.lstsq.html
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref and site, and direction data dir
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_ws_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
force_through_origin: boolean, default False
Force the correlation through the origin (offset equal to zero)
:Returns:
out: DataFrame
slope, offset, R2, uncert, points
'''
data = data.loc[:, [ref_ws_col, site_ws_col]].dropna()
results = return_correlation_results_frame(ref_label=ref_ws_col, site_label=site_ws_col)
if not valid_ws_correlation_data(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
return results
points = data.shape[0]
R2 = calculate_R2(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col)
uncert = calculate_IEC_uncertainty(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col)
if force_through_origin:
data.loc[:,'offset'] = 0
else:
data.loc[:,'offset'] = 1
X = data.loc[:, [ref_ws_col,'offset']].values
Y = data.loc[:, site_ws_col].values
slope, offset = np.linalg.lstsq(X, Y)[0]
results.loc[pd.IndexSlice[ref_ws_col, site_ws_col],['slope', 'offset' , 'R2', 'uncert', 'points']] = np.array([slope, offset, R2, uncert, points])
return results
def f_with_offset(B, x):
return B[0]*x + B[1]
def f_without_offset(B, x):
return B[0]*x
def ws_correlation_orthoginal_distance_model(data, ref_ws_col='ref', site_ws_col='site', force_through_origin=False):
'''Calculate the slope and offset between two wind speed columns using orthogonal distance regression.
https://docs.scipy.org/doc/scipy-0.18.1/reference/odr.html
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref and site, and direction data dir
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_ws_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
force_through_origin: boolean, default False
Force the correlation through the origin (offset equal to zero)
:Returns:
out: DataFrame
slope, offset, R2, uncert, points
'''
data = data.loc[:, [ref_ws_col, site_ws_col]].dropna().astype(np.float)
results = return_correlation_results_frame(ref_label=ref_ws_col, site_label=site_ws_col)
if not valid_ws_correlation_data(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
return results
points = data.shape[0]
R2 = calculate_R2(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col)
uncert = calculate_IEC_uncertainty(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col)
X = data.loc[:, ref_ws_col].values
Y = data.loc[:, site_ws_col].values
data_mean = data.mean()
slope_estimate_via_ratio = data_mean[site_ws_col]/data_mean[ref_ws_col]
realdata = odrpack.RealData(X, Y)
if force_through_origin:
linear = odrpack.Model(f_without_offset)
odr = odrpack.ODR(realdata, linear, beta0=[slope_estimate_via_ratio])
slope = odr.run().beta[0]
offset = 0
else:
linear = odrpack.Model(f_with_offset)
odr = odrpack.ODR(realdata, linear, beta0=[slope_estimate_via_ratio, 0.0])
odr_output = odr.run()
slope, offset = odr_output.beta[0], odr_output.beta[1]
results.loc[pd.IndexSlice[ref_ws_col, site_ws_col],['slope', 'offset' , 'R2', 'uncert', 'points']] = np.array([slope, offset, R2, uncert, points])
return results
def ws_correlation_robust_linear_model(data, ref_ws_col='ref', site_ws_col='site', force_through_origin=False):
'''Calculate the slope and offset between two wind speed columns using robust linear model.
http://www.statsmodels.org/dev/rlm.html
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref and site, and direction data dir
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_ws_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
force_through_origin: boolean, default False
Force the correlation through the origin (offset equal to zero)
:Returns:
out: DataFrame
slope, offset, R2, uncert, points
'''
data = data.loc[:, [ref_ws_col, site_ws_col]].dropna().astype(np.float)
results = return_correlation_results_frame(ref_label=ref_ws_col, site_label=site_ws_col)
if not valid_ws_correlation_data(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
return results
points = data.shape[0]
R2 = calculate_R2(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col)
uncert = calculate_IEC_uncertainty(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col)
X = data.loc[:, ref_ws_col].values
Y = data.loc[:, site_ws_col].values
if not force_through_origin:
X = sm.add_constant(X)
else:
X = [np.zeros(X.shape[0]), X]
X = np.column_stack(X)
mod = sm.RLM(Y, X)
resrlm = mod.fit()
offset, slope = resrlm.params
R2 = sm.WLS(mod.endog, mod.exog, weights=mod.fit().weights).fit().rsquared
results.loc[pd.IndexSlice[ref_ws_col, site_ws_col],['slope', 'offset' , 'R2', 'uncert', 'points']] = np.array([slope, offset, R2, uncert, points])
return results
def ws_correlation_method(data, ref_ws_col='ref', site_ws_col='site', method='ODR', force_through_origin=False):
'''Calculate the slope and offset, for a given correlation method, between two wind speed columns.
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref and site, and direction data dir
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_ws_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
method: string, default 'ODR'
Correlation method to use.
* Orthogonal distance regression: 'ODR'
* Ordinary least squares: 'OLS'
* Robust linear models: 'RLM'
force_through_origin: boolean, default False
Force the correlation through the origin (offset equal to zero)
:Returns:
out: DataFrame
slope, offset, R2, uncert, points
'''
if method == 'ODR':
results = ws_correlation_orthoginal_distance_model(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col, force_through_origin=force_through_origin)
elif method == 'OLS':
results = ws_correlation_least_squares_model(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col, force_through_origin=force_through_origin)
elif method == 'RLM':
results = ws_correlation_robust_linear_model(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col, force_through_origin=force_through_origin)
return results
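# Illustrative usage sketch (assumption: synthetic data, not part of the original
# module). Shows how ws_correlation_method() dispatches to one of the ODR/OLS/RLM
# fits and returns a one-row results frame with slope, offset, R2, uncert and points.
def _example_ws_correlation_method():
    rng = np.random.RandomState(0)
    ref = rng.uniform(3.0, 12.0, size=200)
    site = 0.9 * ref + rng.normal(0.0, 0.3, size=200)
    data = pd.DataFrame({'ref': ref, 'site': site})
    return ws_correlation_method(data, ref_ws_col='ref', site_ws_col='site', method='OLS')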
def ws_correlation_binned_by_direction(data, ref_ws_col='ref', site_ws_col='site', ref_dir_col='dir', dir_sectors=16, method='ODR', force_through_origin=False):
'''Calculate the slope and offset, binned by direction, between two wind speed columns.
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref and site, and direction data dir
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_ws_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
ref_dir_col: string, default None (primary wind vane assumed)
Reference wind vane data to use. Extracted from MetMast.data
dir_sectors: int, default 16
Number of equally spaced direction sectors
method: string, default 'ODR'
Correlation method to use.
* Orthogonal distance regression: 'ODR'
* Ordinary least squares: 'OLS'
* Robust linear models: 'RLM'
force_through_origin: boolean, default False
Force the correlation through the origin (offset equal to zero)
:Returns:
out: DataFrame
slope, offset, R2, uncert, points
'''
data = data.loc[:,[ref_ws_col, site_ws_col, ref_dir_col]].dropna().astype(np.float)
results = return_correlation_results_frame(ref_label=ref_ws_col, site_label=site_ws_col)
dir_bins = np.arange(1,dir_sectors+1)
results = pd.concat([results]*dir_sectors, axis=0)
results.index = pd.Index(dir_bins, name='dir_bin')
data['dir_bin'] = an.analysis.wind_rose.append_dir_bin(data[ref_dir_col], dir_sectors=dir_sectors)
for dir_bin in dir_bins:
dir_bin_data = data.loc[data['dir_bin']==dir_bin, [ref_ws_col, site_ws_col]]
points = dir_bin_data.shape[0]
if not valid_ws_correlation_data(data=dir_bin_data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
results.loc[dir_bin, 'points'] = points
else:
uncert = calculate_IEC_uncertainty(data=dir_bin_data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col)
dir_bin_results = ws_correlation_method(data=dir_bin_data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col, method=method, force_through_origin=force_through_origin)
results.loc[dir_bin, ['slope', 'offset', 'R2' , 'uncert', 'points']] = dir_bin_results.values
return results
def ws_correlation_binned_by_month(data, ref_ws_col='ref', site_ws_col='site', method='ODR', force_through_origin=False):
'''Calculate the slope and offset, binned by month, between two wind speed columns.
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref and site, and direction data dir
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_ws_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
method: string, default 'ODR'
Correlation method to use.
* Orthogonal distance regression: 'ODR'
* Ordinary least squares: 'OLS'
* Robust linear models: 'RLM'
force_through_origin: boolean, default False
Force the correlation through the origin (offset equal to zero)
:Returns:
out: DataFrame
slope, offset, R2, uncert, points
'''
data = data.loc[:, [ref_ws_col, site_ws_col]].dropna().astype(np.float)
results = return_correlation_results_frame(ref_label=ref_ws_col, site_label=site_ws_col)
if not valid_ws_correlation_data(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
return results
months = np.arange(1,13)
results = pd.concat([results]*12, axis=0)
results.index = pd.Index(months, name='month')
for month in months:
monthly_data = data.loc[data.index.month==month, [ref_ws_col, site_ws_col]]
points = monthly_data.shape[0]
if not valid_ws_correlation_data(data=monthly_data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
results.loc[month, 'points'] = points
else:
uncert = calculate_IEC_uncertainty(data=monthly_data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col)
monthly_results = ws_correlation_method(data=monthly_data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col, method=method, force_through_origin=force_through_origin)
results.loc[month, ['slope', 'offset', 'R2' , 'uncert', 'points']] = monthly_results.values
return results
### MAST CORRELATIONS ###
''' Basic outline is that for every correlate method you have to pass it
reference and site mast objects along with the needed sensor names
'''
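# Illustrative sketch only (not part of the original module): `ref_mast` and
# `site_mast` are assumed to be anemoi MetMast objects built elsewhere; sensor
# columns default to the primary anemometers/vanes when left as None.
def _example_masts_10_minute(ref_mast, site_mast):
    # Returns a one-row results frame indexed by (ref_mast.name, site_mast.name).
    return masts_10_minute(ref_mast, site_mast, method='ODR', force_through_origin=False)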
def masts_10_minute(ref_mast, site_mast, ref_ws_col=None, site_ws_col=None, method='ODR', force_through_origin=False):
'''Calculate the slope and offset between two met masts.
:Parameters:
ref_mast: MetMast
MetMast object
site_mast: MetMast
MetMast object
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_ws_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
method: string, default 'ODR'
Correlation method to use.
* Orthogonal distance regression: 'ODR'
* Ordinary least squares: 'OLS'
* Robust linear models: 'RLM'
force_through_origin: boolean, default False
Force the correlation through the origin (offset equal to zero)
:Returns:
out: DataFrame
slope, offset, R2, uncert, points
'''
ref_ws_col = ref_mast.check_and_return_mast_ws_sensor(ref_ws_col)
site_ws_col = site_mast.check_and_return_mast_ws_sensor(site_ws_col)
ref_ws_data = ref_mast.return_sensor_data([ref_ws_col])
site_ws_data = site_mast.return_sensor_data([site_ws_col])
data = pd.concat([ref_ws_data, site_ws_data], axis=1, join='inner').dropna().astype(np.float)
data.columns = ['ref', 'site']
results = return_correlation_results_frame(ref_label=ref_mast.name, site_label=site_mast.name)
valid_results = ws_correlation_method(data=data, ref_ws_col='ref', site_ws_col='site', method=method, force_through_origin=force_through_origin)
results.loc[pd.IndexSlice[ref_mast.name, site_mast.name], ['slope', 'offset', 'R2' , 'uncert', 'points']] = valid_results.values
return results
def masts_10_minute_by_direction(ref_mast, site_mast, ref_ws_col=None, ref_dir_col=None, site_ws_col=None, site_dir_col=None, method='ODR', force_through_origin=False, dir_sectors=16):
'''Calculate the slope and offset, binned by direction, between two met masts.
:Parameters:
ref_mast: MetMast
MetMast object
site_mast: MetMast
MetMast object
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
ref_dir_col: string, default None (primary wind vane assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_dir_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
method: string, default 'ODR'
Correlation method to use.
* Orthogonal distance regression: 'ODR'
* Ordinary least squares: 'OLS'
* Robust linear models: 'RLM'
dir_sectors: int, default 16
Number of equally spaced direction sectors
force_through_origin: boolean, default False
Force the correlation through the origin (offset equal to zero)
:Returns:
out: DataFrame
slope, offset, R2, uncert, points
'''
ref_ws_col = ref_mast.check_and_return_mast_ws_sensor(ref_ws_col)
ref_dir_col = ref_mast.check_and_return_mast_dir_sensor(ref_dir_col)
site_ws_col = site_mast.check_and_return_mast_ws_sensor(site_ws_col)
site_dir_col = site_mast.check_and_return_mast_dir_sensor(site_dir_col)
ref_ws_data = ref_mast.return_sensor_data([ref_ws_col])
ref_dir_data = ref_mast.return_sensor_data([ref_dir_col])
site_ws_data = site_mast.return_sensor_data([site_ws_col])
data = pd.concat([ref_ws_data, site_ws_data, ref_dir_data], axis=1, join='inner').dropna().astype(np.float)
data.columns = ['ref', 'site', 'dir']
results = ws_correlation_binned_by_direction(data, dir_sectors=dir_sectors, method=method, force_through_origin=force_through_origin)
results = results.reset_index()
results['ref'] = ref_mast.name
results['site'] = site_mast.name
results = results.set_index(['ref', 'site', 'dir_bin'])
return results
def masts_daily(ref_mast, site_mast, ref_ws_col=None, site_ws_col=None, method='ODR', force_through_origin=False, minimum_recovery_rate=0.7):
'''Calculate the slope and offset for daily data between two met masts.
:Parameters:
ref_mast: MetMast
MetMast object
site_mast: MetMast
MetMast object
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_ws_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
method: string, default 'ODR'
Correlation method to use.
* Orthogonal distance regression: 'ODR'
* Ordinary least squares: 'OLS'
* Robust linear models: 'RLM'
force_through_origin: boolean, default False
Force the correlation through the origin (offset equal to zero)
minimum_recovery_rate: float, default 0.7
Minimum allowable recovery rate until resampled data are excluded.
For example, by default, when resampling 10-minute data to daily averages you would need
at least 101 valid records to have a valid daily average.
:Returns:
out: DataFrame
slope, offset, R2, uncert, points
'''
ref_ws_col = ref_mast.check_and_return_mast_ws_sensor(ref_ws_col)
site_ws_col = site_mast.check_and_return_mast_ws_sensor(site_ws_col)
ref_ws_data = ref_mast.return_sensor_data([ref_ws_col])
site_ws_data = site_mast.return_sensor_data([site_ws_col])
if minimum_recovery_rate > 1:
minimum_recovery_rate = minimum_recovery_rate/100.0
ref_data_daily_mean = an.utils.mast_data.resample_mast_data(ref_ws_data, freq='daily', minimum_recovery_rate=minimum_recovery_rate)
site_data_daily_mean = an.utils.mast_data.resample_mast_data(site_ws_data, freq='daily', minimum_recovery_rate=minimum_recovery_rate)
data_daily = pd.concat([ref_data_daily_mean, site_data_daily_mean], axis=1).dropna().astype(np.float)
data_daily.columns = ['ref', 'site']
data_daily['dir'] = np.nan
results = ws_correlation_method(data_daily, method=method, force_through_origin=force_through_origin)
results.index = pd.MultiIndex.from_tuples([(ref_mast.name, site_mast.name)], names=['ref', 'site'])
return results
def masts_daily_by_month(ref_mast, site_mast, ref_ws_col=None, site_ws_col=None, method='ODR', force_through_origin=False, minimum_recovery_rate=0.7):
'''Calculate the slope and offset for daily data, binned by month, between two met masts.
:Parameters:
ref_mast: MetMast
MetMast object
site_mast: MetMast
MetMast object
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_ws_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
method: string, default 'ODR'
Correlation method to use.
* Orthogonal distance regression: 'ODR'
* Ordinary least squares: 'OLS'
* Robust linear models: 'RLM'
force_through_origin: boolean, default False
Force the correlation through the origin (offset equal to zero)
minimum_recovery_rate: float, default 0.7
Minimum allowable recovery rate until resampled data are excluded.
For example, by default, when resampling 10-minute data to daily averages you would need
at least 101 valid records to have a valid daily average.
:Returns:
out: DataFrame
slope, offset, R2, uncert, points for each month
'''
ref_ws_col = ref_mast.check_and_return_mast_ws_sensor(ref_ws_col)
site_ws_col = site_mast.check_and_return_mast_ws_sensor(site_ws_col)
ref_ws_data = ref_mast.return_sensor_data([ref_ws_col])
site_ws_data = site_mast.return_sensor_data([site_ws_col])
if minimum_recovery_rate > 1:
minimum_recovery_rate = minimum_recovery_rate/100.0
ref_data_daily_mean = an.utils.mast_data.resample_mast_data(ref_ws_data, freq='daily', minimum_recovery_rate=minimum_recovery_rate)
site_data_daily_mean = an.utils.mast_data.resample_mast_data(site_ws_data, freq='daily', minimum_recovery_rate=minimum_recovery_rate)
data_daily = pd.concat([ref_data_daily_mean, site_data_daily_mean], axis=1).dropna().astype(np.float)
data_daily.columns = ['ref', 'site']
data_daily['dir'] = np.nan
results = ws_correlation_binned_by_month(data_daily, method=method, force_through_origin=force_through_origin)
results = results.reset_index()
results['ref'] = ref_mast.name
results['site'] = site_mast.name
results = results.set_index(['ref', 'site', 'month'])
return results
def apply_10min_results_by_direction(ref_mast, site_mast, corr_results, ref_ws_col=None, ref_dir_col=None, site_ws_col=None, splice=True):
'''Applies the slopes and offsets from a 10-minute correlation, binned by direction, between two met masts.
:Parameters:
ref_mast: MetMast
MetMast object
site_mast: MetMast
MetMast object
corr_results: DataFrame
slope, offset, R2, uncert, points for each direction sector
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
ref_dir_col: string, default None (primary vane assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_ws_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
splice: Boolean, default True
Returns site data where available and gap-fills any missing periods between the site mast
and the reference mast's measurement period. Otherwise, returns purely synthesized data without
taking into account the measured wind speeds.
:Returns:
out: time series DataFrame
predicted wind speeds at the site
'''
ref_ws_col = ref_mast.check_and_return_mast_ws_sensor(ref_ws_col)
ref_dir_col = ref_mast.check_and_return_mast_dir_sensor(ref_dir_col)
site_ws_col = site_mast.check_and_return_mast_ws_sensor(site_ws_col)
ref_ws_data = ref_mast.return_sensor_data([ref_ws_col])
ref_dir_data = ref_mast.return_sensor_data([ref_dir_col])
site_ws_data = site_mast.return_sensor_data([site_ws_col])
data = pd.concat([ref_ws_data, site_ws_data, ref_dir_data], axis=1, join='inner')
from __future__ import print_function
'''
This module should be organized as follows:
Main function:
chi_estimate() = returns chi_n, chi_b
- calls:
wealth.get_wealth_data() - returns data moments on wealth distribution
labor.labor_data_moments() - returns data moments on labor supply
minstat() - returns min of statistical objective function
model_moments() - returns model moments
SS.run_SS() - return SS distributions
'''
'''
------------------------------------------------------------------------
Last updated: 7/27/2016
Uses a simulated method of moments to calibrate the chi_n and chi_b
parameters of OG-USA.
This py-file calls the following other file(s):
wealth.get_wealth_data()
labor.labor_data_moments()
SS.run_SS
This py-file creates the following other file(s): None
------------------------------------------------------------------------
'''
import numpy as np
import scipy.optimize as opt
import pandas as pd
import os
try:
import cPickle as pickle
except ImportError:
import pickle
from . import wealth
from . import labor
from . import SS
from . import utils
def chi_n_func(s, a0, a1, a2, a3, a4):
chi_n = a0 + a1 * s + a2 * s ** 2 + a3 * s ** 3 + a4 * s ** 4
return chi_n
def chebyshev_func(x, a0, a1, a2, a3, a4):
func = np.polynomial.chebyshev.chebval(x, [a0, a1, a2, a3, a4])
return func
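# Illustrative sketch (not part of the original calibration): chi_estimate() below
# evaluates this Chebyshev profile over ages 20-60 for the first S//2 model periods.
# The coefficients here are the commented-out "Kei's Vals" from chi_estimate() and
# are used purely as placeholder values.
def _example_chebyshev_chi_n():
    ages = np.linspace(20, 60, 40)
    return chebyshev_func(ages, 170.0, -2.19154735e+00, -2.22817460e-02, 4.49993507e-04, -1.34197054e-06)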
def chi_estimate(p, client=None):
'''
--------------------------------------------------------------------
This function calls others to obtain the data momements and then
runs the simulated method of moments estimation by calling the
minimization routine.
INPUTS:
income_tax_parameters = length 4 tuple, (analytical_mtrs, etr_params, mtrx_params, mtry_params)
ss_parameters = length 21 tuple, (J, S, T, BW, beta, sigma, alpha, Z, delta, ltilde, nu, g_y,\
g_n_ss, tau_payroll, retire, mean_income_data,\
h_wealth, p_wealth, m_wealth, b_ellipse, upsilon)
iterative_params = [2,] vector, vector with max iterations and tolerance
for SS solution
chi_guesses = [J+S,] vector, initial guesses of chi_b and chi_n stacked together
baseline_dir = string, path where baseline results located
OTHER FUNCTIONS AND FILES CALLED BY THIS FUNCTION:
wealth.compute_wealth_moments()
labor.labor_data_moments()
minstat()
OBJECTS CREATED WITHIN FUNCTION:
wealth_moments = [J+2,] array, wealth moments from data
labor_moments = [S,] array, labor moments from data
data_moments = [J+2+S,] array, wealth and labor moments stacked
bnds = [S+J,] array, bounds for parameter estimates
chi_guesses_flat = [J+S,] vector, initial guesses of chi_b and chi_n stacked
min_arg = length 6 tuple, variables needed for minimizer
est_output = dictionary, output from minimizer
chi_params = [J+S,] vector, parameters estimates for chi_b and chi_n stacked
objective_func_min = scalar, minimum of statistical objective function
OUTPUT:
./baseline_dir/Calibration/chi_estimation.pkl
RETURNS: chi_params
--------------------------------------------------------------------
'''
baseline_dir="./OUTPUT"
#chi_b_guess = np.ones(80)
# a0 = 5.38312524e+01
# a1 = -1.55746248e+00
# a2 = 1.77689237e-02
# a3 = -8.04751667e-06
# a4 = 5.65432019e-08
""" Kei's Vals
a0 = 170
a1 = -2.19154735e+00
a2 = -2.22817460e-02
a3 = 4.49993507e-04
a4 = -1.34197054e-06
"""
""" Adam's Vals 1
a0 = 2.59572155e+02
a1 = -2.35122641e+01
a2 = 4.27581467e-01
a3 = -3.40808933e-03
a4 = 1.00404321e-05
"""
a0 = 1.16807470e+03#5.19144310e+02
a1 = -1.05805189e+02#-4.70245283e+01
a2 = 1.92411660e+00#8.55162933e-01
a3 = -1.53364020e-02#-6.81617866e-03
a4 = 4.51819445e-05#2.00808642e-05
sixty_plus_chi = 10000
params_init = np.array([a0, a1, a2, a3, a4])
# Generate labor data moments
labor_hours = np.array([167, 165, 165, 165, 165, 166, 165, 165, 164, 166, 164])
labor_part_rate = np.array([0.69, 0.849, 0.849, 0.847, 0.847, 0.859, 0.859, 0.709, 0.709, 0.212, 0.212])
employ_rate = np.array([0.937, 0.954, 0.954, 0.966, 0.966, 0.97, 0.97, 0.968, 0.968, 0.978, 0.978])
labor_hours_adj = labor_hours * labor_part_rate * employ_rate
# get fraction of time endowment worked (assume time
# endowment is 24 hours minus required time to sleep 6.5 hours)
labor_moments = labor_hours_adj * 12 / (365 * 17.5)
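# i.e. annual hours worked (monthly hours x 12) divided by the annual time
# endowment of 365 days x 17.5 waking hours, giving the fraction of the time
# endowment spent working in each age bin.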
#labor_moments[9] = 0.1
#labor_moments[10] = 0.1
# combine moments
data_moments = np.array(list(labor_moments.flatten()))
# weighting matrix
W = np.identity(p.J+2+p.S)
W = np.identity(11)
ages = np.linspace(20, 60, p.S // 2)
est_output = opt.minimize(minstat, params_init,\
args=(p, client, data_moments, W, ages, sixty_plus_chi),\
method="L-BFGS-B",\
tol=1e-15, options={'eps': 1e-10})
a0, a1, a2, a3, a4 = est_output.x
chi_n = np.ones(p.S)
chi_n[:p.S // 2] = chebyshev_func(ages, a0, a1, a2, a3, a4)
chi_n[p.S // 2:] = sixty_plus_chi
p.chi_n = chi_n
pickle.dump(chi_n, open("chi_n.p", "wb"))
ss_output = SS.run_SS(p)
return ss_output
def minstat(params, *args):
'''
--------------------------------------------------------------------
This function generates the weighted sum of squared differences
between the model and data moments.
INPUTS:
chi_guesses = [J+S,] vector, initial guesses of chi_b and chi_n stacked together
arg = length 6 tuple, variables needed for minimizer
OTHER FUNCTIONS AND FILES CALLED BY THIS FUNCTION:
SS.run_SS()
calc_moments()
OBJECTS CREATED WITHIN FUNCTION:
ss_output = dictionary, variables from SS of model
model_moments = [J+2+S,] array, moments from the model solution
distance = scalar, weighted, squared deviation between data and model moments
RETURNS: distance
--------------------------------------------------------------------
'''
a0, a1, a2, a3, a4 = params
p, client, data_moments, W, ages, sixty_plus_chi = args
chi_n = np.ones(p.S)
chi_n[:p.S // 2] = chebyshev_func(ages, a0, a1, a2, a3, a4)
chi_n[p.S // 2:] = sixty_plus_chi
p.chi_n = chi_n
#print(chi_n)
try:
ss_output = SS.run_SS(p, client)
except:
return 1e100
print("-----------------------------------------------------")
print('PARAMS', params)
print("-----------------------------------------------------")
model_moments = calc_moments(ss_output, p.omega_SS, p.lambdas, p.S, p.J)
print('Model moments:', model_moments)
print("-----------------------------------------------------")
# distance with levels
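# i.e. the quadratic form (m - d)' W (m - d), where m are the model moments,
# d the data moments and W the weighting matrix passed in through args.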
distance = np.dot(np.dot((np.array(model_moments) - np.array(data_moments)).T,W),
np.array(model_moments) - np.array(data_moments))
#distance = ((np.array(model_moments) - np.array(data_moments))**2).sum()
print('DATA and MODEL DISTANCE: ', distance)
# # distance with percentage diffs
# distance = (((model_moments - data_moments)/data_moments)**2).sum()
return distance
def calc_moments(ss_output, omega_SS, lambdas, S, J):
'''
--------------------------------------------------------------------
This function calculates moments from the SS output that correspond
to the data moments used for estimation.
INPUTS:
ss_output = dictionary, variables from SS of model
omega_SS = [S,] array, SS population distribution over age
lambdas = [J,] array, proportion of population of each ability type
S = integer, number of ages
J = integer, number of ability types
OTHER FUNCTIONS AND FILES CALLED BY THIS FUNCTION:
the_inequalizer()
OBJECTS CREATED WITHIN FUNCTION:
model_wealth_moments = [J+2,] array, wealth moments from the model
model_labor_moments = [S,] array, labor moments from the model
model_moments = [J+2+S,] array, wealth and data moments from the model solution
RETURNS: model_moments
--------------------------------------------------------------------
'''
# unpack relevant SS variables
n = ss_output['nssmat']
# labor moments
model_labor_moments = (n.reshape(S, J) * lambdas.reshape(1, J)).sum(axis=1)
### we have ages 20-100 so let's find bins based on population weights
# converting to match our data moments
model_labor_moments = pd.DataFrame(model_labor_moments * omega_SS)
#print("--------------------------------------------------------------------")
#print("Original:")
#model_labor_moments = model_labor_moments.mean(axis=0)
model_labor_moments.rename({0: 'labor_weighted'}, axis=1, inplace=True)
#print(model_labor_moments)
#print("--------------------------------------------------------------------")
ages = np.linspace(20, 100, S)
age_bins = np.linspace(20, 75, 12)
age_bins[11] = 101
labels = np.linspace(20, 70, 11)
model_labor_moments['pop_dist'] = omega_SS
model_labor_moments['age_bins'] = pd.cut(ages, age_bins, right=False, include_lowest=True, labels=labels)
from pathlib import Path
from typing import Union, Dict, List
import medvision as mv
import numpy as np
import pandas as pd
def load_det_dsmd(
dsmd_path: Union[str, Path],
class2label: Union[str, Dict[str, int]]
):
""" load detection dataset metadata.
Args:
dsmd_path (str or Path): dataset metadata file path.
class2label (str or dict): class-to-label file.
Return:
(OrderedDict): Loaded dsmd is a OrderedDict looks like
{
data/1.png: [
bboxes (ndarray) of category 'cat' of shape (n, 4) or (n, 5),
bboxes (ndarray) of category 'dog' of shape (n, 4) or (n, 5),
...
]
data/2.png: [
...
]
...
}
"""
if isinstance(class2label, str):
class2label = mv.load_c2l(class2label)
assert min(class2label.values()) == 0, \
"label should start from 0, but got %d" % min(class2label.values())
num_classes = len(class2label)
df = pd.read_csv(dsmd_path, header=None)
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
# -*- coding: utf-8 -*-
"""
Analyzes code age in a git repository
Writes reports in the following locations
e.g. For repository "cpython"
[root] Defaults to ~/git.stats
├── cpython Directory for https://github.com/python/cpython.git
│ └── reports
│ ├── 2011-03-06.d68ed6fc.2_0 Revision `d68ed6fc` which was created on 2011-03-06 on
│ │ │ branch `2.0`.
│ │ └── __c.__cpp.__h Report on *.c, *.cpp and *.h files in this revision
│ │ ├── Guido_van_Rossum Sub-report on author `<NAME>`
│ │ │ ├── code-age.png Graph of code age. LoC / day vs date
│ │ │ ├── code-age.txt List of commits in the peaks in the code-age.png graph
│ │ │ ├── details.csv LoC in each directory in for these files and authors
│ │ │ ├── newest-commits.txt List of newest commits for these files and authors
│ │ │ └── oldest-commits.txt List of oldest commits for these files and authors
"""
from __future__ import division, print_function
import subprocess
from subprocess import CalledProcessError
from collections import defaultdict, Counter
import sys
import time
import re
import os
import stat
import glob
import errno
import numpy as np
from scipy import signal
import pandas as pd
from pandas import Series, DataFrame, Timestamp
import matplotlib
import matplotlib.pylab as plt
from matplotlib.pylab import cycler
import bz2
from multiprocessing import Pool, cpu_count
from multiprocessing.pool import ThreadPool
import pygments
from pygments import lex
from pygments.token import Text, Comment, Punctuation, Literal
from pygments.lexers import guess_lexer_for_filename
# Python 2 / 3 stuff
PY2 = sys.version_info[0] < 3
try:
import cPickle as pickle
except ImportError:
import pickle
try:
reload(sys)
sys.setdefaultencoding('utf-8')
except:
pass
#
# Configuration.
#
CACHE_FILE_VERSION = 3 # Update when making incompatible changes to cache file format
TIMEZONE = 'Australia/Melbourne' # The timezone used for all commit times. TODO Make configurable
SHA_LEN = 8 # The number of characters used when displaying git SHA-1 hashes
STRICT_CHECKING = False # For validating code.
N_BLAME_PROCESSES = max(1, cpu_count() - 1) # Number of processes to use for blaming
N_SHOW_THREADS = 8 # Number of threads for running the many git show commands
DO_MULTIPROCESSING = True # For test non-threaded performance
# Set graphing style
matplotlib.style.use('ggplot')
plt.rcParams['axes.prop_cycle'] = cycler('color', ['b', 'y', 'k', '#707040', '#404070'])
plt.rcParams['savefig.dpi'] = 300
PATH_MAX = 255
# Files that we don't analyze. These are files that don't have lines of code so that blaming
# doesn't make sense.
IGNORED_EXTS = {
'.air', '.bin', '.bmp', '.cer', '.cert', '.der', '.developerprofile', '.dll', '.doc', '.docx',
'.exe', '.gif', '.icns', '.ico', '.jar', '.jpeg', '.jpg', '.keychain', '.launch', '.pdf',
'.pem', '.pfx', '.png', '.prn', '.so', '.spc', '.svg', '.swf', '.tif', '.tiff', '.xls', '.xlsx',
'.tar', '.zip', '.gz', '.7z', '.rar',
'.patch',
'.dump',
'.h5'
}
def _is_windows():
"""Returns: True if running on a MS-Windows operating system."""
try:
sys.getwindowsversion()
except:
return False
else:
return True
IS_WINDOWS = _is_windows()
if IS_WINDOWS:
import win32api
import win32process
import win32con
def lowpriority():
""" Set the priority of the process to below-normal.
http://stackoverflow.com/questions/1023038/change-process-priority-in-python-cross-platform
"""
pid = win32api.GetCurrentProcessId()
handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)
win32process.SetPriorityClass(handle, win32process.BELOW_NORMAL_PRIORITY_CLASS)
else:
def lowpriority():
os.nice(1)
class ProcessPool(object):
"""Package of Pool and ThreadPool for 'with' usage.
"""
SINGLE = 0
THREAD = 1
PROCESS = 2
def __init__(self, process_type, n_pool):
if not DO_MULTIPROCESSING:
process_type = ProcessPool.SINGLE
self.process_type = process_type
if process_type != ProcessPool.SINGLE:
clazz = ThreadPool if process_type == ProcessPool.THREAD else Pool
self.pool = clazz(n_pool)
def __enter__(self):
return self
def imap_unordered(self, func, args_iter):
if self.process_type != ProcessPool.SINGLE:
return self.pool.imap_unordered(func, args_iter)
else:
return map(func, args_iter)
def __exit__(self, exc_type, exc_value, traceback):
if self.process_type != ProcessPool.SINGLE:
self.pool.terminate()
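# Illustrative usage sketch (assumption: not part of the original script and never
# called). Shows the intended 'with' usage of ProcessPool, mirroring how the blame
# and "git show" stages fan work out over a pool of workers.
def _example_process_pool(paths):
    with ProcessPool(ProcessPool.THREAD, N_SHOW_THREADS) as pool:
        return list(pool.imap_unordered(get_ext, paths))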
def sha_str(sha):
"""The way we show git SHA-1 hashes in reports."""
return sha[:SHA_LEN]
def date_str(date):
"""The way we show dates in reports."""
return date.strftime('%Y-%m-%d')
DAY = pd.Timedelta('1 days') # 24 * 3600 * 1e9 in pandas nanosec time
# Max date accepted for commits. Clearly a sanity check
MAX_DATE = Timestamp('today').tz_localize(TIMEZONE) + DAY
def to_timestamp(date_s):
"""Convert string `date_s' to pandas Timestamp in `TIMEZONE`
NOTE: The idea is to get all times in one timezone.
"""
return Timestamp(date_s).tz_convert(TIMEZONE)
def delta_days(t0, t1):
"""Returns: time from `t0` to `t1' in days where t0 and t1 are Timestamps
Returned value is signed (+ve if t1 later than t0) and fractional
"""
return (t1 - t0).total_seconds() / 3600 / 24
concat = ''.join
path_join = os.path.join
def decode_to_str(bytes):
"""Decode byte list `bytes` to a unicode string trying utf-8 encoding first then latin-1.
"""
if bytes is None:
return None
try:
return bytes.decode('utf-8')
except:
return bytes.decode('latin-1')
def save_object(path, obj):
"""Save object `obj` to `path` after bzipping it
"""
# existing_pkl is for recovering from bad pickles
existing_pkl = '%s.old.pkl' % path
if os.path.exists(path) and not os.path.exists(existing_pkl):
os.rename(path, existing_pkl)
with bz2.BZ2File(path, 'w') as f:
# protocol=2 makes pickle usable by python 2.x
pickle.dump(obj, f, protocol=2)
# Delete existing_pkl if load_object succeeds
load_object(path)
if os.path.exists(path) and os.path.exists(existing_pkl):
os.remove(existing_pkl)
def load_object(path, default=None):
"""Load object from `path`
"""
if default is not None and not os.path.exists(path):
return default
try:
with bz2.BZ2File(path, 'r') as f:
return pickle.load(f)
except:
print('load_object(%s, %s) failed' % (path, default), file=sys.stderr)
raise
def mkdir(path):
"""Create directory `path` including all intermediate-level directories and ignore
"already exists" errors.
"""
try:
os.makedirs(path)
except OSError as e:
if not (e.errno == errno.EEXIST and os.path.isdir(path)):
raise
def df_append_totals(df_in):
"""Append row and column totals to Pandas DataFrame `df_in`, remove all zero columns and sort
rows and columns by total.
"""
assert 'Total' not in df_in.index
assert 'Total' not in df_in.columns
rows, columns = list(df_in.index), list(df_in.columns)
df = DataFrame(index=rows + ['Total'], columns=columns + ['Total'])
df.iloc[:-1, :-1] = df_in
df.iloc[:, -1] = df.iloc[:-1, :-1].sum(axis=1)
df.iloc[-1, :] = df.iloc[:-1, :].sum(axis=0)
row_order = ['Total'] + sorted(rows, key=lambda r: -df.loc[r, 'Total'])
column_order = ['Total'] + sorted(columns, key=lambda c: -df.loc['Total', c])
df = df.reindex_axis(row_order, axis=0)
df = df.reindex_axis(column_order, axis=1)
empties = [col for col in df.columns if df.loc['Total', col] == 0]
df.drop(empties, axis=1, inplace=True)
return df
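# Illustrative sketch (synthetic numbers, not from any repository): shows the
# totals row/column and the sorting behaviour on a tiny directory-by-author table.
def _example_df_append_totals():
    df = DataFrame({'alice': [10, 0], 'bob': [5, 7]}, index=['src', 'docs'])
    return df_append_totals(df)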
def moving_average(series, window):
"""Returns: Weighted moving average of pandas Series `series` as a pandas Series.
Weights are a triangle of width `window`.
NOTE: If window is greater than the number of items in series then smoothing may not work
well. See first few lines of function code.
"""
if len(series) < 10:
return series
window = min(window, len(series))
weights = np.empty(window, dtype=np.float)
radius = (window - 1) / 2
for i in range(window):
weights[i] = radius + 1 - abs(i - radius)
ma = np.convolve(series, weights, mode='same')
assert ma.size == series.size, ([ma.size, ma.dtype], [series.size, series.dtype], window)
sum_raw = series.sum()
sum_ma = ma.sum()
if sum_ma:
ma *= sum_raw / sum_ma
return Series(ma, index=series.index)
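# Illustrative sketch (synthetic values): smooth a short daily LoC series with a
# 7-sample triangular window.
def _example_moving_average():
    dates = pd.date_range('2016-01-01', periods=30, freq='D')
    daily_loc = Series(np.random.RandomState(0).poisson(20, size=30).astype(float), index=dates)
    return moving_average(daily_loc, window=7)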
def procrustes(s, width=100):
"""Returns: String `s` fitted `width` or fewer chars, removing middle characters if necessary.
"""
width = max(20, width)
if len(s) > width:
notch = int(round(width * 0.6)) - 5
end = width - 5 - notch
return '%s ... %s' % (s[:notch], s[-end:])
return s
RE_EXT = re.compile(r'^\.\w+$')
RE_EXT_NUMBER = re.compile(r'^\.\d+$')
def get_ext(path):
"""Returns: extension of file `path`
"""
parts = os.path.splitext(path)
if not parts:
ext = '[None]'
else:
ext = parts[-1]
if not RE_EXT.search(ext) or RE_EXT_NUMBER.search(ext):
ext = ''
return ext
def exec_output(command, require_output):
"""Executes `command` which is a list of strings. If `require_output` is True then raise an
exception if there is no stdout.
Returns: The stdout of the child process as a string.
"""
# TODO save stderr and print it on error
try:
output = subprocess.check_output(command)
except:
print('exec_output failed: command=%s' % ' '.join(command), file=sys.stderr)
raise
if require_output and not output:
raise RuntimeError('exec_output: command=%s' % command)
return decode_to_str(output)
def exec_output_lines(command, require_output, sep=None):
"""Executes `command` which is a list of strings. If `require_output` is True then raise an
exception if there is no stdout.
Returns: The stdout of the child process as a list of strings, one string per line.
"""
if sep is not None:
return exec_output(command, require_output).split(sep)
else:
return exec_output(command, require_output).splitlines()
def exec_headline(command):
"""Execute `command` which is a list of strings.
Returns: The first line stdout of the child process.
"""
return exec_output(command, True).splitlines()[0]
def git_file_list(path_patterns=()):
"""Returns: List of files in current git revision matching `path_patterns`.
This is basically git ls-files.
"""
# git ls-files -z returns a '\0' separated list of files terminated with '\0\0'
bin_list = exec_output_lines(['git', 'ls-files', '-z', '--exclude-standard'] + path_patterns,
False, '\0')
file_list = []
for path in bin_list:
if not path:
break
file_list.append(path)
return file_list
def git_pending_list(path_patterns=()):
"""Returns: List of git pending files matching `path_patterns`.
"""
return exec_output_lines(['git', 'diff', '--name-only'] + path_patterns, False)
def git_file_list_no_pending(path_patterns=()):
"""Returns: List of non-pending files in current git revision matching `path_patterns`.
"""
file_list = git_file_list(path_patterns)
pending = set(git_pending_list(path_patterns))
return [path for path in file_list if path not in pending]
def git_diff(rev1, rev2):
"""Returns: List of files that differ in git revisions `rev1` and `rev2`.
"""
return exec_output_lines(['git', 'diff', '--name-only', rev1, rev2], False)
def git_show_oneline(obj):
"""Returns: One-line description of a git object `obj`, which is typically a commit.
https://git-scm.com/docs/git-show
"""
return exec_headline(['git', 'show', '--oneline', '--quiet', obj])
def git_date(obj):
"""Returns: Date of a git object `obj`, which is typically a commit.
NOTE: The returned date is standardized to timezone TIMEZONE.
"""
date_s = exec_headline(['git', 'show', '--pretty=format:%ai', '--quiet', obj])
return to_timestamp(date_s)
RE_REMOTE_URL = re.compile(r'(https?://.*/[^/]+(?:\.git)?)\s+\(fetch\)')
RE_REMOTE_NAME = re.compile(r'https?://.*/(.+?)(\.git)?$')
def git_remote():
"""Returns: The remote URL and a short name for the current repository.
"""
# $ git remote -v
# origin https://github.com/FFTW/fftw3.git (fetch)
# origin https://github.com/FFTW/fftw3.git (push)
try:
output_lines = exec_output_lines(['git', 'remote', '-v'], True)
except Exception as e:
print('git_remote error: %s' % e)
return 'unknown', 'unknown'
for line in output_lines:
m = RE_REMOTE_URL.search(line)
if not m:
continue
remote_url = m.group(1)
remote_name = RE_REMOTE_NAME.search(remote_url).group(1)
return remote_url, remote_name
raise RuntimeError('No remote')
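# Hedged sketch of how RE_REMOTE_URL and RE_REMOTE_NAME pick apart `git remote -v`
# output. The line below mirrors the sample in the git_remote() comment; no git
# command is run.
def _example_remote_regexes():
    line = 'origin  https://github.com/FFTW/fftw3.git (fetch)'
    url = RE_REMOTE_URL.search(line).group(1)
    assert url == 'https://github.com/FFTW/fftw3.git'
    name = RE_REMOTE_NAME.search(url).group(1)
    assert name == 'fftw3'
    return url, name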
def git_describe():
"""Returns: git describe of current revision.
"""
return exec_headline(['git', 'describe', '--always'])
def git_name():
"""Returns: git name of current revision.
"""
return ' '.join(exec_headline(['git', 'name-rev', 'HEAD']).split()[1:])
def git_current_branch():
"""Returns: git name of current branch or None if there is no current branch (detached HEAD).
"""
branch = exec_headline(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
if branch == 'HEAD': # Detached HEAD?
branch = None
return branch
def git_current_revision():
"""Returns: SHA-1 of current revision.
"""
return exec_headline(['git', 'rev-parse', 'HEAD'])
def git_revision_description():
"""Returns: Our best guess at describing the current revision"""
description = git_current_branch()
if not description:
description = git_describe()
return description
RE_PATH = re.compile(r'''[^a-z^0-9^!@#$\-+=_\[\]\{\}\(\)^\x7f-\xffff]''', re.IGNORECASE)
RE_SLASH = re.compile(r'[\\/]+')
def normalize_path(path):
"""Returns: `path` without leading ./ and trailing / . \ is replaced by /
"""
path = RE_SLASH.sub('/', path)
if path.startswith('./'):
path = path[2:]
if path.endswith('/'):
path = path[:-1]
return path
def clean_path(path):
"""Returns: `path` with characters that are illegal in filenames replaced with '_'
"""
return RE_PATH.sub('_', normalize_path(path))
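# Hedged examples for normalize_path() / clean_path(): backslashes are folded
# into '/', a leading './' and a trailing '/' are stripped, and clean_path()
# then replaces characters RE_PATH treats as illegal (including '/', '.' and
# spaces) with '_'. The paths are made up.
def _example_clean_path():
    assert normalize_path('.\\src\\\\pkg\\') == 'src/pkg'
    assert clean_path('src/my file.v2.py') == 'src_my_file_v2_py'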
def git_blame_text(path):
"""Returns: git blame text for file `path`
"""
if PY2:
path = path.encode(sys.getfilesystemencoding())
return exec_output(['git', 'blame', '-l', '-f', '-w', '-M', path], False)
RE_BLAME = re.compile(r'''
\^*([0-9a-f]{4,})\s+
.+?\s+
\(
(.+?)\s+
(\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2}\s+[+-]\d{4})
\s+(\d+)
\)''',
re.DOTALL | re.MULTILINE | re.VERBOSE)
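# Hedged sketch of what RE_BLAME extracts from one line of
# `git blame -l -f -w -M` output. The blame line is fabricated but has the same
# shape: <sha> <path> (<author> <date> <line number>) <content>.
def _example_parse_blame_line():
    ln = '3b1a2c4d5e6f7a8b src/main.py (Jane Doe 2015-03-02 10:11:12 +0000 1) import os'
    m = RE_BLAME.match(ln)
    assert m.group(1) == '3b1a2c4d5e6f7a8b'           # commit SHA-1
    assert m.group(2) == 'Jane Doe'                   # author
    assert m.group(3) == '2015-03-02 10:11:12 +0000'  # commit date
    assert int(m.group(4)) == 1                       # 1-based line number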
def _debug_check_dates(max_date, sha_date_author, path_sha_loc):
"""Debug code to validate dates in `sha_date_author`, `path_sha_loc`
"""
if not STRICT_CHECKING:
return
assert max_date <= MAX_DATE, max_date
for path, sha_loc in path_sha_loc.items():
for sha, loc in sha_loc.items():
if loc <= 1:
continue
assert sha in sha_date_author, '%s not in sha_date_author' % [sha, path]
date, _ = sha_date_author[sha]
assert date <= max_date, ('date > max_date', sha, loc, [date, max_date], path)
class GitException(Exception):
def __init__(self, msg=None):
super(GitException, self).__init__(msg)
self.git_msg = msg
if IS_WINDOWS:
RE_LINE = re.compile(r'(?:\r\n|\n)+')
else:
RE_LINE = re.compile(r'[\n]+')
def _parse_blame(max_date, text, path):
"""Parses git blame output `text` and extracts LoC for each git hash found
max_date: Latest valid date for a commit
text: A string containing the git blame output of file `path`
path: Path of blamed file. Used only for constructing error messages in this function
Returns: line_sha_date_author {line_n: (sha, date, author)} over all lines in the file
"""
line_sha_date_author = {}
lines = RE_LINE.split(text)
while lines and not lines[-1]:
lines.pop()
if not lines:
raise GitException('is empty')
for i, ln in enumerate(lines):
if not ln:
continue
m = RE_BLAME.match(ln)
if not m:
raise GitException('bad line')
if m.group(2) == 'Not Committed Yet':
continue
sha = m.group(1)
author = m.group(2)
date_s = m.group(3)
line_n = int(m.group(4))
author = author.strip()
if author == '':
author = '<>'
assert line_n == i + 1, 'line_n=%d,i=%d\n%s\n%s' % (line_n, i, path, m.group(0))
assert author.strip() == author, 'author="%s\n%s:%d\n%s",' % (
author, path, i + 1, ln[:200])
date = to_timestamp(date_s)
if date > max_date:
raise GitException('bad date. sha=%s,date=%s' % (sha, date))
line_sha_date_author[line_n] = sha, date, author
if not line_sha_date_author:
raise GitException('is empty')
return line_sha_date_author
def _compute_author_sha_loc(line_sha_date_author, sloc_lines):
"""
line_sha_date_author: {line_n: (sha, date, author)}
sloc_lines: {line_n} for line_n in line_sha_date_author that are source code or None
Returns: sha_date_author, sha_aloc, sha_sloc
sha_date_author: {sha: (date, author)} over all SHA-1 hashes in
`line_sha_date_author`
sha_aloc: {sha: aloc} over all SHA-1 hashes found in `line_sha_date_author`. aloc
is "all lines of code"
sha_sloc: {sha: sloc} over SHA-1 hashes found in `line_sha_date_author` that are in
`sloc_lines`. sloc is "source lines of code". If sloc_lines is None then
sha_sloc is None.
"""
sha_date_author = {}
sha_aloc = Counter()
sha_sloc = None
for sha, date, author in line_sha_date_author.values():
sha_aloc[sha] += 1
sha_date_author[sha] = (date, author)
if not sha_aloc:
raise GitException('is empty')
if sloc_lines is not None:
sha_sloc = Counter()
counted_lines = set(line_sha_date_author.keys()) & sloc_lines
for line_n in counted_lines:
sha, _, _ = line_sha_date_author[line_n]
sha_sloc[sha] += 1
return sha_date_author, sha_aloc, sha_sloc
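# Hedged sketch of _compute_author_sha_loc() on a tiny fabricated blame parse:
# three lines from two commits, with only lines 1 and 3 counted as source. The
# dates are stand-in strings; the real code passes timestamps.
def _example_compute_author_sha_loc():
    d1, d2 = '2015-03-02', '2016-07-01'
    line_sha_date_author = {
        1: ('aaaa', d1, 'Ann'),
        2: ('aaaa', d1, 'Ann'),
        3: ('bbbb', d2, 'Bob'),
    }
    sha_date_author, sha_aloc, sha_sloc = _compute_author_sha_loc(line_sha_date_author, {1, 3})
    assert sha_aloc == {'aaaa': 2, 'bbbb': 1}
    assert sha_sloc == {'aaaa': 1, 'bbbb': 1}
    assert sha_date_author == {'aaaa': (d1, 'Ann'), 'bbbb': (d2, 'Bob')}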
def get_ignored_files(gitstatsignore):
if gitstatsignore is None:
gitstatsignore = 'gitstatsignore'
else:
        assert os.path.exists(gitstatsignore), 'gitstatsignore file "%s" does not exist' % gitstatsignore
if not gitstatsignore or not os.path.exists(gitstatsignore):
return set()
ignored_files = set()
with open(gitstatsignore, 'rt') as f:
for line in f:
line = line.strip('\n').strip()
if not line:
continue
ignored_files.update(git_file_list([line]))
return ignored_files
class Persistable(object):
"""Base class that
a) saves to disk,
catalog: a dict of objects
summary: a dict describing catalog
manifest: a dict of sizes of objects in a catalog
b) loads them from disk
Derived classes must contain a dict data member called `TEMPLATE` that gives the keys of the
data members to save / load and default constructors for each key.
"""
@staticmethod
def make_data_dir(path):
return path_join(path, 'data')
@staticmethod
def update_dict(base_dict, new_dict):
for k, v in new_dict.items():
if k in base_dict:
base_dict[k].update(v)
else:
base_dict[k] = v
return base_dict
def _make_path(self, name):
return path_join(self.base_dir, name)
def __init__(self, summary, base_dir):
"""Initialize the data based on TEMPLATE and set summary to `summary`.
summary: A dict giving a summary of the data to be saved
base_dir: Directory that summary, data and manifest are to be saved to
"""
assert 'TEMPLATE' in self.__class__.__dict__, 'No TEMPLATE in %s' % self.__class__.__dict__
self.base_dir = base_dir
self.data_dir = Persistable.make_data_dir(base_dir)
self.summary = summary.copy()
self.catalog = {k: v() for k, v in self.__class__.TEMPLATE.items()}
for k, v in self.catalog.items():
assert hasattr(v, 'update'), '%s.TEMPLATE[%s] does not have update(). type=%s' % (
self.__class__.__name__, k, type(v))
def _load_catalog(self):
catalog = load_object(self._make_path('data.pkl'), {})
if catalog.get('CACHE_FILE_VERSION', 0) != CACHE_FILE_VERSION:
return {}
del catalog['CACHE_FILE_VERSION']
return catalog
def load(self):
catalog = self._load_catalog()
if not catalog:
return False
Persistable.update_dict(self.catalog, catalog) # !@#$ Use toolz
path = self._make_path('summary')
if os.path.exists(path):
self.summary = eval(open(path, 'rt').read())
return True
def save(self):
# Load before saving in case another instance of this script is running
path = self._make_path('data.pkl')
if os.path.exists(path):
catalog = self._load_catalog()
self.catalog = Persistable.update_dict(catalog, self.catalog)
# Save the data, summary and manifest
mkdir(self.base_dir)
self.catalog['CACHE_FILE_VERSION'] = CACHE_FILE_VERSION
save_object(path, self.catalog)
open(self._make_path('summary'), 'wt').write(repr(self.summary))
manifest = {k: len(v) for k, v in self.catalog.items() if k != 'CACHE_FILE_VERSION'}
manifest['CACHE_FILE_VERSION'] = CACHE_FILE_VERSION
open(self._make_path('manifest'), 'wt').write(repr(manifest))
def __repr__(self):
return repr([self.base_dir, {k: len(v) for k, v in self.catalog.items()}])
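# Hedged sketch of Persistable.update_dict(): per-key dicts are merged in place
# rather than replaced, which is how cached blame data from several runs is
# combined. The keys and values are made up.
def _example_update_dict():
    base = {'path_sha_aloc': {'a.py': {'aaaa': 3}}}
    new = {'path_sha_aloc': {'b.py': {'bbbb': 1}}, 'path_set': {'b.py'}}
    merged = Persistable.update_dict(base, new)
    assert merged is base
    assert merged == {'path_sha_aloc': {'a.py': {'aaaa': 3}, 'b.py': {'bbbb': 1}},
                      'path_set': {'b.py'}}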
class BlameRepoState(Persistable):
"""Repository level persisted data structures
Currently this is just sha_date_author.
"""
TEMPLATE = {'sha_date_author': lambda: {}}
class BlameRevState(Persistable):
"""Revision level persisted data structures
The main structures are path_sha_aloc and path_sha_sloc.
"""
TEMPLATE = {
'path_sha_aloc': lambda: {}, # aloc for all LoC
'path_sha_sloc': lambda: {}, # sloc for source LoC
'path_set': lambda: set(),
'bad_path_set': lambda: set(),
}
def get_lexer(path, text):
try:
return guess_lexer_for_filename(path, text[:1000])
except pygments.util.ClassNotFound:
return None
COMMENTS = {
Comment,
Literal.String.Doc,
Comment.Multiline,
}
class LineState(object):
def __init__(self):
self.sloc = set()
self.tokens = []
self.lnum = 0
self.ltype = None
def tokens_to_lines(self, has_eol):
        is_comment = self.ltype in COMMENTS
is_blank = self.ltype == Text
text = concat(self.tokens)
if has_eol:
lines = text[:-1].split('\n')
else:
            lines = text.split('\n')
for line in lines:
self.lnum += 1
if not (is_comment or is_blank):
self.sloc.add(self.lnum)
self.tokens = []
self.ltype = None
def get_sloc_lines(path):
"""Determine the lines in file `path` that are source code. i.e. Not space or comments.
This requires a parser for this file type, which exists for most source code files in the
Pygment module.
Returns: If a Pygment lexer can be found for file `path`
{line_n} i.e. set of 1-offset line number for lines in `path` that are source.
Otherwise None
"""
with open(path, 'rb') as f:
text = decode_to_str(f.read())
lexer = get_lexer(path, text)
if lexer is None:
return None
line_state = LineState()
for ttype, value in lex(text, lexer):
if not line_state.ltype:
line_state.ltype = ttype
elif line_state.ltype == Text:
if ttype is not None:
line_state.ltype = ttype
elif line_state.ltype == Punctuation:
if ttype is not None and ttype != Text:
line_state.ltype = ttype
if value:
line_state.tokens.append(value)
if value.endswith('\n'):
line_state.tokens_to_lines(True)
return line_state.sloc
def _task_extract_author_sha_loc(args):
"""Wrapper around blame and comment detection code to allow it to be executed by a
multiprocessing Pool.
Runs git blame and parses output to extract LoC by sha for all the sha's (SHA-1 hashes) in
the blame output.
Runs a Pygment lexer over the file if there is a matching lexer and combines this with the
blame parse to extract SLoC for each git hash found
args: max_date, path
max_date: Latest valid date for a commit
path: path of file to analyze
    Returns: path, sha_date_author, sha_aloc, sha_sloc, exception
        path: from `args`
        sha_date_author: {sha: (date, author)} over all SHA-1 hashes found in `path`
        sha_aloc: {sha: aloc} over all SHA-1 hashes found in `path`. aloc = count of all lines
        sha_sloc: {sha: sloc} over all SHA-1 hashes found in `path`. sloc = count of source lines
        exception: None on success, otherwise the exception raised while blaming `path`
"""
max_date, path = args
sha_date_author, sha_aloc, sha_sloc, exception = None, None, None, None
try:
text = git_blame_text(path)
line_sha_date_author = _parse_blame(max_date, text, path)
sloc_lines = get_sloc_lines(path)
sha_date_author, sha_aloc, sha_sloc = _compute_author_sha_loc(line_sha_date_author,
sloc_lines)
except Exception as e:
exception = e
if not DO_MULTIPROCESSING and not isinstance(e, (GitException, CalledProcessError,
IsADirectoryError)):
print('_task_extract_author_sha_loc: %s: %s' % (type(e), e), file=sys.stderr)
raise
return path, sha_date_author, sha_aloc, sha_sloc, exception
class BlameState(object):
"""A BlameState contains data from `git blame` that are used to compute reports.
This data can take a long time to generate so we allow it to be saved to and loaded from
disk so that it can be reused between runs.
Data members: (All are read-only)
repo_dir
sha_date_author
path_sha_aloc
path_sha_sloc
path_set
bad_path_set
Typical usage:
blame_state.load() # Load existing data from disk
changed = blame_state.update_data(file_set) # Blame files in file_set to update data
if changed:
blame_state.save() # Save updated data to disk
Internal members: 'repo_dir', '_repo_state', '_rev_state', '_repo_base_dir'
Disk storage
------------
<repo_base_dir> Defaults to ~/git.stats/<repository name>
└── cache
├── 241d0c54 Data for revision 241d0c54
│ ├── data.pkl The data in a bzipped pickle.
│ ├── manifest Python file with dict of data keys and lengths
│ └── summary Python file with dict of summary date
...
├
├── e7a3e5c4 Data for revision e7a3e5c4
│ ├── data.pkl
│ ├── manifest
│ └── summary
├── data.pkl Repository level data
├── manifest
└── summary
"""
def _debug_check(self):
"""Debugging code to check consistency of the data in a BlameState
"""
if not STRICT_CHECKING:
return
assert 'path_sha_aloc' in self._rev_state.catalog
path_sha_loc = self.path_sha_aloc
path_set = self.path_set
for path, sha_loc in path_sha_loc.items():
assert path in path_set, '%s not in self.path_set' % path
for sha, loc in sha_loc.items():
date, author = self.sha_date_author[sha]
if set(self.path_sha_aloc.keys()) | self.bad_path_set != self.path_set:
for path in sorted((set(self.path_sha_aloc.keys()) | self.bad_path_set) - self.path_set)[:20]:
print('!@!', path in self.path_sha_aloc, path in self.bad_path_set)
assert set(self.path_sha_aloc.keys()) | self.bad_path_set == self.path_set, (
'path sets wrong %d %d\n'
'(path_sha_aloc | bad_path_set) - path_set: %s\n'
'path_set - (path_sha_aloc | bad_path_set): %s\n' % (
len((set(self.path_sha_aloc.keys()) | self.bad_path_set) - self.path_set),
len(self.path_set - (set(self.path_sha_aloc.keys()) | self.bad_path_set)),
sorted((set(self.path_sha_aloc.keys()) | self.bad_path_set) - self.path_set)[:20],
sorted(self.path_set - (set(self.path_sha_aloc.keys()) | self.bad_path_set))[:20]
))
def __init__(self, repo_base_dir, repo_summary, rev_summary):
"""
repo_base_dir: Root of data saved for this repository. This is <repo_dir> in the storage
diagram. Typically ~/git.stats/<repository name>
repo_summary = {
'remote_url': remote_url,
'remote_name': remote_name,
}
rev_summary = {
'revision_sha': revision_sha,
'branch': git_current_branch(),
'description': description,
'name': git_name(),
'date': revision_date,
}
"""
self._repo_base_dir = repo_base_dir
self._repo_dir = path_join(repo_base_dir, 'cache')
self._repo_state = BlameRepoState(repo_summary, self.repo_dir)
rev_dir = path_join(self._repo_state.base_dir,
sha_str(rev_summary['revision_sha']))
self._rev_state = BlameRevState(rev_summary, rev_dir)
def copy(self, rev_dir):
"""Returns: A copy of self with its rev_dir member replaced by `rev_dir`
"""
blame_state = BlameState(self._repo_base_dir, self._repo_state.summary,
self._rev_state.summary)
blame_state._rev_state.base_dir = rev_dir
return blame_state
def load(self, max_date):
"""Loads a previously saved copy of it data from disk.
Returns: self
"""
valid_repo = self._repo_state.load()
valid_rev = self._rev_state.load()
if STRICT_CHECKING:
if max_date is not None:
_debug_check_dates(max_date, self.sha_date_author, self.path_sha_aloc)
assert 'path_sha_aloc' in self._rev_state.catalog, self._rev_state.catalog.keys()
assert 'path_sha_sloc' in self._rev_state.catalog, self._rev_state.catalog.keys()
self._debug_check()
return self, valid_repo and valid_rev
def save(self):
"""Saves a copy of its data to disk
"""
self._repo_state.save()
self._rev_state.save()
if STRICT_CHECKING:
self.load(None)
def __repr__(self):
return repr({k: repr(v) for k, v in self.__dict__.items()})
@property
def repo_dir(self):
"""Returns top directory for this repo's cached data.
Typically ~/git.stats/<repository name>/cache
"""
return self._repo_dir
@property
def sha_date_author(self):
"""Returns: {sha: (date, author)} for all commits that have been found in blaming this
repository. sha is SHA-1 hash of commit
This is a per-repository dict.
"""
return self._repo_state.catalog['sha_date_author']
@property
def path_sha_aloc(self):
"""Returns: {path: [(sha, loc)]} for all files that have been found in blaming this
revision of this repository.
path_sha_loc[path] is a list of (sha, loc) where sha = SHA-1 hash of commit and
loc = lines of code from that commit in file `path`
This is a per-revision dict.
"""
return self._rev_state.catalog['path_sha_aloc']
    @property
    def path_sha_sloc(self):
        """Returns: {path: [(sha, loc)]} for all files that have been found in blaming this
        revision of this repository.
        path_sha_sloc[path] is a list of (sha, loc) where sha = SHA-1 hash of commit and
        loc = lines of code from that commit in file `path`
        This is a per-revision dict.
        """
        return self._rev_state.catalog['path_sha_sloc']
@property
def path_set(self):
"""Returns: set of paths of files that have been attempted to blame in this revision.
This is a per-revision dict.
"""
return self._rev_state.catalog['path_set']
@property
def bad_path_set(self):
"""Returns: set of paths of files that have been unsuccessfully attempted to blame in this
revision.
bad_path_set ∪ path_sha_aloc.keys() == path_set
This is a per-revision dict.
"""
return self._rev_state.catalog['bad_path_set']
def _get_peer_revisions(self):
"""Returns: data dicts of all revisions that have been blamed for this repository except
this revision.
"""
peer_dirs = (rev_dir for rev_dir in glob.glob(path_join(self.repo_dir, '*'))
if rev_dir != self._rev_state.base_dir and
os.path.exists(path_join(rev_dir, 'data.pkl')))
for rev_dir in peer_dirs:
rev, valid = self.copy(rev_dir).load(None)
if valid:
yield rev_dir, rev
def _update_from_existing(self, file_set):
"""Updates state of this repository / revision with data saved from blaming earlier
revisions in this repository.
"""
assert isinstance(file_set, set), type(file_set)
# remaining_path_set = files in `file_set` that we haven't yet loaded
remaining_path_set = file_set - self.path_set
print('-' * 80)
print('Update data from previous blames. %d remaining of %d files' % (
len(remaining_path_set), len(file_set)))
if not remaining_path_set:
return
peer_revisions = list(self._get_peer_revisions())
print('Checking up to %d peer revisions for blame data' % len(peer_revisions))
this_sha = self._rev_state.summary['revision_sha']
this_date = self._rev_state.summary['date']
peer_revisions.sort(key=lambda dir_rev: dir_rev[1]._rev_state.summary['date'])
for i, (that_dir, that_rev) in enumerate(peer_revisions):
if not remaining_path_set:
break
print('%2d: %s,' % (i, that_dir), end=' ')
that_date = that_rev._rev_state.summary['date']
sign = '> ' if that_date > this_date else '<='
print('%s %s %s' % (date_str(that_date), sign, date_str(this_date)), end=' ')
# This is important. git diff can report 2 versions of a file as being identical while
# git blame reports different commits for 1 or more lines in the file
# In these cases we use the older commits.
if that_date > this_date:
print('later')
continue
that_sha = that_rev._rev_state.summary['revision_sha']
that_path_set = that_rev.path_set
that_bad_path_set = that_rev.bad_path_set
try:
diff_set = set(git_diff(this_sha, that_sha))
except subprocess.CalledProcessError:
print('git_diff(%s, %s) failed. Has someone deleted an old revision %s?' % (
this_sha, that_sha, that_sha), file=sys.stderr)
continue
self.bad_path_set.update(that_bad_path_set - diff_set)
# existing_path_set: files in remaining_path_set that we already have data for
existing_path_set = remaining_path_set & (that_path_set - diff_set)
for path in existing_path_set:
if path in that_rev.path_sha_aloc:
self.path_sha_aloc[path] = that_rev.path_sha_aloc[path]
if STRICT_CHECKING:
for sha in self.path_sha_aloc[path].keys():
assert sha in self.sha_date_author, '\n%s\nthis=%s\nthat=%s' % (
(sha, path),
self._rev_state.summary, that_rev._rev_state.summary)
if path in that_rev.path_sha_sloc:
self.path_sha_sloc[path] = that_rev.path_sha_sloc[path]
self.path_set.add(path)
remaining_path_set.remove(path)
print('%d files of %d remaining, diff=%d, used=%d' % (
len(remaining_path_set), len(file_set), len(diff_set), len(existing_path_set)))
print()
bad_path_set = self.bad_path_set
bad_path_set -= set(self.path_sha_aloc.keys())
path_set = self.path_set
path_set |= set(self.path_sha_aloc.keys()) | bad_path_set
self._debug_check()
def _update_new_files(self, file_set, force):
"""Computes base statistics over whole revision
Blames all files in `file_set`
Updates: sha_date_author, path_sha_loc for files that are not already in path_sha_loc
"""
rev_summary = self._rev_state.summary
sha_date_author = self.sha_date_author
if not force:
file_set = file_set - self.path_set
n_files = len(file_set)
print('-' * 80)
print('Update data by blaming %d files' % len(file_set))
loc0 = sum(sum(sha_loc.values()) for sha_loc in self.path_sha_aloc.values())
commits0 = len(sha_date_author)
start = time.time()
blamed = 0
last_loc = loc0
last_i = 0
for path in file_set:
self.path_set.add(path)
if os.path.basename(path) in {'.gitignore'}:
self.bad_path_set.add(path)
max_date = rev_summary['date']
args_list = [(max_date, path) for path in file_set]
args_list.sort(key=lambda dip: -os.path.getsize(dip[1]))
with ProcessPool(ProcessPool.PROCESS, N_BLAME_PROCESSES) as pool:
for i, (path, h_d_a, sha_loc, sha_sloc, e) in enumerate(
pool.imap_unordered(_task_extract_author_sha_loc, args_list)):
if e is not None:
apath = os.path.abspath(path)
self.bad_path_set.add(path)
if isinstance(e, GitException):
if e.git_msg:
if e.git_msg != 'is empty':
print(' %s %s' % (apath, e.git_msg), file=sys.stderr)
else:
print(' %s cannot be blamed' % apath, file=sys.stderr)
continue
elif isinstance(e, (subprocess.CalledProcessError, IsADirectoryError)):
if not os.path.exists(path):
print(' %s no longer exists' % apath, file=sys.stderr)
continue
elif os.path.isdir(path):
print(' %s is a directory' % apath, file=sys.stderr)
continue
elif stat.S_IXUSR & os.stat(path)[stat.ST_MODE]:
print(' %s is an executable. e=%s' % (apath, e), file=sys.stderr)
continue
raise
else:
if path in self.bad_path_set:
self.bad_path_set.remove(path)
self.sha_date_author.update(h_d_a)
self.path_sha_aloc[path] = sha_loc
if sha_sloc:
self.path_sha_sloc[path] = sha_sloc
if i - last_i >= 100:
duration = time.time() - start
loc = sum(sum(sha_loc.values()) for sha_loc in self.path_sha_aloc.values())
if loc != last_loc:
print('%d of %d files (%.1f%%), %d bad, %d LoC, %d commits, %.1f secs, %s' %
(i, n_files, 100 * i / n_files, i - blamed, loc - loc0,
len(sha_date_author) - commits0, duration,
procrustes(path, width=65)))
sys.stdout.flush()
last_loc = loc
last_i = i
blamed += 1
if STRICT_CHECKING:
_debug_check_dates(max_date, sha_date_author, self.path_sha_aloc)
assert path in self.path_sha_aloc, os.path.abspath(path)
assert self.path_sha_aloc[path], os.path.abspath(path)
assert sum(self.path_sha_aloc[path].values()), os.path.abspath(path)
if STRICT_CHECKING:
for path in file_set - self.bad_path_set:
if os.path.basename(path) in {'.gitignore'}:
continue
assert path in self.path_sha_aloc, os.path.abspath(path)
print('~' * 80)
duration = time.time() - start
aloc = sum(sum(sha_loc.values()) for sha_loc in self.path_sha_aloc.values())
sloc = sum(sum(sha_loc.values()) for sha_loc in self.path_sha_sloc.values())
print('%d files,%d blamed,%d lines, %d source lines,%d commits,%.1f seconds' % (len(file_set), blamed,
aloc, sloc, len(sha_date_author), duration))
def update_data(self, file_set, force):
"""Updates blame state for this revision for this repository over files in 'file_set'
        If `force` is True then blame all files in file_set, otherwise try to re-use as much
existing blame data as possible.
Updates:
repository: sha_date_author
revision: path_sha_aloc, path_sha_sloc, file_set, bad_path_set
"""
assert isinstance(file_set, set), type(file_set)
n_paths0 = len(self.path_set)
print('^' * 80)
print('Update data for %d files. Previously %d files for this revision' % (
len(file_set), n_paths0))
self._debug_check()
if not force:
self._update_from_existing(file_set)
self._debug_check()
self._update_new_files(file_set, force)
self._debug_check()
if STRICT_CHECKING:
for path in set(file_set) - self.bad_path_set:
assert path in self.path_sha_aloc, path
return len(self.path_set) > n_paths0
def _filter_strings(str_iter, pattern):
"""Returns: Subset of strings in iterable `str_iter` that match regular expression `pattern`.
"""
if pattern is None:
return None
assert isinstance(str_iter, set), type(str_iter)
regex = re.compile(pattern, re.IGNORECASE)
return {s for s in str_iter if regex.search(s)}
def filter_path_sha_loc(blame_state, path_sha_loc, file_set=None, author_set=None):
""" blame_state: BlameState of revision
path_sha_loc: {path: {sha: loc}} over all files in revisions
file_set: files to filter on or None
author_set: authors to filter on or None
Returns: dict of items in `path_sha_loc` that match files in `file_set` and authors in
`author_set`.
NOTE: Does NOT modify path_sha_loc
"""
assert file_set is None or isinstance(file_set, set), type(file_set)
assert author_set is None or isinstance(author_set, set), type(author_set)
if file_set:
path_sha_loc = {path: sha_loc
for path, sha_loc in path_sha_loc.items()
if path in file_set}
if STRICT_CHECKING:
for path in path_sha_loc:
assert path_sha_loc[path], path
assert sum(path_sha_loc[path].values()), path
if author_set:
sha_set = {sha for sha, (_, author) in blame_state.sha_date_author.items()
if author in author_set}
path_sha_loc = {path: {sha: loc for sha, loc in sha_loc.items()
if sha in sha_set}
for path, sha_loc in path_sha_loc.items()}
path_sha_loc = {path: sha_loc
for path, sha_loc in path_sha_loc.items() if sha_loc}
if STRICT_CHECKING:
for path in path_sha_loc:
assert path_sha_loc[path], path
assert sum(path_sha_loc[path].values()), path
return path_sha_loc
def _task_show_oneline(sha):
"""Wrapper around git_show_oneline() to allow it to be executed by a multiprocessing Pool.
"""
text, exception = None, None
try:
text = git_show_oneline(sha)
except Exception as e:
exception = e
if not DO_MULTIPROCESSING:
raise
return sha, text, exception
def parallel_show_oneline(sha_iter):
"""Run git_show_oneline() on SHA-1 hashes in `sha_iter` in parallel
sha_iter: Iterable for some SHA-1 hashes
Returns: {sha: text} over SHA-1 hashes sha in `sha_iter`. text is git_show_oneline output
"""
sha_text = {}
exception = None
with ProcessPool(ProcessPool.THREAD, N_SHOW_THREADS) as pool:
for sha, text, e in pool.imap_unordered(_task_show_oneline, sha_iter):
if e:
exception = e
break
sha_text[sha] = text
if exception:
raise exception
return sha_text
def make_sha_path_loc(path_sha_loc):
"""Make a re-organized version of `path_sha_loc`
path_sha_loc: {path: {sha: loc}}
Returns: {sha: {path: loc}}
"""
sha_path_loc = defaultdict(dict)
for path, sha_loc in path_sha_loc.items():
for sha, loc in sha_loc.items():
sha_path_loc[sha][path] = loc
return sha_path_loc
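# Hedged sketch of make_sha_path_loc(): the {path: {sha: loc}} mapping is simply
# inverted to {sha: {path: loc}}. The values are made up.
def _example_make_sha_path_loc():
    path_sha_loc = {'a.py': {'aaaa': 3, 'bbbb': 1}, 'b.py': {'aaaa': 2}}
    sha_path_loc = make_sha_path_loc(path_sha_loc)
    assert sha_path_loc == {'aaaa': {'a.py': 3, 'b.py': 2}, 'bbbb': {'a.py': 1}}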
def make_report_name(default_name, components):
if not components:
return default_name
elif len(components) == 1:
return list(components)[0]
else:
return ('(%s)' % ','.join(sorted(components))) if components else default_name
def make_report_dir(base_dir, default_name, components, max_len=None):
if max_len is None:
max_len = PATH_MAX
name = '.'.join(clean_path(cpt) for cpt in sorted(components)) if components else default_name
return path_join(base_dir, name)[:max_len]
def get_totals(path_sha_loc):
"""Returns: total numbers of files, commits, LoC for files in `path_sha_loc`
"""
all_commits = set()
total_loc = 0
for sha_loc in path_sha_loc.values():
all_commits.update(sha_loc.keys())
total_loc += sum(sha_loc.values())
return len(path_sha_loc), len(all_commits), total_loc
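# Hedged sketch of get_totals(): counts files, distinct commits and total LoC in
# a {path: {sha: loc}} mapping. The values are made up.
def _example_get_totals():
    path_sha_loc = {'a.py': {'aaaa': 10, 'bbbb': 5}, 'b.py': {'aaaa': 3}}
    assert get_totals(path_sha_loc) == (2, 2, 18)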
def write_manifest(blame_state, path_sha_loc, report_dir, name_description, title):
"""Write a README file in `report_dir`
"""
total_files, total_commits, total_loc = get_totals(path_sha_loc)
totals = 'Totals: %d files, %d commits, %d LoC' % (total_files, total_commits, total_loc)
repo_summary = blame_state._repo_state.summary
rev_summary = blame_state._rev_state.summary
details = 'Revision Details\n'\
'----------------\n'\
'Repository: %s (%s)\n'\
'Date: %s\n'\
'Description: %s\n'\
'SHA-1 hash %s\n' % (
repo_summary['remote_name'], repo_summary['remote_url'],
date_str(rev_summary['date']),
rev_summary['description'],
rev_summary['revision_sha'])
with open(path_join(report_dir, 'README'), 'wt') as f:
def put(s=''):
f.write('%s\n' % s)
put(title)
put('=' * len(title))
put()
put(totals)
put()
if details:
put(details)
put()
put('Files in this Directory')
put('-----------------------')
for name, description in sorted(name_description.items()):
put('%-12s: %s' % (name, description))
def compute_tables(blame_state, path_sha_loc):
"""Compute summary tables over whole report showing number of files and LoC by author and
file extension.
blame_state: BlameState of revision
path_sha_loc: {path: {sha: loc}} over files being reported
Returns: df_author_ext_files, df_author_ext_loc where
df_author_ext_files: DataFrame of file counts
        df_author_ext_loc: DataFrame of LoC counts
    with both having rows of authors and columns of file extensions.
"""
sha_date_author = blame_state.sha_date_author
exts = sorted({get_ext(path) for path in path_sha_loc.keys()})
authors = sorted({author for _, author in sha_date_author.values()})
assert '.patch' not in exts
assert '.dump' not in exts
author_ext_files = np.zeros((len(authors), len(exts)), dtype=np.int64)
author_ext_loc = np.zeros((len(authors), len(exts)), dtype=np.int64)
author_index = {author: i for i, author in enumerate(authors)}
ext_index = {ext: i for i, ext in enumerate(exts)}
if STRICT_CHECKING:
for path, v in path_sha_loc.items():
assert sum(v.values()), (path, len(v))
for i, e in enumerate(exts):
assert '--' not in e, e
assert '.' not in e[1:], e
for path, sha_loc in path_sha_loc.items():
ext = get_ext(path)
for sha, loc in sha_loc.items():
_, author = sha_date_author[sha]
a = author_index[author]
e = ext_index[ext]
author_ext_files[a, e] += 1
author_ext_loc[a, e] += loc
df_author_ext_files = DataFrame(author_ext_files, index=authors, columns=exts)
df_author_ext_loc = DataFrame(author_ext_loc, index=authors, columns=exts)
return df_author_ext_files, df_author_ext_loc
def get_tree_loc(path_loc):
""" path_loc: {path: loc} over files in a git repository
Returns: dir_tree_loc_frac for which
dir_tree_loc_frac[path] = loc, frac where
loc: LoC in path and all its descendants
frac: loc / loc_parent where loc_parent is LoC in path's parent and all its descendants
"""
dir_tree = defaultdict(set) # {parent: children} over all directories parent that have children
root = '' # Root of git ls-files directory tree.
for path in path_loc.keys():
child = path
while True:
parent = os.path.dirname(child)
if parent == child:
root = parent
break
dir_tree[parent].add(child)
child = parent
# So we can index children below. See dir_tree[parent][i]
dir_tree = {parent: list(children) for parent, children in dir_tree.items()}
tree_loc = Counter() # {path: loc} over all paths. loc = LoC in path and all its descendants if
# it is a directory
# Traverse dir_tree depth first. Add LoC on leaf nodes then sum LoC over directories when
# ascending
stack = [(root, 0)] # stack for depth first traversal of dir_tree
while stack:
parent, i = stack[-1]
stack[-1] = parent, i + 1 # Iterate over children
if parent not in dir_tree: # Terminal node
tree_loc[parent] = path_loc[parent]
stack.pop() # Ascend
else:
if i < len(dir_tree[parent]): # Not done with children in this frame?
stack.append((dir_tree[parent][i], 0)) # Descend
else: # Sum over descendants
tree_loc[parent] = (path_loc.get(parent, 0) +
sum(tree_loc[child] for child in dir_tree[parent]))
stack.pop() # Ascend
# dir_tree_loc is subset of tree_loc containing only directories (no normal files)
dir_tree_loc = {path: loc for path, loc in tree_loc.items() if path in dir_tree and loc > 0}
# dir_tree_loc_frac: {path: (loc, frac)} over all paths. loc = LoC in path and all its
# descendants. frac = fraction of LoC path's parents and its descendants that are in path and
# its descendants
dir_tree_loc_frac = {path: (loc, 0) for path, loc in dir_tree_loc.items()}
for parent, loc in dir_tree_loc.items():
for child in dir_tree[parent]:
if child in dir_tree_loc:
                dir_tree_loc_frac[child] = (tree_loc[child], tree_loc[child] / loc)
    # Seems more natural to set root fraction to 1.0
    dir_tree_loc_frac[root] = (tree_loc[root], 1.0)
return dir_tree_loc_frac
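# Hedged sketch of get_tree_loc(): file LoC is rolled up the directory tree and
# each directory reports its total LoC plus the fraction of its parent's LoC it
# accounts for (the root is pinned to 1.0). The paths and counts are made up.
def _example_get_tree_loc():
    path_loc = {'src/a.py': 100, 'src/lib/b.py': 50, 'README': 10}
    tree = get_tree_loc(path_loc)
    assert tree[''] == (160, 1.0)            # repository root
    assert tree['src'] == (150, 150 / 160)
    assert tree['src/lib'] == (50, 50 / 150)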
def detailed_loc(path_sha_loc):
"""My attempt at showing the distribution of LoC over the directory structure of a git
repository.
I am using a table which seems unnatural but has the advantage that it can be viewed in
powerful .csv table programs such as Excel.
path_sha_loc: {path: {sha: loc}} over files in a git repository
Returns: DataFrame with columns 'dir', 'LoC', 'frac' where
dir: Sub-directories in the git repository
LoC: LoC in dir and all its descendants
frac: loc / loc_parent where loc_parent is LoC in dir's parent and all its descendants
"""
path_loc = {path: sum(loc for _, loc in sha_loc.items())
for path, sha_loc in path_sha_loc.items()}
dir_tree_loc_frac = get_tree_loc(path_loc)
dir_loc_frac = [(path, loc, frac) for path, (loc, frac) in dir_tree_loc_frac.items()]
dir_loc_frac.sort()
    return DataFrame(dir_loc_frac, columns=['dir', 'LoC', 'frac'])
import pandas as pd
import numpy as np
import matplotlib.pyplot as pt
from sklearn import linear_model
from sklearn import metrics
from keras import models
from keras import layers
import pickle
df=pd.read_csv("C://Users//Dell//Desktop//SNU//Seventh Semester//Data Mining//Project//Data//finalData.csv")
dfW=pd.read_csv("C://Users//Dell//Desktop//SNU//Seventh Semester//Data Mining//Project//Data//Wastage.csv")
itemset=df["Menu"].str.upper().unique()
#print(sorted(itemset))
hm={}
dates=df["Date"].unique()
print(len(itemset))
mean=dfW["TOTAL"].mean()
sd=dfW["TOTAL"].std()
#print(mean)
#print(sd)
def check(wastage):
#print(wastage)
if wastage>=(mean-(.5*sd)) and wastage<=(mean+(.5*sd)):
#print(wastage)
#print(1)
return 1
elif wastage<(mean-(.5*sd)):
#print(wastage)
#print(0)
return 0
elif wastage>(mean+(.5*sd)):
#print(wastage)
#print(2)
return 2
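# Hedged sketch of the banding used by check() above, with made-up statistics
# (mean=10, sd=4) instead of the values read from the wastage CSV: values below
# mean - 0.5*sd map to 0, values above mean + 0.5*sd map to 2, everything in
# between maps to 1.
def _example_check_thresholds():
    mean_, sd_ = 10.0, 4.0
    lo, hi = mean_ - 0.5 * sd_, mean_ + 0.5 * sd_
    assert (lo, hi) == (8.0, 12.0)
    return [0 if w < lo else 2 if w > hi else 1 for w in (5, 9, 15)]  # -> [0, 1, 2]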
df1=pd.DataFrame(columns=range(len(itemset)))
df2 = pd.DataFrame(columns=["Date", "Output"])
import os
os.chdir("D:/George/Projects/PaperTrends/src")
import sys
from twitter import TwitterParser
from arxiv import ArxivAPI
import pandas as pd
from designer import generateIntro
from tqdm import tqdm
class Trend:
def __init__(self, user='arxivtrends', ignoreposted=False):
print("Trend initialized")
self.twitter = TwitterParser(user=user)
self.ignoreposted = ignoreposted
def candidates(self, n=10, feed='popular', loadsave=False, top=10, days=2):
if loadsave:
self.twitter = self.twitter.loadSaved()
else:
self.twitter = self.twitter.loadSaved().parse(keyword="arxiv.org/", regex="\d\d\d\d\.[0-9A-z]*", feed=feed, n=n).save()
# df = self.twitter.aggregated().sort_values('favorited', ascending=False)
df = self.twitter.filter(favorited=1, retweeted=1).aggregated(favorited=1, retweeted=1, tweets=2).sort_values('retweeted', ascending=False)
try:
if not self.ignoreposted:
posted = self._loadPosted()['key'].astype(str).values
self.candDF = df[~df['key'].isin(posted)].head(top)
else:
self.candDF = df.head(top)
except:
self.candDF = df.head(top)
print(self.candDF)
print(f"> Selected {len(self.candDF)} candidates")
# print(self.candDF)
return self
def _loadPosted(self):
try:
self.posted = pd.read_csv("../db/csv/posted.csv")
except:
self.posted = pd.DataFrame(columns=['key','posted'])
return self.posted
def parse(self):
print(f"> Fetching keys {self.candDF['key'].values}")
arxiv = ArxivAPI().fetch(self.candDF['key'].values)
self.df = pd.DataFrame(arxiv)
return self
def generate(self):
print(f"> Generating intros")
pbar = tqdm(self.df.to_dict('records'))
for record in pbar:
pbar.set_description(f"Intro image {record['key']}")
try:
image = generateIntro(record)
image.save(f"../db/intros/{record['key'][2:]}.jpeg", quality=95)
            except FileNotFoundError as e:
                print("Intro generation Failed", record['key'], f"{sys.exc_info()[0]} ~ {str(e)}")
            except Exception as e:
                print("Intro generation Failed", record['key'], f"{sys.exc_info()[0]} ~ {str(e)}")
                # drop the record that failed so it is not tweeted later
                self.df = self.df[~self.df['key'].isin([record['key']])]
return self
def getDF(self):
return self.df
def post(self):
print(f"> Posting tweets")
try:
posted = pd.read_csv("../db/csv/posted.csv")
except:
posted = pd.DataFrame(columns=['key','posted'])
e = pd.read_csv('../db/csv/emojis.csv', encoding='utf-8')
pbar = tqdm(self.df.to_dict('records'))
try:
for rec in pbar:
tweet = self.composeTweet(rec,e)
print(tweet)
self.twitter.api.update_with_media(f"../db/intros/{rec['key'][2:]}.jpeg", tweet)
if not self.ignoreposted:
posted = posted.append({
'key': str(rec['key']),
                        'posted': pd.Timestamp.now()
}, ignore_index=True)
except Exception as e:
print(str(e))
print(rec['keywords'])
if not self.ignoreposted:
posted.to_csv("../db/csv/posted.csv", index=False)
return self
def composeTweet(self, rec, e):
emoji = e[e['id'] == rec['category_primary_id']]['emoji'].values[0]
tweet = f"{emoji} {rec['title']}\n"
tweet += f"✍️ {rec['author_main']}\n"
tweet += f"🔗 {rec['pdf']}\n\n"
try:
tweet += f"🔊 Tweeted by {self.candDF[self.candDF['key'] == rec['key']].head(1)['users'].values[0]} et al.\n"
except:
print("no users")
keywords = '#'+rec['category_primary'].replace('-','').replace(' ','')
rec['keywords'] = rec['keywords'].replace('#boringformattinginformation','')
keywords += ' ' + ' '.join(list(map(lambda x: '#'+x, rec['keywords'].replace('.','').replace(' ','').replace('-','').split(',')))) if rec['keywords'] != '-' and rec['keywords'] != '' else ''
keywords += ' ' + ' '.join(list(map(lambda x: '#'+x, rec['category_ids'].replace('.','').replace(' ','').replace('-','').split(','))))
tweet += f"{keywords}"
return tweet
def postCustom(self, keys):
try:
            posted = pd.read_csv("../db/csv/posted.csv")
from numpy import *
from numpy.random import *
import pandas as pd
import sqlite3
from os import remove
from os.path import exists
from itertools import combinations
db_path = 'db.sqlite3'
force = 1
nb_client = 1e1
nb_guarantee = 1e1
nb_fund_price = 1e1
nb_address_N = 5
nb_address_p = 0.1
nb_purchase_mu = 5
nb_purchase_sigma = 2
cities = ['ottawa', 'edmonton', 'victoria', 'winnipeg', 'fredericton', 'st_john', 'halifax', 'toronto', 'charlottetown', 'quebec', 'regina', 'yellowknife', 'iqaluit', 'whitehorse']
guarantees = ['gmdb', 'gmwb']
genders = ['female', 'male']
funds = ['vanguard', 'fidelity', 'rowe', 'merril', 'morgan', 'barclays', 'sachs', 'paribas', 'fargo', 'suisse', 'citi', 'mizuho', 'lazard', 'evercore', 'nomura', 'jefferies']
seed(0)
if not exists(db_path) or force:
print('making db ... ', end='')
if exists(db_path):
remove(db_path)
db = sqlite3.connect(db_path)
nb_client = int(nb_client)
nb_guarantee = int(nb_guarantee)
nb_fund_price = int(nb_fund_price)
# client
g = guarantees + list(','.join(g) for g in combinations(guarantees,2))
client = pd.DataFrame(dict(
cid = arange(nb_client)+1,
city = choice(cities, nb_client),
phone = randint(1e2, 1e3, nb_client),
guarantee = choice(g, nb_client),
gender = choice(genders, nb_client),
date_of_birth = choice(pd.date_range('1970-01-01', '2000-01-01'), nb_client),
))
client.to_sql('client', db, index=False)
# address
    address = pd.DataFrame()
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.tslib as tslib
import pandas.util.testing as tm
import pandas.tseries.period as period
from pandas import (DatetimeIndex, PeriodIndex, period_range, Series, Period,
_np_version_under1p10, Index, Timedelta, offsets)
from pandas.tests.test_base import Ops
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub(self):
rng = period_range('2007-01', periods=50)
result = rng - 5
exp = rng + (-5)
tm.assert_index_equal(result, exp)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with tm.assertRaises(TypeError):
rng - other
with tm.assertRaises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_comp_nat(self):
left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT,
pd.Period('2011-01-03')])
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
freq='H')
exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00',
'2011-01-01 16:00', '2011-01-01 15:00',
'2011-01-01 14:00', '2011-01-01 13:00',
'2011-01-01 12:00', '2011-01-01 11:00',
'2011-01-01 10:00',
'2011-01-01 09:00'], freq='H')
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.period_range('2011-01-01 09:00', freq='H',
periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], freq='H')
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
freq='H')
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], freq='H')
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx) # freq will not be reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_order_compat(self):
def _check_freq(index, expected_index):
if isinstance(index, PeriodIndex):
self.assertEqual(index.freq, expected_index.freq)
pidx = PeriodIndex(['2011', '2012', '2013'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2012, 2013], name='idx')
for idx in [pidx, iidx]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, idx[::-1])
_check_freq(ordered, idx[::-1])
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
_check_freq(ordered, idx[::-1])
pidx = PeriodIndex(['2011', '2013', '2015', '2012',
'2011'], name='pidx', freq='A')
pexpected = PeriodIndex(
['2011', '2011', '2012', '2013', '2015'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2013, 2015, 2012, 2011], name='idx')
iexpected = Index([2011, 2011, 2012, 2013, 2015], name='idx')
for idx, expected in [(pidx, pexpected), (iidx, iexpected)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp,
check_dtype=False)
_check_freq(ordered, idx)
pidx = PeriodIndex(['2011', '2013', 'NaT', '2011'], name='pidx',
freq='D')
result = pidx.sort_values()
expected = PeriodIndex(['NaT', '2011', '2011', '2013'],
name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
result = pidx.sort_values(ascending=False)
expected = PeriodIndex(
['2013', '2011', '2011', 'NaT'], name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
def test_order(self):
for freq in ['D', '2D', '4D']:
idx = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq=freq, name='idx')
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
idx1 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'], freq='D', name='idx1')
exp1 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'], freq='D', name='idx1')
idx2 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
freq='D', name='idx2')
exp2 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
freq='D', name='idx2')
idx3 = PeriodIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], freq='D', name='idx3')
exp3 = PeriodIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], freq='D', name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, 'D')
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
def test_nat_new(self):
idx = pd.period_range('2011-01', freq='M', periods=5, name='x')
result = idx._nat_new()
exp = pd.PeriodIndex([pd.NaT] * 5, freq='M', name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.PeriodIndex([], name='xxx', freq='H')
with tm.assertRaises(TypeError):
# period shift doesn't accept freq
idx.shift(1, freq='H')
tm.assert_index_equal(idx.shift(0), idx)
tm.assert_index_equal(idx.shift(3), idx)
idx = pd.PeriodIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                      '2011-01-01 12:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(0), idx)
exp = pd.PeriodIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                      '2011-01-01 15:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(3), exp)
exp = pd.PeriodIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                      '2011-01-01 09:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(-3), exp)
def test_repeat(self):
index = pd.period_range('2001-01-01', periods=2, freq='D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], freq='D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.period_range('2001-01-01', periods=2, freq='2D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], freq='2D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.PeriodIndex(['2001-01', 'NaT', '2003-01'], freq='M')
exp = pd.PeriodIndex(['2001-01', '2001-01', '2001-01',
'NaT', 'NaT', 'NaT',
'2003-01', '2003-01', '2003-01'], freq='M')
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
def test_nat(self):
self.assertIs(pd.PeriodIndex._na_value, pd.NaT)
self.assertIs(pd.PeriodIndex([], freq='M')._na_value, pd.NaT)
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.PeriodIndex(['2011-01-01', 'NaT'], freq='D')
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for freq in ['D', 'M']:
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'],
freq=freq)
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'],
freq='H')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal values, different freq
idx3 = pd.PeriodIndex._simple_new(idx.asi8, freq='H')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestPeriodIndexSeriesMethods(tm.TestCase):
""" Test PeriodIndex and Period Series Ops consistency """
def _check(self, values, func, expected):
idx = pd.PeriodIndex(values)
result = func(idx)
if isinstance(expected, pd.Index):
tm.assert_index_equal(result, expected)
else:
# comp op results in bool
tm.assert_numpy_array_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_ops(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'2011-05', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx + 2, lambda x: x - 2, idx)
result = idx - Period('2011-01', freq='M')
exp = pd.Index([0, 1, 2, 3], name='idx')
tm.assert_index_equal(result, exp)
result = Period('2011-01', freq='M') - idx
exp = pd.Index([0, -1, -2, -3], name='idx')
tm.assert_index_equal(result, exp)
def test_pi_ops_errors(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
s = pd.Series(idx)
msg = r"unsupported operand type\(s\)"
for obj in [idx, s]:
for ng in ["str", 1.5]:
with tm.assertRaisesRegexp(TypeError, msg):
obj + ng
with tm.assertRaises(TypeError):
# error message differs between PY2 and 3
ng + obj
with tm.assertRaisesRegexp(TypeError, msg):
obj - ng
with tm.assertRaises(TypeError):
np.add(obj, ng)
if _np_version_under1p10:
self.assertIs(np.add(ng, obj), NotImplemented)
else:
with tm.assertRaises(TypeError):
np.add(ng, obj)
with tm.assertRaises(TypeError):
np.subtract(obj, ng)
if _np_version_under1p10:
self.assertIs(np.subtract(ng, obj), NotImplemented)
else:
with tm.assertRaises(TypeError):
np.subtract(ng, obj)
def test_pi_ops_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'NaT', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx, lambda x: np.add(x, 2), expected)
self._check(idx + 2, lambda x: x - 2, idx)
self._check(idx + 2, lambda x: np.subtract(x, 2), idx)
# freq with mult
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='2M', name='idx')
expected = PeriodIndex(['2011-07', '2011-08',
'NaT', '2011-10'], freq='2M', name='idx')
self._check(idx, lambda x: x + 3, expected)
self._check(idx, lambda x: 3 + x, expected)
self._check(idx, lambda x: np.add(x, 3), expected)
self._check(idx + 3, lambda x: x - 3, idx)
self._check(idx + 3, lambda x: np.subtract(x, 3), idx)
def test_pi_ops_array_int(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
f = lambda x: x + np.array([1, 2, 3, 4])
exp = PeriodIndex(['2011-02', '2011-04', 'NaT',
'2011-08'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.add(x, np.array([4, -1, 1, 2]))
exp = PeriodIndex(['2011-05', '2011-01', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: x - np.array([1, 2, 3, 4])
exp = PeriodIndex(['2010-12', '2010-12', 'NaT',
'2010-12'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.subtract(x, np.array([3, 2, 3, -2]))
exp = PeriodIndex(['2010-10', '2010-12', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
def test_pi_ops_offset(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
f = lambda x: x + offsets.Day()
exp = PeriodIndex(['2011-01-02', '2011-02-02', '2011-03-02',
'2011-04-02'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x + offsets.Day(2)
exp = PeriodIndex(['2011-01-03', '2011-02-03', '2011-03-03',
'2011-04-03'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x - offsets.Day(2)
exp = PeriodIndex(['2010-12-30', '2011-01-30', '2011-02-27',
'2011-03-30'], freq='D', name='idx')
self._check(idx, f, exp)
def test_pi_offset_errors(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
s = pd.Series(idx)
# Series op is applied per Period instance, thus error is raised
# from Period
msg_idx = r"Input has different freq from PeriodIndex\(freq=D\)"
msg_s = r"Input cannot be converted to Period\(freq=D\)"
for obj, msg in [(idx, msg_idx), (s, msg_s)]:
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
obj + offsets.Hour(2)
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
| offsets.Hour(2) | pandas.offsets.Hour |
import sys
sys.path.insert(0,'/usr/local/lib/python3.7/site-packages')
import pandas as pd
import numpy as np
import scipy
import bottleneck as bn
from sklearn.model_selection import KFold
from sklearn.preprocessing import normalize
from sklearn.feature_selection import f_regression
from scipy.stats import pearsonr, spearmanr
import pickle
'''
An implementation of the MRMR feature selection method
By: <NAME> (<EMAIL>)
'''
class mrmrFeatureSelector():
def __init__(self, n_features = 10, verbose = 0, estimator = 'pearson'):
self.n_features = n_features
self.verbose = verbose
self.estimator = estimator
def fit(self, X, y):
return self._fit(X, y)
def transform(self, X):
return self._transform(X)
def fit_transform(self, X, y):
self._fit(X, y)
return self._transform(X)
def _fit(self, X, y):
self.X = X
n, p = X.shape
self.y = y.reshape((n,1))
selected = []
# List of all feature indices
Feat = list(range(p))
feature_score_matrix = np.zeros((self.n_features, p))
feature_score_matrix[:] = np.nan
selected_scores = []
# ---------------------------------------------------------------------
# FIND FIRST FEATURE
# ---------------------------------------------------------------------
#Calculate f-value between features and target variable
(F_scores,pvalues) = f_regression(X,y)
selected, Feat = self._add_remove(selected, Feat, bn.nanargmax(F_scores))
selected_scores.append(bn.nanmax(F_scores))
# ---------------------------------------------------------------------
# FIND SUBSEQUENT FEATURES
# ---------------------------------------------------------------------
n_features = self.n_features
while len(selected) < n_features:
s=len(selected) - 1
feature_score_matrix[s, Feat] = self.calculate_correlation(X, Feat, X.iloc[:,selected[-1]])
# make decision based on the chosen FS algorithm
fmm = feature_score_matrix[:len(selected), Feat]
# if self.method == 'MRMR':
if bn.allnan(bn.nanmean(fmm, axis = 0)):
break
#MIQ or MIS "{-}"
MRMR = F_scores[Feat] / bn.nanmean(fmm, axis=0)
fselected = Feat[bn.nanargmax(MRMR)]
selected_scores.append(bn.nanmax(bn.nanmin(fmm, axis=0)))
selected, Feat = self._add_remove(selected, Feat, fselected)
# ---------------------------------------------------------------------
# SAVE RESULTS
# ---------------------------------------------------------------------
self.n_features_ = len(selected)
self.support_ = np.zeros(p, dtype=np.bool)
self.support_[selected] = 1
self.ranking_ = selected
self.scores_ = selected_scores
return self
def _transform(self, X):
# sanity check
try:
self.ranking_
except AttributeError:
raise ValueError('You need to call the fit(X, y) method first.')
X = X.iloc[:,self.support_]
return X
def _add_remove(self, S, F, i):
"""
Helper function: removes ith element from F and adds it to S.
"""
S.append(i)
F.remove(i)
return S, F
def _print_results(self, S, MIs):
out = ''
out += ('Selected feature #' + str(len(S)) + ' / ' +
str(self.n_features) + ' : ' + str(S[-1]))
if self.verbose > 1:
out += ', ' + self.method + ' : ' + str(MIs[-1])
print (out)
def calculate_correlation(self, X, F, s):
#should return positive numbers always
if self.estimator == 'pearson':
res = [abs(scipy.stats.pearsonr(s, X.iloc[:,f])[0]) for f in F]
return res
elif self.estimator == 'spearman':
res = [abs(scipy.stats.spearmanr(s, X.iloc[:,f])[0]) for f in F]
return res
else:
print("Estimator is not supported, please select pearson or spearman :)")
"""
Feature Selection Function
Input:
path_data: path of feature matrix file
dm_file: filename of feature matrix
path_GT: path of ground truth file
cc_file: filename of ground truth file
label: name of target variable
features: modalities that are used
i_cv: inner cross validation folds
o_cv: outer cross validation folds
Output:
selected_features: List of selected features
"""
def featureSelection(path_data,dm_file,path_GT,cc_file,label,features,i_cv,o_cv):
##load the feature matrix
df_feature= | pd.read_csv(path_data+dm_file) | pandas.read_csv |
import pandas as pd
passageiros = pd.read_csv('Passageiros.csv')
passageiros.head()
import seaborn as sns
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (10, 6)
mpl.rcParams['font.size'] = 22
sns.lineplot(x='tempo',y='passageiros', data=passageiros,label='dado_completo')
## Scaling the data
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
sc.fit(passageiros)
dado_escalado = sc.transform(passageiros)
x=dado_escalado[:,0] # Features - Time
y=dado_escalado[:,1] # Target - Number of passengers
import matplotlib.pyplot as plt
sns.lineplot(x=x,y=y,label='dado_escalado')
plt.ylabel('Passageiros')
plt.xlabel('Data')
## Splitting into train and test
tamanho_treino = int(len(passageiros)*0.9) # Taking 90% of the data for training
tamanho_teste = len(passageiros)-tamanho_treino # The rest is reserved for testing
xtreino = x[0:tamanho_treino]
ytreino = y[0:tamanho_treino]
xteste = x[tamanho_treino:len(passageiros)]
yteste = y[tamanho_treino:len(passageiros)]
sns.lineplot(x=xtreino,y=ytreino,label='treino')
sns.lineplot(x=xteste,y=yteste,label='teste')
# Lesson 2
## Linear Regression
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
regressor = Sequential()
regressor.add(Dense(1, input_dim=1, kernel_initializer='Ones',
activation='linear',use_bias=False))
regressor.compile(loss='mean_squared_error',optimizer='adam')
regressor.summary()
regressor.fit(xtreino,ytreino)
y_predict= regressor.predict(xtreino) # Predicting the training data (the fit)
sns.lineplot(x=xtreino,y=ytreino,label='treino')
sns.lineplot(x=xtreino,y=y_predict[:,0],label='ajuste_treino')
d = {'tempo': xtreino, 'passageiros': y_predict[:,0]}
resultados = pd.DataFrame(data=d)
resultados
resultado_transf = sc.inverse_transform(resultados)
resultado_transf = pd.DataFrame(resultado_transf)
resultado_transf.columns = ['tempo','passageiros']
sns.lineplot(x="tempo",y="passageiros",data=passageiros)
sns.lineplot(x="tempo",y="passageiros",data=resultado_transf)
y_predict_teste= regressor.predict(xteste) # Predicting the test data (the future)
d = {'tempo': xteste, 'passageiros': y_predict_teste[:,0]}
resultados_teste = pd.DataFrame(data=d)
resultado_transf_teste = sc.inverse_transform(resultados_teste)
resultado_transf_teste = pd.DataFrame(resultado_transf_teste)
resultado_transf_teste.columns = ['tempo','passageiros']
sns.lineplot(x="tempo",y="passageiros",data=passageiros,label='dado_completo')
sns.lineplot(x="tempo",y="passageiros",data=resultado_transf,label='ajuste_treino')
sns.lineplot(x="tempo",y="passageiros",data=resultado_transf_teste,label='previsão')
## Non-linear regression
regressor2 = Sequential()
regressor2.add(Dense(8, input_dim=1, kernel_initializer='random_uniform',
activation='sigmoid',use_bias=False))
regressor2.add(Dense(8, kernel_initializer='random_uniform',
activation='sigmoid',use_bias=False))
regressor2.add(Dense(1, kernel_initializer='random_uniform',
activation='linear',use_bias=False))
regressor2.compile(loss='mean_squared_error',optimizer='adam')
regressor2.summary()
regressor2.fit(xtreino,ytreino,epochs =500)
y_predict= regressor2.predict(xtreino) # Predicting the training data (the fit)
y_predict_teste= regressor2.predict(xteste) # Predicting the test data (the future)
sns.lineplot(x=xtreino,y=ytreino,label='treino')
sns.lineplot(x=xteste,y=yteste,label='teste')
sns.lineplot(x=xtreino,y=y_predict[:,0],label='ajuste_treino')
sns.lineplot(x=xteste,y=y_predict_teste[:,0],label='previsão')
# Lesson 3
## Changing how we pass the data
# Now x and y will hold different values. X will contain the number of passengers at an earlier time and y will contain the number of passengers at t+1, for example.
vetor = pd.DataFrame(ytreino)[0]
import numpy as np
def separa_dados(vetor,n_passos):
"""Entrada: vetor: número de passageiros
n_passos: número de passos no regressor
Saída:
X_novo: Array 2D
y_novo: Array 1D - Nosso alvo
"""
X_novo, y_novo = [], []
for i in range(n_passos,vetor.shape[0]):
X_novo.append(list(vetor.loc[i-n_passos:i-1]))
y_novo.append(vetor.loc[i])
X_novo, y_novo = np.array(X_novo), np.array(y_novo)
return X_novo, y_novo
xtreino_novo, ytreino_novo = separa_dados(vetor,1)
print(xtreino_novo[0:5]) #X
print(ytreino_novo[0:5]) #y
## Now let's split off the test set
vetor2 = pd.DataFrame(yteste)[0]
xteste_novo, yteste_novo = separa_dados(vetor2,1)
## Back to the neural networks
regressor3 = Sequential()
regressor3.add(Dense(8, input_dim=1, kernel_initializer='ones', activation='linear',use_bias=False))
regressor3.add(Dense(64, kernel_initializer='random_uniform', activation='sigmoid',use_bias=False))
regressor3.add(Dense(1, kernel_initializer='random_uniform', activation='linear',use_bias=False))
regressor3.compile(loss='mean_squared_error',optimizer='adam')
regressor3.summary()
regressor3.fit(xtreino_novo,ytreino_novo,epochs =100)
y_predict_novo = regressor3.predict(xtreino_novo)
sns.lineplot(x='tempo',y=ytreino_novo,data=passageiros[1:129],label='treino')
sns.lineplot(x='tempo',y=pd.DataFrame(y_predict_novo)[0],data=passageiros[1:129],label='ajuste_treino')
y_predict_teste_novo = regressor3.predict(xteste_novo)
resultado = pd.DataFrame(y_predict_teste_novo)[0]
sns.lineplot(x='tempo',y=ytreino_novo,data=passageiros[1:129],label='treino')
sns.lineplot(x='tempo',y=pd.DataFrame(y_predict_novo)[0],data=passageiros[1:129],label='ajuste_treino')
sns.lineplot(x='tempo',y=yteste_novo,data=passageiros[130:144],label='teste')
sns.lineplot(x='tempo',y=resultado.values,data=passageiros[130:144],label='previsão')
## Windows
xtreino_novo, ytreino_novo = separa_dados(vetor,4)
xtreino_novo[0:5] #X
ytreino_novo[0:5] #y
xteste_novo, yteste_novo = separa_dados(vetor2,4)
regressor4 = Sequential()
regressor4.add(Dense(8, input_dim=4, kernel_initializer='random_uniform', activation='linear',use_bias=False)) #relu
regressor4.add(Dense(64, kernel_initializer='random_uniform', activation='sigmoid',use_bias=False)) #relu
regressor4.add(Dense(1, kernel_initializer='random_uniform', activation='linear',use_bias=False))
regressor4.compile(loss='mean_squared_error',optimizer='adam')
regressor4.summary()
regressor4.fit(xtreino_novo,ytreino_novo,epochs =300)
y_predict_teste_novo = regressor4.predict(xteste_novo)
resultado = pd.DataFrame(y_predict_teste_novo)[0]
sns.lineplot(x='tempo',y=ytreino_novo,data=passageiros[4:129],label='treino')
sns.lineplot(x='tempo',y=pd.DataFrame(y_predict_novo)[0],data=passageiros[4:129],label='ajuste_treino')
sns.lineplot(x='tempo',y=yteste_novo,data=passageiros[133:144],label='teste')
sns.lineplot(x='tempo',y=resultado.values,data=passageiros[133:144],label='previsão')
# New dataset
bike = pd.read_csv('bicicletas.csv')
bike.head()
bike['datas'] = pd.to_datetime(bike['datas'])
sns.lineplot(x='datas',y='contagem', data=bike)
plt.xticks(rotation=70)
## Scaling the data
sc2 = StandardScaler()
sc2.fit(bike['contagem'].values.reshape(-1,1))
y = sc2.transform(bike['contagem'].values.reshape(-1,1))
## Splitting into train and test
tamanho_treino = int(len(bike)*0.9) # Taking 90% of the data for training
tamanho_teste = len(bike)-tamanho_treino # The rest is reserved for testing
ytreino = y[0:tamanho_treino]
yteste = y[tamanho_treino:len(bike)]
sns.lineplot(x='datas',y=ytreino[:,0],data=bike[0:tamanho_treino],label='treino')
sns.lineplot(x='datas',y=yteste[:,0], data=bike[tamanho_treino:len(bike)],label='teste')
plt.xticks(rotation=70)
vetor = | pd.DataFrame(ytreino) | pandas.DataFrame |
# _*_ coding:utf-8 _*_
'''=================================
@Author :tix_hjq
@Date :19-10-30 9:36 PM
================================='''
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import f1_score, r2_score
from numpy.random import random, shuffle
import matplotlib.pyplot as plt
from pandas import DataFrame
from tqdm import tqdm
import lightgbm as lgb
import pandas as pd
import numpy as np
import warnings
import os
import gc
import re
import datetime
import sys
warnings.filterwarnings("ignore")
pd.set_option('display.max_columns', None)
| pd.set_option('display.max_rows', None) | pandas.set_option |
from copy import deepcopy
import logging
import time
import traceback
from typing import List, Set, Tuple, Union
import uuid
import numpy as np
import pandas as pd
from ..features.types import R_FLOAT
from ..models.abstract.abstract_model import AbstractModel
from ..models.ensemble.bagged_ensemble_model import BaggedEnsembleModel
from ..utils.exceptions import TimeLimitExceeded
from ..utils.utils import generate_train_test_split, unevaluated_fi_df_template
logger = logging.getLogger(__name__)
def add_noise_column(X: pd.DataFrame, rng: np.random.Generator, noise_columns: List[str] = None, count: int = 1) -> Tuple[pd.DataFrame, List[str]]:
"""
Create a copy of dataset X with extra synthetic columns generated from standard normal distribution.
"""
X = X.copy()
if noise_columns is None:
noise_columns = [str(uuid.uuid4()) for _ in range(1, count+1)]
for col_name in noise_columns:
noise = rng.standard_normal(len(X))
X[col_name] = noise
return X, noise_columns
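# Hypothetical usage: append two synthetic noise columns as a feature-importance baseline.
#   X_noisy, noise_cols = add_noise_column(X, rng=np.random.default_rng(0), count=2)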
def merge_importance_dfs(df_old: pd.DataFrame, df_new: pd.DataFrame, using_prev_fit_fi: Set[str]) -> pd.DataFrame:
"""
Create a dataframe that correctly merges two existing dataframe's permutation feature importance statistics,
specifically mean, standard deviation, and shuffle count. For each feature, if one dataframe's feature importance
has not been calculated, the resulting dataframe will contain the other dataframe's feature importance stats.
df_old is assumed to have been from previous feature importance computation round or even pruning round and
can have more features (rows) than df_new. Also, update using_prev_fit_fi to indicate the updated feature list that
uses feature importance values from previous fit.
"""
if df_old is None:
# Remove features whose importance has just been computed from using_prev_fit_fi if they exist
using_prev_fit_fi.difference_update(df_new[df_new['n'] > 0].index.tolist())
return df_new
assert len(df_old) >= len(df_new), "df_old cannot have less rows than df_new."
evaluated_old_rows, evaluated_new_rows = df_old[df_old['n'] > 0], df_new[df_new['n'] > 0]
unevaluated_old_rows, unevaluated_new_rows = df_old[df_old['n'] == 0], df_new[df_new['n'] == 0]
evaluated_both = evaluated_new_rows.index.intersection(evaluated_old_rows.index).difference(using_prev_fit_fi).tolist()
evaluated_neither = unevaluated_new_rows.index.intersection(unevaluated_old_rows.index).tolist()
evaluated_old_only = evaluated_old_rows[evaluated_old_rows.index.isin(unevaluated_new_rows.index)].index.tolist()
evaluated_new_only = evaluated_new_rows[evaluated_new_rows.index.isin(unevaluated_old_rows.index)].index.tolist()
evaluated_new_first_time = evaluated_new_rows.index.intersection(using_prev_fit_fi).tolist()
# for features with no info on both df_old and df_new, return no info rows
evaluated_neither_rows = unevaluated_new_rows.loc[evaluated_neither]
# for features with info on only df_old, return corresponding df_old rows
evaluated_old_only_rows = evaluated_old_rows.loc[evaluated_old_only]
# for features with info on only df_new or whose df_old feature importance came from the previous model, return corresponding df_new rows
evaluated_new_only_rows = evaluated_new_rows.loc[set(evaluated_new_only + evaluated_new_first_time)]
# for features with info on both df_new and whose df_old feature importance came from the current model, return combined statistics
evaluated_both_rows = pd.DataFrame()
evaluated_both_rows_new = evaluated_new_rows.loc[evaluated_both].sort_index()
evaluated_both_rows_old = evaluated_old_rows.loc[evaluated_both].sort_index()
mean_old, mean_new = evaluated_both_rows_old['importance'], evaluated_both_rows_new['importance']
stddev_old, stddev_new = evaluated_both_rows_old['stddev'], evaluated_both_rows_new['stddev']
n_old, n_new = evaluated_both_rows_old['n'], evaluated_both_rows_new['n']
evaluated_both_rows['importance'] = (n_old * mean_old + n_new * mean_new) / (n_old + n_new)
# Refer to https://math.stackexchange.com/questions/2971315/how-do-i-combine-standard-deviations-of-two-groups
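# In formula form (group sizes n1, n2; means m1, m2; standard deviations s1, s2):
#   combined mean     = (n1*m1 + n2*m2) / (n1 + n2)
#   combined variance = ((n1-1)*s1**2 + (n2-1)*s2**2) / (n1+n2-1)
#                       + n1*n2*(m1-m2)**2 / ((n1+n2)*(n1+n2-1))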
evaluated_both_rows['stddev'] = (((n_old - 1) * stddev_old ** 2 + (n_new - 1) * stddev_new ** 2) / (n_old + n_new - 1) +
(n_old * n_new * (mean_old - mean_new) ** 2) / ((n_old + n_new) * (n_old + n_new - 1))) ** 0.5
evaluated_both_rows['p_value'] = None
evaluated_both_rows['n'] = n_old + n_new
# remove features evaluated in df_new from using_prev_fit_fi if they exist
using_prev_fit_fi.difference_update(evaluated_new_rows.index.tolist())
result = | pd.concat([evaluated_both_rows, evaluated_new_only_rows, evaluated_old_only_rows, evaluated_neither_rows]) | pandas.concat |
import pandas as pd
import requests
from tqdm import tqdm
import os
from os import listdir
from os.path import isfile, join
from datetime import datetime
from functools import cache
"""
Test which of the members+descendants of a refset are present in the VT (in total and in the gyn list).
146481000146103 |simpele referentieset met obstetrische verrichtingen (metadata)|
Place the VT Excel release file in ./resources
1) Builds a list of a SNOMED refset and the descendants of those refset members.
2) Reads in a release file of the Verrichtingenthesaurus (VT)
- Compares every row from 2 with 1. Shows True/False in column D of the output.xlsx file.
Run with python3 refset+descendants_vs_vt.py. Choose the correct Excel file in the dialog and download output.xlsx.
"""
### Config ###
# Snowstorm URL - include trailing forward slash
snowstorm_url = "https://snowstorm.test-nictiz.nl/"
snomed_branch = 'MAIN/SNOMEDCT-NL'
snomed_versie = 'live-20210331'
# Create the VT dataframes
files_in_folder = [f for f in listdir("./resources") if isfile(join("./resources", f))]
i=0
print("Bestanden in map:")
print("-"*80)
file_1 = False
file_2 = False
file_3 = False
for file in files_in_folder:
file_type = file.split("_")[-1:]
if file_type[0] == "ThesaurusConceptRol.csv":
thesaurusConceptRollen = pd.read_csv("./resources/"+file)
file_1 = file
if file_type[0] == "ThesaurusConcept.csv":
thesaurusConcepten = pd.read_csv("./resources/"+file)
file_2 = file
if file_type[0] == "ThesaurusTerm.csv":
thesaurusTermen = pd.read_csv("./resources/"+file)
file_3 = file
if file_1 and file_2 and file_3:
print("Bronbestanden gevonden.")
else:
exit("Niet alle bronbestanden aanwezig.")
print("-"*80)
print("-"*80)
print(file_1)
print(thesaurusConceptRollen.head())
print("-"*80)
print(file_2)
print(thesaurusConcepten.head())
print("-"*80)
print(file_3)
print(thesaurusTermen.head())
print("-"*80)
print("-"*80)
# Fetch terms
@cache
def fetchTerms(conceptid):
url = f"{snowstorm_url}{snomed_branch}/concepts/{conceptid}/"
req = requests.get(url)
response = req.json()
if req.status_code == 200:
return response
else:
return {}
# Fetch refset members
@cache
def fetchEcl(ecl):
concepts = []
url = f"{snowstorm_url}{snomed_branch}/concepts?ecl={ecl}&limit=10000&returnIdOnly=true"
# print(url)
req = requests.get(url)
response = req.json()
total = response.get('total',0)
while len(concepts) < total:
concepts += response.get('items',[])
url = f"{snowstorm_url}{snomed_branch}/concepts?ecl={ecl}&limit=10000&searchAfter={response.get('searchAfter')}&returnIdOnly=true"
# print(url)
req = requests.get(url)
response = req.json()
return concepts
conceptID_list = fetchEcl("^146481000146103")
print(f"{len(conceptID_list)} refsetleden opgehaald. Nu de descendants.")
# Fetch the descendants of the refset members and add them to the list
deduplicated_list_ecl = conceptID_list.copy()
deduplicated_list_descendants = []
for concept in tqdm(deduplicated_list_ecl):
deduplicated_list_descendants += fetchEcl(f"<{concept}")
# Deduplicate the lists
deduplicated_list_ecl = list(set(deduplicated_list_ecl))
print(len(deduplicated_list_ecl), "concepten in refset.")
deduplicated_list_descendants = list(set(deduplicated_list_descendants))
print(len(deduplicated_list_descendants), "concepts in the descendants.")
deduplicated_list_total = deduplicated_list_ecl + deduplicated_list_descendants
print(len(deduplicated_list_total), "concepts in total.")
print("-"*80)
# Create the list of thesaurus concept IDs after filtering
thesaurusIDs = thesaurusConceptRollen['ConceptID'].values
# Iterate over the column with Thesaurus IDs
print("Comparing SNOMED -> VT")
output = []
for thesaurusID in tqdm(list(set(thesaurusIDs))):
thesaurusConcept = thesaurusConcepten[
(thesaurusConcepten['ConceptID'] == thesaurusID) & (thesaurusConcepten['Einddatum'] == 20991231)
]
thesaurusTerm = thesaurusTermen[
(thesaurusTermen['ConceptID'] == thesaurusID) &
(thesaurusTermen['Einddatum'] == 20991231) &
(thesaurusTermen['TypeTerm'] == 'voorkeursterm')
]
try:
SCTID = int(thesaurusConcept['SnomedID'])
except:
SCTID = False
try:
term = thesaurusTerm['Omschrijving'].values[0]
except:
term = False
groepCode = thesaurusConceptRollen[
thesaurusConceptRollen['ConceptID'] == thesaurusID
]['SpecialismeGroepCode'].values[0]
in_ecl = (SCTID in deduplicated_list_ecl)
in_descendants = (SCTID in deduplicated_list_descendants)
output.append({
'ThesaurusID' : str(thesaurusID),
'Snomed ID' : str(SCTID),
'Snomed FSN' : fetchTerms(SCTID).get('fsn',{}).get('term',None),
'Voorkeursterm' : term,
'SpecialismeGroepCode' : str(groepCode),
'SCTID in refset': in_ecl,
'SCTID in descendants van refsetleden': in_descendants,
})
print("-"*80)
# Iterate over refset members, check whether they are present in the VT
print("Comparing VT -> SNOMED")
output2 = []
for SCTID in tqdm(deduplicated_list_total):
present = False
thesaurusTerm = False
vt_concept = False
vt_concept_specialisme = False
for ConceptID in thesaurusConcepten[(thesaurusConcepten['SnomedID'] == SCTID) & (thesaurusConcepten['Einddatum'] == 20991231)]['ConceptID']:
present = True
vt_concept = ConceptID
try:
thesaurusTerm = thesaurusTermen[
(thesaurusTermen['ConceptID'] == ConceptID) &
(thesaurusTermen['Einddatum'] == 20991231) &
(thesaurusTermen['TypeTerm'] == 'voorkeursterm')
]['Omschrijving'].values[0]
except:
continue
try:
vt_concept_specialisme = thesaurusConceptRollen[
(thesaurusConceptRollen['ConceptID'] == ConceptID) &
(thesaurusTermen['Einddatum'] == 20991231)
]['SpecialismeGroepCode'].values[0]
except:
continue
output2.append({
'Snomed ID' : str(SCTID),
'Snomed FSN' : fetchTerms(SCTID).get('fsn',{}).get('term',None),
'Refset lid' : (SCTID in deduplicated_list_ecl),
'Descendant van refsetlid' : (SCTID in deduplicated_list_descendants),
'ThesaurusID' : str(vt_concept),
'Voorkeursterm VT' : thesaurusTerm,
'SpecialismeGroepCode' : vt_concept_specialisme,
'SNOMED Concept in VT': present,
})
print("-"*80)
# Export to Excel
print("Exporting to Excel")
export_comment = input("Comments for the output file? ")
now = datetime.now()
date_time = now.strftime("%m-%d-%Y_%H:%M:%S")
writer = pd.ExcelWriter(f"output_{date_time}.xlsx", engine='xlsxwriter')
# Sheet 1 with metadata
metadata_df = pd.DataFrame([
{'key' : 'Scriptnaam', 'value' : os.path.basename(__file__)},
{'key' : 'Export time', 'value' : date_time},
{'key' : 'SNOMED versie', 'value' : snomed_versie},
{'key' : 'Snowstorm URL', 'value' : snowstorm_url},
{'key' : 'VT bronbestand[0]', 'value' : file_1},
{'key' : 'VT bronbestand[1]', 'value' : file_2},
{'key' : 'VT bronbestand[2]', 'value' : file_3},
{'key' : 'Opmerkingen', 'value' : export_comment},
])
metadata_df.to_excel(writer, 'Metadata')
# Sheet 2 with results - VT vs ECL
output_df = pd.DataFrame(output)
output_df.to_excel(writer, 'VT -> SNOMED')
# Sheet 3 with results - ECL vs VT
output_df = | pd.DataFrame(output2) | pandas.DataFrame |
""""
Created by <NAME>, based on the Master Thesis:
"A proposed method for unsupervised anomaly detection for arg_from multivariate building dataset "
University of Bern/Neutchatel/Fribourg - 2017
Any copy of this code should be notified at <EMAIL>
to avoid intellectual property's problems.
Not details about this code are included, if you need more information. Please contact the email above.
"My work is well done to honor God at any time" <NAME>.
Mateo 6:33
"""
"""
This project was developed at the Gerencia de Operaciones of CENACE
Mateo633
"""
import datetime
import pickle
import time
import warnings
# import ipyparallel as ipp
import numpy as np
import pandas as pd
import json
from hmmlearn.hmm import GaussianHMM
from sklearn.externals import joblib
import os
# Function to print in Markdown style:
# from IPython.display import Markdown, display
def h(x):
return -3 * pow(x, 3) + 2 * pow(x, 2) + 5 * x
def get_ipp_client(profile='default'):
rc = None
try:
# rc = ipp.Client(profile=profile)
# rc = Client(profile=profile)
print("Engines running for this client: {0}".format(rc.ids))
except Exception as e:
print(e)
print("Make sure you are running engines by the command: \n $ipcluster start --profile=default -n 4")
return rc
def pivot_DF_using_dates_and_hours(df):
df = df[~df.index.duplicated(keep='first')]
""" Allow to pivot the dataframe using dates and hours"""
df["hour"] = [x.time() for x in df.index]
df['date'] = [x._date_repr for x in df.index]
# transform the series into a table indexed by date with hour columns
try:
df = df.pivot(index='date', columns='hour')
# df.fillna(method='pad', inplace=True)
df.dropna(inplace=True)
except Exception as e:
print(e)
print('No possible convertion, in format: index-> date, columns-> hours')
df = pd.DataFrame()
return df
def select_best_HMM(training_set, validating_set, nComp_list, seed=777):
""" client is an instance of ipp.Client:
df_dataSet is an instance of pd.DataFrame
"""
best_score, best_log_prob = 0, -np.inf # best_score in [0 to 1] and best_log_prob > -np.inf
best_model, log_register_list = None, list()
np.random.seed(seed) # different random seed
for n_component in nComp_list:
try:
model = GaussianHMM(n_components=n_component, covariance_type="diag", n_iter=200).fit(training_set)
assert isinstance(model, GaussianHMM)
score, log_prob = score_model(validating_set, model)
log_register_list.append({"n_component": n_component, "score": round(score, 5),
"log_prob": round(log_prob, 1), "val_size": len(validating_set),
"train_size": len(training_set)})
if score > best_score and log_prob > best_log_prob:
best_score = score
best_model = model
best_log_prob = log_prob
except:
return None, None
# return best_model, score_list, best_log_prob
return best_model, log_register_list
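# Illustrative call (assumed inputs): training_set and validating_set are 2-D
# arrays of observations, nComp_list the candidate numbers of hidden states.
#   best_model, log_reg = select_best_HMM(train_arr, valid_arr, nComp_list=[5, 10, 15, 20])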
def score_model(validating_set, model):
r, n = 0, len(validating_set)
try:
score_samples = model.predict_proba(validating_set)
log_prob = model.score(validating_set)
for sample_score in score_samples:
max_prob = max(sample_score)
r += max_prob
score = (r / n)
except:
return 0, -np.inf
return score, log_prob
def select_best_model_from_list(best_model_list, validating_set, verbose=True):
# warnings.warn("deprecated", DeprecationWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=RuntimeWarning)
best_score, best_log_prob = 0, -np.inf
best_model, log_register = None, list()
last_register = list()
err = 0
for item_model in best_model_list:
model = item_model['model']
if item_model['log_register'] is not None:
log_register += item_model['log_register']
if model is not None:
assert isinstance(model, GaussianHMM)
score, log_prob = score_model(validating_set, model)
last_register.append({"n_component": model.n_components, "score": round(score, 5),
"log_prob": round(log_prob, 1), "val_size": len(validating_set)})
if score > best_score and log_prob > best_log_prob:
best_score = score
best_model = model
best_log_prob = log_prob
else:
err += 1
if verbose:
# print('Trained models: {0}'.format(last_register))
nComponents = None
try:
print('\tBest model: \t\t\t\tnComp={0}, score={1:3.4f}, log_prob={2:5.2f}'.format(
best_model.n_components, best_score, best_log_prob))
nComponents = best_model.n_components
except Exception as e:
print(e, "\n", best_model_list)
if err > 0:
print("\tThere is {0} errors related to trained models".format(err))
return best_model, [{"best_model": {"n_components": nComponents, "score": best_score,
"log_prof": best_log_prob}}] + log_register + last_register
def ordered_hmm_model(model, method='average', metric='euclidean'):
"""
From a trained model, creates a new model that reorders the means of the model according
to the hierarchical clustering (HC)
:param model: a trained Hidden Markov Model
:param method: Available methods: 'average', 'single', 'complete', 'median', 'ward', 'weighted'
:param metric: Available metrics: 'euclidean', 'minkowski', 'cityblock', 'sqeuclidean'
:return: A ordered hmm model
"""
from scipy.cluster.hierarchy import linkage
import copy
# from hmmlearn.hmm import GaussianHMM
ordered_model = copy.deepcopy(model)
# try:
# assert isinstance(model,GaussianHMM)
if model is None:
return None
""" Z_f contains the distance matrix of the means of the model """
Z_f = linkage(model.means_, method=method, metric=metric)
""" Create arg_from new order for the means of the model according to the hierarchical clustering """
n_comp, new_order = model.n_components, list()
for idx, idy, d, c in Z_f:
if idx < n_comp:
new_order.append(int(idx))
if idy < n_comp:
new_order.append(int(idy))
""" Ordering the means and covars according to 'new_order': """
# The use of model._covars_ is exceptional, usually it should be "model.covars_"
old_means, old_covars = model.means_, model._covars_
new_means, new_covars = np.zeros_like(old_means), np.zeros_like(old_covars)
for idx, re_idx in zip(list(range(n_comp)), new_order):
new_means[idx] = old_means[re_idx]
new_covars[idx] = old_covars[re_idx]
""" Ordering transition matrix B and start probability \pi """
old_transmat, new_transmat = model.transmat_, np.zeros_like(model.transmat_)
n = old_transmat.shape[0]
for x in list(range(n)):
for y in list(range(n)):
new_transmat[y, x] = old_transmat[new_order[y], new_order[x]]
start_p = np.array([1 / n_comp for i in range(n_comp)])
""" Setting the new ordered model """
ordered_model.startprob_ = start_p
ordered_model.transmat_ = new_transmat
ordered_model.means_ = new_means
ordered_model.covars_ = new_covars
return ordered_model
# except:
# return model
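# Minimal usage sketch (assumes `model` is a fitted GaussianHMM, e.g. the output of
# select_best_HMM above): the reordered model keeps the same parameters, but its
# hidden states are relabelled by hierarchical clustering of their means.
#   ordered = ordered_hmm_model(model, method='average', metric='euclidean')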
def save_model_and_log(model, log_register, model_path, log_path, file_name):
file1 = os.path.join(model_path, file_name)
file2 = os.path.join(log_path, file_name.replace(".pkl", ".json"))
try:
joblib.dump(model, filename=file1, compress=3, protocol=2)
save_json_file(log_register, file2)
except FileNotFoundError:
file1 = "./" + file_name
file2 = "./" + file_name.replace(".pkl", ".json")
joblib.dump(model, filename=file1, compress=3, protocol=2)
save_json_file(log_register, file2)
print('\tBest model saved in: \t\t\t', file1)
print('\tLog register in: \t\t\t', file2)
def open_pickle_file(file_path):
b = None
try:
with open(file_path, 'rb') as handle:
b = pickle.load(handle)
except Exception as e:
print(e)
return b
def save_pickle_file(file_path, to_save):
try:
with open(file_path, 'wb') as handle:
pickle.dump(to_save, handle, protocol=pickle.HIGHEST_PROTOCOL)
except Exception as e:
print(e)
def save_json_file(to_save, file_path):
try:
with open(file_path, 'w') as outfile:
json.dump(to_save, outfile)
except Exception as e:
print(e)
def open_json_file(file_path):
j = None
try:
with open(file_path, 'r') as outfile:
j = json.load(outfile)
except Exception as e:
print(e)
return j
def printmd(string):
# display(Markdown(string))
print(string)
def d_time(time_reference):
return time.time() - time_reference
def time_now():
return datetime.datetime.now().strftime('%H:%M:%S')
def get_model_dfx_dfy(model_path, data_path, filter_values=True, verbose=True):
"""
Read an HMM model and the corresponding data to be processed
:param verbose: print details about this function
:param filter_values: Exclude undesirable samples
:param model_path: path of the model
:param data_path: path of the data to be processed
:return: model, df_x (data), df_y (labels)
"""
""" Reading the HMM model """
model = joblib.load(model_path)
n_comp = model.n_components
n_features = model.n_features
""" Reading data from """
df_x = read_dfx_from(data_path, filter_values)
x = df_x.values
""" Inferring the hidden states from the observed samples """
hidden_states = model.predict(x)
df_y = pd.DataFrame(hidden_states, columns=['hidden_states'])
df_y.index = df_x.index
if verbose:
print("-> Reading the HMM model from: \n\t\t{0}".format(model_path))
print("\t\tn_comp = {0}, n_features = {1}".format(n_comp, n_features))
print('-> Reading data from: \n\t\t' + data_path)
print("\t\t From " + df_x.index[0].strftime("%Y-%m-%d") + " to " + df_x.index[-1].strftime("%Y-%m-%d"))
print("\t\t Number of samples to observe: ", len(x))
print("-> Inferring the hidden states from the observed samples: "
"\n\t\t A sequence of {0} hidden status were inferred".format(len(hidden_states)))
return model, df_x, df_y
def read_dfx_from(data_path, filter_values=True):
"""Read raw data"""
df = pd.read_pickle(data_path)
for col in df.columns:
df[col] = pd.to_numeric(df[col], errors= 'coerce')
df[col] = df[col].interpolate(method='nearest', limit=3, limit_direction='both')
"""Excluding undesirable data"""
if filter_values:
try:
exclude_dates_list = open_json_file(data_path.replace(".pkl", "_exclude.json"))
mask = ~df.index.isin(exclude_dates_list)
df = df[mask]
except Exception as e:
print(e)
print("[{0: <21s}] No hay datos arg_from filtrar".format(time_now()))
"""Datetime index"""
try:
df.index = | pd.to_datetime(df.index) | pandas.to_datetime |
# Copyright 2020 (c) Netguru S.A.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from random import randint
from typing import Tuple
import numpy as np
import pandas as pd
import pytest
from pytest_cases import fixture_plus
from sklearn.datasets import load_iris
from opoca.data.dataset import Dataset
from opoca.data.split import Split
from opoca.data.splitter import RandomSplitter
from tests.sample_data import DiabetesDataHandler
NB_EXAMPLES = 100
NB_NUMERICAL_FEATURES = 50
NB_BOOL_FEATURES = 30
MIN_CATEGORIES = 10
MAX_CATEGORIES = 30
NB_CATEGORICAL_FEATURES = 10
RANDOM_STATE = 42
def create_data_frame(data: np.ndarray, prefix: str) -> pd.DataFrame:
cols = [f"{prefix}_{i}" for i in range(data.shape[1])]
return pd.DataFrame(data=data, columns=cols)
def get_iris_dataset():
iris = load_iris(as_frame=True)
return Dataset(x=iris.data, y=iris.target.to_frame(), name='iris')
@pytest.fixture(scope="session")
def iris_dataset() -> Dataset:
return get_iris_dataset()
@pytest.fixture(scope="session")
def iris_split() -> Split:
dataset = get_iris_dataset()
rs = RandomSplitter(random_state=RANDOM_STATE)
split = rs.split(dataset)
return split
@fixture_plus(unpack_into="x,y")
def heterogeneous_dataset() -> Tuple[pd.DataFrame, pd.DataFrame]:
numerical_data = np.random.normal(size=(NB_EXAMPLES, NB_NUMERICAL_FEATURES))
bool_data = np.random.binomial(1, 0.3, size=(NB_EXAMPLES, NB_BOOL_FEATURES)).astype(np.bool)
categorical_data = []
for i in range(NB_CATEGORICAL_FEATURES):
categories_count = randint(MIN_CATEGORIES, MAX_CATEGORIES)
population = [f"cat_{i}" for i in range(categories_count)]
column = np.random.choice(population, size=NB_EXAMPLES, replace=True).tolist()
categorical_data.append(column)
categorical_data = np.column_stack(categorical_data)
numerical_df = create_data_frame(numerical_data, "numerical")
bool_df = create_data_frame(bool_data, "bool")
categorical_df = create_data_frame(categorical_data, "cat")
x = | pd.concat([numerical_df, bool_df, categorical_df], axis=1) | pandas.concat |
import argparse
import logging
import os
import pickle
import re
from tqdm import tqdm
tqdm.pandas()
import boto3
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d:%H:%M:%S',
level=logging.INFO)
########################################
# Sync data from S3
########################################
def sync_s3(file_name_list, s3_folder, local_folder):
for f in file_name_list:
print("file preparation: download src key {} to dst key {}".format(os.path.join(
s3_folder, f), os.path.join(local_folder, f)))
s3client.download_file(bucket, os.path.join(
s3_folder, f), os.path.join(local_folder, f))
def write_to_s3(filename, bucket, key):
print("upload s3://{}/{}".format(bucket, key))
with open(filename, 'rb') as f: # Read in binary mode
# return s3client.upload_fileobj(f, bucket, key)
return s3client.put_object(
ACL='bucket-owner-full-control',
Bucket=bucket,
Key=key,
Body=f
)
def write_str_to_s3(content, bucket, key):
print("write s3://{}/{}, content={}".format(bucket, key, content))
s3client.put_object(Body=str(content).encode("utf8"), Bucket=bucket, Key=key, ACL='bucket-owner-full-control')
def prepare_df(item_path):
df = pd.read_csv(item_path)
df['c_id'] = df['c_id'].values.astype('int64')
return df
def get_actor(actor_str):
if not actor_str or str(actor_str).lower() in ['nan', 'nr', '']:
return [None]
actor_arr = actor_str.split('|')
return [item.strip().lower() for item in actor_arr]
def get_category(category_property):
if not category_property or str(category_property).lower() in ['nan', 'nr', '']:
return [None]
if not category_property:
return [None]
return [item.strip().lower() for item in category_property.split('|')]
def get_single_item(item):
if not item or str(item).lower().strip() in ['nan', 'nr', '']:
return [None]
return [str(item).lower().strip()]
def item_embed(x, raw_embed_item_mapping, ub_item_embeddings):
embed_item_idx = raw_embed_item_mapping[str(x)]
if int(embed_item_idx) < len(ub_item_embeddings):
# print(user_portrait[x])
return ub_item_embeddings[int(embed_item_idx)]
else:
return [0] * embed_dim
def item_id_feat(x, i):
return x[i]
parser = argparse.ArgumentParser(description="app inputs and outputs")
parser.add_argument("--bucket", type=str, help="s3 bucket")
parser.add_argument("--prefix", type=str, help="s3 input key prefix")
parser.add_argument("--region", type=str, help="aws region")
args, _ = parser.parse_known_args()
print("args:", args)
if args.region:
print("region:", args.region)
boto3.setup_default_session(region_name=args.region)
bucket = args.bucket
prefix = args.prefix
if prefix.endswith("/"):
prefix = prefix[:-1]
print(f"bucket:{bucket}, prefix:{prefix}")
s3 = boto3.client('s3')
s3client = s3
local_folder = 'info'
if not os.path.exists(local_folder):
os.makedirs(local_folder)
# Load the YouTubeDNN model data
file_name_list = ['raw_embed_item_mapping.pickle',
'raw_embed_user_mapping.pickle']
s3_folder = '{}/feature/action/'.format(prefix)
sync_s3(file_name_list, s3_folder, local_folder)
file_name_list = ['ub_item_embeddings.npy']
s3_folder = '{}/feature/action/'.format(prefix)
ub_item_exists = False
try:
sync_s3(file_name_list, s3_folder, local_folder)
ub_item_exists = True
except Exception as e:
print("run as init load, cannot find ub_item_embeddings.npy")
print(repr(e))
# Pickle file with the inverted lists
file_name_list = ['card_id_card_property_dict.pickle']
s3_folder = '{}/feature/content/inverted-list/'.format(prefix)
sync_s3(file_name_list, s3_folder, local_folder)
file_name_list = ['item.csv']
s3_folder = '{}/system/item-data/'.format(prefix)
sync_s3(file_name_list, s3_folder, local_folder)
# Load the pickle files
file_to_load = open("info/card_id_card_property_dict.pickle", "rb")
dict_id_content = pickle.load(file_to_load)
print("length of card_id v.s. card_property {}".format(len(dict_id_content)))
file_to_load = open("info/raw_embed_item_mapping.pickle", "rb")
raw_embed_item_mapping = pickle.load(file_to_load)
file_to_load = open("info/raw_embed_user_mapping.pickle", "rb")
raw_embed_user_mapping = pickle.load(file_to_load)
# return pd.Series(f_dict)
def sparse_item_id_feat(x, mt, dict_id_content=dict_id_content):
result = dict_id_content[str(x)][mt]
if result[0] is None:
return None
else:
return '|'.join(result)
# Load the model
# user_embedding_model = load_model('info/user_embeddings.h5', custom_objects)
if ub_item_exists:
ub_item_embeddings = np.load("info/ub_item_embeddings.npy")
else:
ub_item_embeddings = []
embed_dim = 32
df = prepare_df("info/item.csv")
card_id_card_property_data = {}
row_cnt = 0
for row in df.iterrows():
item_row = row[1]
program_id = str(item_row['c_id'])
program_dict = {
# 'c_singer_sex': str(item_row['c_singer_sex']),
'c_singer_user_id': str(item_row['c_singer_user_id']),
# 'c_singer_age': str(item_row['c_singer_age']),
# 'c_singer_country': str(item_row['c_singer_country']),
'c_song_name': str(item_row['c_song_name']),
'c_song_artist': str(item_row['c_song_artist'])
}
row_content = []
row_content.append(str(item_row['c_id']))
# row_content.append(program_dict['c_singer_sex'])
row_content.append(program_dict['c_singer_user_id'])
# row_content.append(program_dict['c_singer_age'])
# row_content.append(program_dict['c_singer_country'])
row_content.append(program_dict['c_song_name'])
row_content.append(program_dict['c_song_artist'])
card_id_card_property_data['row_{}'.format(row_cnt)] = row_content
row_cnt = row_cnt + 1
raw_data_pddf = pd.DataFrame.from_dict(card_id_card_property_data, orient='index',
columns=['c_id', 'c_singer_user_id', 'c_song_name',
'c_song_artist'])
raw_data_pddf = raw_data_pddf.reset_index(drop=True)
sample_data_pddf = raw_data_pddf
# item id feature - item embedding
print("根据item_id索引itemid_feat(嵌入)")
sample_data_pddf['itemid_feat'] = sample_data_pddf['c_id'].progress_apply(
lambda x: item_embed(x, raw_embed_item_mapping, ub_item_embeddings))
print("将{}维物品嵌入转化为不同的连续型feature".format(embed_dim))
for i in tqdm(range(embed_dim)):
sample_data_pddf['item_feature_{}'.format(i)] = sample_data_pddf['itemid_feat'].apply(lambda x: item_id_feat(x, i))
# sparse feature
print("根据item_id对应的content生成离散feature")
popularity_method_list = ['c_singer_user_id', 'c_song_name',
'c_song_artist']
for i, mt in tqdm(enumerate(popularity_method_list)):
sample_data_pddf['sparse_feature_{}'.format(i)] = sample_data_pddf['c_id'].apply(
lambda x: sparse_item_id_feat(x, mt))
mk_data = sample_data_pddf
dense_feature_size = embed_dim
sparse_feature_size = 3  # should be set to the actual number of sparse feature columns
for i in range(dense_feature_size):
mk_data['I{}'.format(i + embed_dim)] = mk_data['item_feature_{}'.format(i)]
for i in range(sparse_feature_size):
mk_data['C{}'.format(i + 1)] = mk_data['sparse_feature_{}'.format(i)]
mk_sparse_features = ['C' + str(i) for i in range(1, sparse_feature_size + 1)]
mk_dense_features = ['I' + str(i + embed_dim - 1) for i in range(1, dense_feature_size + 1)]
mk_data[mk_sparse_features] = mk_data[mk_sparse_features].fillna('-1', )
mk_data[mk_dense_features] = mk_data[mk_dense_features].fillna(0, )
for feat in mk_sparse_features:
lbe = LabelEncoder()
mk_data[feat] = lbe.fit_transform(mk_data[feat])
nms = MinMaxScaler(feature_range=(0, 1))
mk_data[mk_dense_features] = nms.fit_transform(mk_data[mk_dense_features])
card_id_card_feature_data = {}
for row in mk_data.iterrows():
item_row = row[1]
# print(item_row)
# break
program_dict = str(item_row['c_id'])
row_content = []
row_content.append(str(item_row['c_id']))
dense_score = []
for feat in mk_sparse_features:
row_content.append(item_row[feat])
for feat in mk_dense_features:
row_content.append(item_row[feat])
dense_score.append(item_row[feat])
row_content.append(np.mean(dense_score))
card_id_card_feature_data['row_{}'.format(row_cnt)] = row_content
row_cnt = row_cnt + 1
col_names = ['c_id'] + mk_sparse_features + mk_dense_features + ['item_feat_mean']
mk_item_feature_pddf = | pd.DataFrame.from_dict(card_id_card_feature_data, orient='index', columns=col_names) | pandas.DataFrame.from_dict |
"""
This network uses the last 52 observations (n_lags) of gwl, tide, and rain to predict the next 18
values of gwl for well MMPS-125. The data for MMPS-125 is missing <NAME>.
"""
import pandas as pd
from pandas import DataFrame
from pandas import concat
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
import keras
import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import GRU
from keras.layers import Dropout
from keras.layers import Activation
from keras.utils import plot_model
from keras.regularizers import L1L2
from math import sqrt
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import random as rn
import os
matplotlib.rcParams.update({'font.size': 8})
# convert time series into supervised learning problem
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
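# Illustrative sketch of the reframing (added; names and shapes are examples only):
#   demo = np.arange(10, dtype='float32').reshape(5, 2)   # 2 variables, 5 time steps
#   series_to_supervised(demo, n_in=1, n_out=1).columns.tolist()
#   # -> ['var1(t-1)', 'var2(t-1)', 'var1(t)', 'var2(t)']
# i.e. each row pairs the lagged inputs with the values to be forecast.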
# def create_weights(train_labels):
# obs_mean = np.mean(train_labels, axis=-1)
# obs_mean = np.reshape(obs_mean, (n_batch, 1))
# obs_mean = np.repeat(obs_mean, n_ahead, axis=1)
# weights = (train_labels + obs_mean) / (2 * obs_mean)
# return weights
#
#
# def sq_err(y_true, y_pred):
# return K.square(y_pred - y_true)
#
#
def mse(y_true, y_pred):
return K.mean(K.square(y_pred - y_true), axis=-1)
def rmse(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
def pw_rmse(y_true, y_pred):
# num_rows, num_cols = K.int_shape(y_true)[0], K.int_shape(y_true)[1]
# print(num_rows, num_cols)
act_mean = K.mean(y_true, axis=-1)
# print("act_mean 1 is:", act_mean)
act_mean = K.reshape(act_mean, (n_batch, 1))
# print("act_mean is: ", act_mean)
mean_repeat = K.repeat_elements(act_mean, n_ahead, axis=1)
# print("mean_repeat is:", mean_repeat)
weights = (y_true+mean_repeat)/(2*mean_repeat)
return K.sqrt(K.mean((K.square(y_pred - y_true)*weights), axis=-1))
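# Plain-NumPy sketch of the peak weighting above (added for illustration only):
# observations above their row mean receive weights > 1, so errors on peaks count more.
#   y_true = np.array([[1.0, 2.0, 3.0]])
#   row_mean = y_true.mean(axis=1, keepdims=True)          # 2.0
#   weights = (y_true + row_mean) / (2 * row_mean)          # [[0.75, 1.0, 1.25]]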
# configure network
n_lags = 52
n_ahead = 18
n_features = 3
n_train = 48357
n_test = 7577
n_epochs = 10000
n_neurons = 10
n_batch = 48357
# load dataset
dataset_raw = read_csv("C:/Users/<NAME>/Documents/HRSD GIS/Site Data/MMPS_125_no_blanks.csv",
index_col=None, parse_dates=True, infer_datetime_format=True)
dataset_raw.loc[dataset_raw['GWL'] > 4.1, 'GWL'] = 4.1
# dataset_raw = dataset_raw[0:len(dataset_raw)-1]
# split datetime column into train and test for plots
train_dates = dataset_raw[['Datetime', 'GWL', 'Tide', 'Precip.']].iloc[:n_train]
test_dates = dataset_raw[['Datetime', 'GWL', 'Tide', 'Precip.']].iloc[n_train:]
test_dates = test_dates.reset_index(drop=True)
test_dates['Datetime'] = pd.to_datetime(test_dates['Datetime'])
# drop columns we don't want to predict
dataset = dataset_raw.drop(dataset_raw.columns[[0, 4]], axis=1)
values = dataset.values
values = values.astype('float32')
gwl = values[:, 0]
gwl = gwl.reshape(gwl.shape[0], 1)
tide = values[:, 1]
tide = tide.reshape(tide.shape[0], 1)
rain = values[:, 2]
rain = rain.reshape(rain.shape[0], 1)
# normalize features with individual scalers
gwl_scaler, tide_scaler, rain_scaler = MinMaxScaler(), MinMaxScaler(), MinMaxScaler()
gwl_scaled = gwl_scaler.fit_transform(gwl)
tide_scaled = tide_scaler.fit_transform(tide)
rain_scaled = rain_scaler.fit_transform(rain)
scaled = np.concatenate((gwl_scaled, tide_scaled, rain_scaled), axis=1)
# frame as supervised learning
reframed = series_to_supervised(scaled, n_lags, n_ahead)
values = reframed.values
# split into train and test sets
train, test = values[:n_train, :], values[n_train:, :]
# split into input and outputs
input_cols, label_cols = [], []
for i in range(values.shape[1]):
if i <= n_lags*n_features-1:
input_cols.append(i)
elif i % 3 != 0:
input_cols.append(i)
elif i % 3 == 0:
label_cols.append(i)
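# Note added for clarity: the reframed columns repeat as [gwl, tide, rain] for every
# time step, so beyond the first n_lags*n_features lag columns the indices divisible
# by 3 are future gwl values (used as labels), while the future tide/rain columns are
# kept as extra inputs.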
train_X, train_y = train[:, input_cols], train[:, label_cols] # [start:stop:increment, (cols to include)]
test_X, test_y = test[:, input_cols], test[:, label_cols]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
#create weights for peak weighted rmse loss function
# weights = create_weights(train_y)
# load model here if needed
# model = keras.models.load_model("C:/Users/<NAME>/PycharmProjects/Tensorflow/keras_models/mmps125.h5",
# custom_objects={'pw_rmse':pw_rmse})
# set random seeds for model reproducibility as suggested in:
# https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(42)
rn.seed(12345)
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# define model
model = Sequential()
model.add(LSTM(units=n_neurons, input_shape=(None, train_X.shape[2]), use_bias=True,
bias_regularizer=L1L2(l1=0.01, l2=0.01))) # This is hidden layer
# model.add(LSTM(units=n_neurons, return_sequences=True, input_shape=(None, train_X.shape[2]), use_bias=True))
# model.add(LSTM(units=n_neurons, return_sequences=True, use_bias=True))
# model.add(LSTM(units=n_neurons, use_bias=True))
model.add(Dropout(.1))
model.add(Dense(activation='linear', units=n_ahead, use_bias=True)) # this is output layer
# model.add(Activation('linear'))
adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model.compile(loss=rmse, optimizer=adam)
tbCallBack = keras.callbacks.TensorBoard(log_dir='C:/tmp/tensorflow/keras/logs', histogram_freq=0, write_graph=True,
write_images=False)
earlystop = keras.callbacks.EarlyStopping(monitor='loss', min_delta=0.00000001, patience=10, verbose=1, mode='auto')
history = model.fit(train_X, train_y, batch_size=n_batch, epochs=n_epochs, verbose=2, shuffle=False,
callbacks=[earlystop, tbCallBack])
# save model
# model.save("C:/Users/<NAME>/PycharmProjects/Tensorflow/keras_models/mmps125.h5")
# plot model history
# plt.plot(history.history['loss'], label='train')
# # plt.plot(history.history['val_loss'], label='validate')
# # plt.legend()
# # ticks = np.arange(0, n_epochs, 1) # (start,stop,increment)
# # plt.xticks(ticks)
# plt.xlabel("Epochs")
# plt.ylabel("Loss")
# plt.tight_layout()
# plt.show()
# make predictions
trainPredict = model.predict(train_X)
yhat = model.predict(test_X)
inv_trainPredict = gwl_scaler.inverse_transform(trainPredict)
inv_yhat = gwl_scaler.inverse_transform(yhat)
inv_y = gwl_scaler.inverse_transform(test_y)
inv_train_y = gwl_scaler.inverse_transform(train_y)
# post process predicted values to not be greater than the land surface elevation
inv_yhat[inv_yhat > 4.1] = 4.1
# save train predictions and observed
inv_trainPredict_df = DataFrame(inv_trainPredict)
inv_trainPredict_df.to_csv("C:/Users/<NAME>/PycharmProjects/Tensorflow/mmps125_results/train_predicted.csv")
inv_train_y_df = DataFrame(inv_train_y)
inv_train_y_df.to_csv("C:/Users/<NAME>/PycharmProjects/Tensorflow/mmps125_results/train_observed.csv")
# save test predictions and observed
inv_yhat_df = DataFrame(inv_yhat)
inv_yhat_df.to_csv("C:/Users/<NAME>/PycharmProjects/Tensorflow/mmps125_results/predicted.csv")
inv_y_df = DataFrame(inv_y)
inv_y_df.to_csv("C:/Users/<NAME>/PycharmProjects/Tensorflow/mmps125_results/observed.csv")
# calculate RMSE for whole test series (each forecast step)
RMSE_forecast = []
for i in np.arange(0, n_ahead, 1):
    rmse_step = sqrt(mean_squared_error(inv_y[:, i], inv_yhat[:, i]))
    RMSE_forecast.append(rmse_step)
RMSE_forecast = DataFrame(RMSE_forecast)
rmse_avg = sqrt(mean_squared_error(inv_y, inv_yhat))
print('Average Test RMSE: %.3f' % rmse_avg)
RMSE_forecast.to_csv("C:/Users/<NAME>/PycharmProjects/Tensorflow/mmps125_results/RMSE.csv")
# calculate RMSE for each individual time step
RMSE_timestep = []
for i in np.arange(0, inv_yhat.shape[0], 1):
    rmse_step = sqrt(mean_squared_error(inv_y[i, :], inv_yhat[i, :]))
    RMSE_timestep.append(rmse_step)
RMSE_timestep = DataFrame(RMSE_timestep)
# plot rmse vs forecast steps
plt.plot(RMSE_forecast, 'ko')
ticks = np.arange(0, n_ahead, 1) # (start,stop,increment)
plt.xticks(ticks)
plt.ylabel("RMSE (ft)")
plt.xlabel("Forecast Step")
plt.tight_layout()
plt.show()
# plot training predictions
plt.plot(inv_train_y[:, 0], label='actual')
plt.plot(inv_trainPredict[:, 0], label='predicted')
plt.xlabel("Timestep")
plt.ylabel("GWL (ft)")
plt.title("Training Predictions")
# ticks = np.arange(0, n_ahead, 1)
# plt.xticks(ticks)
plt.legend()
plt.tight_layout()
plt.show()
# plot test predictions for Hermine, Julia, and Matthew
dates = DataFrame(test_dates[["Datetime"]][n_lags:-n_ahead+1])
dates = dates.reset_index(inplace=False)
dates = dates.drop(columns=['index'])
dates = dates[:]
dates = dates.reset_index(inplace=False)
dates = dates.drop(columns=['index'])
dates_9 = DataFrame(test_dates[["Datetime"]][n_lags+8:-n_ahead+9])
dates_9 = dates_9.reset_index(inplace=False)
dates_9 = dates_9.drop(columns=['index'])
dates_9 = dates_9[:]
dates_9 = dates_9.reset_index(inplace=False)
dates_9 = dates_9.drop(columns=['index'])
dates_18 = DataFrame(test_dates[["Datetime"]][n_lags+17:])
dates_18 = dates_18.reset_index(inplace=False)
dates_18 = dates_18.drop(columns=['index'])
dates_18 = dates_18[:]
dates_18 = dates_18.reset_index(inplace=False)
dates_18 = dates_18.drop(columns=['index'])
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, figsize=(6, 4))
x_ticks = np.arange(0, 7435, 720)
ax1.plot(inv_y[:, 0], '-', label='Obs.')
ax1.plot(inv_yhat[:, 0], ':', label='Pred.')
ax1.set_xticks(x_ticks)
ax1.set_xticklabels(dates['Datetime'][x_ticks].dt.strftime('%m-%d'), rotation='vertical')
ax2.plot(inv_y[:, 8], '-', label='Obs.')
ax2.plot(inv_yhat[:, 8], ':', label='Pred.')
ax2.set_xticks(x_ticks)
ax2.set_xticklabels(dates_9['Datetime'][x_ticks].dt.strftime('%m-%d'), rotation='vertical')
ax3.plot(inv_y[:, 17], '-', label='Obs.')
ax3.plot(inv_yhat[:, 17], ':', label='Pred.')
ax3.set_xticks(x_ticks)
ax3.set_xticklabels(dates_18['Datetime'][x_ticks].dt.strftime('%m-%d'), rotation='vertical')
ax1.text(-200, 4, 't+1')
ax2.text(-200, 4, 't+9')
ax3.text(-200, 4, 't+18')
ax2.set(ylabel="GWL (ft)")
plt.legend(loc=9)
plt.tight_layout()
plt.show()
fig.savefig('C:/Users/<NAME>/Documents/HRSD GIS/Presentation Images/Paper Figures/MMPS125_preds.tif', dpi=300)
# create dfs of timestamps, obs, and pred data to find peak values and times
obs_t1 = np.reshape(inv_y[:, 0], (7435, 1))
pred_t1 = np.reshape(inv_yhat[:, 0], (7435,1))
df_t1 = np.concatenate([obs_t1, pred_t1], axis=1)
df_t1 = DataFrame(df_t1, index=None, columns=["obs", "pred"])
df_t1 = pd.concat([df_t1, dates], axis=1)
df_t1 = df_t1.set_index("Datetime")
df_t1 = df_t1.rename(columns={'obs': 'Obs. GWL t+1', 'pred': 'Pred. GWL t+1'})
obs_t9 = np.reshape(inv_y[:, 8], (7435, 1))
pred_t9 = np.reshape(inv_yhat[:, 8], (7435,1))
df_t9 = np.concatenate([obs_t9, pred_t9], axis=1)
df_t9 = DataFrame(df_t9, index=None, columns=["obs", "pred"])
df_t9 = pd.concat([df_t9, dates_9], axis=1)
df_t9 = df_t9.set_index("Datetime")
df_t9 = df_t9.rename(columns={'obs': 'Obs. GWL t+9', 'pred': 'Pred. GWL t+9'})
obs_t18 = np.reshape(inv_y[:, 17], (7435, 1))
pred_t18 = np.reshape(inv_yhat[:, 17], (7435,1))
df_t18 = np.concatenate([obs_t18, pred_t18], axis=1)
df_t18 = DataFrame(df_t18, index=None, columns=["obs", "pred"])
df_t18 = | pd.concat([df_t18, dates_18], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 11 16:31:58 2021
@author: snoone
"""
import os
import glob
import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None # default='warn'
OUTDIR2= "D:/Python_CDM_conversion/daily/cdm_out/head"
OUTDIR = "D:/Python_CDM_conversion/daily/cdm_out/obs"
os.chdir("D:/Python_CDM_conversion/daily/.csv/")
extension = 'csv'
#my_file = open("D:/Python_CDM_conversion/hourly/qff/ls1.txt", "r")
#all_filenames = my_file.readlines()
#print(all_filenames)
##use a list of file name sto run 5000 parallel
#with open("D:/Python_CDM_conversion/hourly/qff/ls1.txt", "r") as f:
#all_filenames = f.read().splitlines()
all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
##to start at begining of files
for filename in all_filenames:
##to start at next file after last processe
#for filename in all_filenames[all_filenames.index('SWM00002338.qff'):] :
df=pd.read_csv(filename, sep=",")
##add column headers
df.columns=["Station_ID", "Date", "observed_variable", "observation_value","quality_flag","Measurement_flag","Source_flag","hour"]
df = df.astype(str)
# importing pandas as pd
# filtering the rows where Credit-Rating is Fair
df = df[df["observed_variable"].isin(["SNWD", "PRCP", "TMIN", "TMAX", "TAVG", "SNOW", "AWND", "AWDR", "WESD"])]
df["Source_flag"]=df["Source_flag"]. astype(str)
df['Source_flag'] = df['Source_flag'].str.replace("0","c")
df['Source_flag'] = df['Source_flag'].str.replace("6","n")
df['Source_flag'] = df['Source_flag'].str.replace("7","t")
df['Source_flag'] = df['Source_flag'].str.replace("A","224")
df['Source_flag'] = df['Source_flag'].str.replace("c","161")
df['Source_flag'] = df['Source_flag'].str.replace("n","162")
df['Source_flag'] = df['Source_flag'].str.replace("t","120")
df['Source_flag'] = df['Source_flag'].str.replace("A","224")
df['Source_flag'] = df['Source_flag'].str.replace("a","225")
df['Source_flag'] = df['Source_flag'].str.replace("B","159")
df['Source_flag'] = df['Source_flag'].str.replace("b","226")
df['Source_flag'] = df['Source_flag'].str.replace("C","227")
df['Source_flag'] = df['Source_flag'].str.replace("D","228")
df['Source_flag'] = df['Source_flag'].str.replace("E","229")
df['Source_flag'] = df['Source_flag'].str.replace("F","230")
df['Source_flag'] = df['Source_flag'].str.replace("G","231")
df['Source_flag'] = df['Source_flag'].str.replace("H","160")
df['Source_flag'] = df['Source_flag'].str.replace("I","232")
df['Source_flag'] = df['Source_flag'].str.replace("K","233")
df['Source_flag'] = df['Source_flag'].str.replace("M","234")
df['Source_flag'] = df['Source_flag'].str.replace("N","235")
df['Source_flag'] = df['Source_flag'].str.replace("Q","236")
df['Source_flag'] = df['Source_flag'].str.replace("R","237")
df['Source_flag'] = df['Source_flag'].str.replace("r","238")
df['Source_flag'] = df['Source_flag'].str.replace("S","166")
df['Source_flag'] = df['Source_flag'].str.replace("s","239")
df['Source_flag'] = df['Source_flag'].str.replace("T","240")
df['Source_flag'] = df['Source_flag'].str.replace("U","241")
df['Source_flag'] = df['Source_flag'].str.replace("u","242")
df['Source_flag'] = df['Source_flag'].str.replace("W","163")
df['Source_flag'] = df['Source_flag'].str.replace("X","164")
df['Source_flag'] = df['Source_flag'].str.replace("Z","165")
df['Source_flag'] = df['Source_flag'].str.replace("z","243")
df['Source_flag'] = df['Source_flag'].str.replace("m","196")
station_id=df.iloc[1]["Station_ID"]
##set the value significnace for each variable
df["value_significance"]=""
df['observed_variable'] = df['observed_variable'].str.replace("SNWD","53")
df.loc[df['observed_variable'] == "53", 'value_significance'] = '13'
df['observed_variable'] = df['observed_variable'].str.replace("PRCP","44")
df.loc[df['observed_variable'] == "44", 'value_significance'] = "13"
df.loc[df['observed_variable'] == "TMIN", 'value_significance'] = '1'
df['observed_variable'] = df['observed_variable'].str.replace("TMIN","85")
df.loc[df['observed_variable'] == "TMAX", 'value_significance'] = '0'
df['observed_variable'] = df['observed_variable'].str.replace("TMAX","85")
df.loc[df['observed_variable'] == "TAVG", 'value_significance'] = '2'
df['observed_variable'] = df['observed_variable'].str.replace("TAVG","85")
df['observed_variable'] = df['observed_variable'].str.replace("SNOW","45")
df.loc[df['observed_variable'] == "45", 'value_significance'] = '13'
df['observed_variable'] = df['observed_variable'].str.replace("AWND","107")
df.loc[df['observed_variable'] == "107", 'value_significance'] = '2'
df['observed_variable'] = df['observed_variable'].str.replace("AWDR","106")
df.loc[df['observed_variable'] == "106", 'value_significance'] = '2'
df['observed_variable'] = df['observed_variable'].str.replace("WESD","55")
df.loc[df['observed_variable'] == "55", 'value_significance'] = '13'
df["observation_value"] = pd.to_numeric(df["observation_value"],errors='coerce')
df["original_value"]=df["observation_value"]
df['original_value'] = np.where(df['observed_variable'] == "44",
df['original_value'] / 10,
df['original_value']).round(2)
df['original_value'] = np.where(df['observed_variable'] == "53",
df['original_value'] / 10,
df['original_value']).round(2)
df['original_value'] = np.where(df['observed_variable'] == "85",
df['original_value'] / 10,
df['original_value']).round(2)
df["original_value"] = np.where(df['observed_variable'] == '45',
df['original_value'] / 10,
df['original_value']).round(2)
df['original_value'] = np.where(df['observed_variable'] == '55',
df['original_value'] / 10,
df['original_value']).round(2)
##SET OBSERVED VALUES TO CDM COMPLIANT values
df["observation_value"] = pd.to_numeric(df["observation_value"],errors='coerce')
#df["observed_variable"] = pd.to_numeric(df["observed_variable"],errors='coerce')
#df['observation_value'] = df['observation_value'].astype(int).round(2)
df['observation_value'] = np.where(df['observed_variable'] == "44",
df['observation_value'] / 10,
df['observation_value']).round(2)
df['observation_value'] = np.where(df['observed_variable'] == "53",
df['observation_value'] / 10,
df['observation_value']).round(2)
df['observation_value'] = np.where(df['observed_variable'] == "85",
df['observation_value'] / 10 + 273.15,
df['observation_value']).round(2)
df['observation_value'] = np.where(df['observed_variable'] == '45',
df['observation_value'] / 10,
df['observation_value']).round(2)
df['observation_value'] = np.where(df['observed_variable'] == '55',
df['observation_value'] / 10,
df['observation_value']).round(2)
df['observation_value'] = np.where(df['observed_variable'] == '107',
df['observation_value'] / 10,
df['observation_value']).round(2)
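# Note added for clarity: the conversions above scale the raw integer values down by a
# factor of 10 (the source stores scaled integers) and additionally shift temperatures
# (code 85) from degrees Celsius to Kelvin (+273.15); original_value keeps the /10 value
# without the Kelvin shift.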
##set the units for each variable
df["original_units"]=""
df.loc[df['observed_variable'] == "85", 'original_units'] = '350'
df.loc[df['observed_variable'] == "44", 'original_units'] = '710'
df.loc[df['observed_variable'] == "45", 'original_units'] = '710'
df.loc[df['observed_variable'] == "55", 'original_units'] = '710'
df.loc[df['observed_variable'] == "106", 'original_units'] = '731'
df.loc[df['observed_variable'] == "107", 'original_units'] = "320"
df.loc[df['observed_variable'] == "53", 'original_units'] = '715'
##set the original units for each variable
df["units"]=""
df.loc[df['observed_variable'] == "85", 'units'] = '5'
df.loc[df['observed_variable'] == "44", 'units'] = '710'
df.loc[df['observed_variable'] == "45", 'units'] = '710'
df.loc[df['observed_variable'] == "55", 'units'] = '710'
df.loc[df['observed_variable'] == "106", 'units'] = '731'
df.loc[df['observed_variable'] == "107", 'units'] = "320"
df.loc[df['observed_variable'] == "53", 'units'] = '715'
##set each height above station surface for each variable
df["observation_height_above_station_surface"]=""
df.loc[df['observed_variable'] == "85", 'observation_height_above_station_surface'] = '2'
df.loc[df['observed_variable'] == "44", 'observation_height_above_station_surface'] = '1'
df.loc[df['observed_variable'] == "45", 'observation_height_above_station_surface'] = '1'
df.loc[df['observed_variable'] == "55", 'observation_height_above_station_surface'] = '1'
df.loc[df['observed_variable'] == "106", 'observation_height_above_station_surface'] = '10'
df.loc[df['observed_variable'] == "107", 'observation_height_above_station_surface'] = "10"
df.loc[df['observed_variable'] == "53", 'observation_height_above_station_surface'] = "1"
##set conversion flags for variables
df["conversion_flag"]=""
df.loc[df['observed_variable'] == "85", 'conversion_flag'] = '0'
df.loc[df['observed_variable'] == "44", 'conversion_flag'] = '2'
df.loc[df['observed_variable'] == "45", 'conversion_flag'] = '2'
df.loc[df['observed_variable'] == "55", 'conversion_flag'] = '2'
df.loc[df['observed_variable'] == "106", 'conversion_flag'] = '2'
df.loc[df['observed_variable'] == "107", 'conversion_flag'] = "2"
df.loc[df['observed_variable'] == "53", 'conversion_flag'] = "2"
##set conversion method for variables
df["conversion_method"]=""
df.loc[df['observed_variable'] == "85", 'conversion_method'] = '1'
##set numerical precision for variables
df["numerical_precision"]=""
df.loc[df['observed_variable'] == "85", 'numerical_precision'] = '0.01'
df.loc[df['observed_variable'] == "44", 'numerical_precision'] = '0.1'
df.loc[df['observed_variable'] == "45", 'numerical_precision'] = '0.1'
df.loc[df['observed_variable'] == "55", 'numerical_precision'] = '0.1'
df.loc[df['observed_variable'] == "106", 'numerical_precision'] = '0.1'
df.loc[df['observed_variable'] == "107", 'numerical_precision'] = "0.1"
df.loc[df['observed_variable'] == "53", 'numerical_precision'] = "1"
df["original_precision"]=""
df.loc[df['observed_variable'] == "85", 'original_precision'] = '0.1'
df.loc[df['observed_variable'] == "44", 'original_precision'] = '0.1'
df.loc[df['observed_variable'] == "45", 'original_precision'] = '0.1'
df.loc[df['observed_variable'] == "55", "original_precision"] = '0.1'
df.loc[df['observed_variable'] == "106", 'original_precision'] = '1'
df.loc[df['observed_variable'] == "107", 'original_precision'] = "0.1"
df.loc[df['observed_variable'] == "53", 'original_precision'] = "1"
#add all columns for cdmlite
df['year'] = df['Date'].str[:4]
df['month'] = df['Date'].map(lambda x: x[4:6])
df['day'] = df['Date'].map(lambda x: x[6:8])
df["hour"] ="00"
df["Minute"]="00"
df["report_type"]="3"
df["source_id"]=df["Source_flag"]
df["date_time_meaning"]="1"
df["observation_duration"]="13"
df["platform_type"]=""
df["station_type"]="1"
df["observation_id"]=""
df["data_policy_licence"]=""
df["primary_station_id"]=df["Station_ID"]
df["qc_method"]=df["quality_flag"].astype(str)
df["quality_flag"]=df["quality_flag"].astype(str)
df["crs"]=""
df["z_coordinate"]=""
df["z_coordinate_type"]=""
df["secondary_variable"]=""
df["secondary_value"]=""
df["code_table"]=""
df["sensor_id"]=""
df["sensor_automation_status"]=""
df["exposure_of_sensor"]=""
df["processing_code"]=""
df["processing_level"]="0"
df["adjustment_id"]=""
df["traceability"]=""
df["advanced_qc"]=""
df["advanced_uncertainty"]=""
df["advanced_homogenisation"]=""
df["advanced_assimilation_feedback"]=""
df["source_record_id"]=""
df["location_method"]=""
df["location_precision"]=""
df["z_coordinate_method"]=""
df["bbox_min_longitude"]=""
df["bbox_max_longitude"]=""
df["bbox_min_latitude"]=""
df["bbox_max_latitude"]=""
df["spatial_representativeness"]=""
df["original_code_table"]=""
df["report_id"]=""
###set quality flag to pass 0 or fail 1
#df.loc[df['quality_flag'].notnull(), "quality_flag"] = "1"
#df = df.fillna("Null")
df.quality_flag[df.quality_flag == "nan"] = "0"
df.quality_flag = df.quality_flag.str.replace('D', '1') \
.str.replace('G', '1') \
.str.replace('I', '1')\
.str.replace('K', '1')\
.str.replace('L', '1')\
.str.replace('M', '1')\
.str.replace('N', '1')\
.str.replace('O', '1')\
.str.replace('R', '1')\
.str.replace('S', '1')\
.str.replace('T', '1')\
.str.replace('W', '1')\
.str.replace('X', '1')\
.str.replace('Z', '1')\
.str.replace('H', '1')\
.str.replace('P', '1')
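# Note added for clarity: missing QC flags ("nan") are treated as a pass ("0"), while
# each of the letter flags listed above is mapped to "1", i.e. fail.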
#print (df.dtypes)
##add timestamp to df and cerate report id
df["Timestamp2"] = df["year"].map(str) + "-" + df["month"].map(str)+ "-" + df["day"].map(str)
df["Seconds"]="00"
df["offset"]="+00"
df["date_time"] = df["Timestamp2"].map(str)+ " " + df["hour"].map(str)+":"+df["Minute"].map(str)+":"+df["Seconds"].map(str)
#df['date_time'] = pd.to_datetime(df['date_time'], format='%Y/%m/%d' " ""%H:%M")
#df['date_time'] = df['date_time'].astype('str')
df.date_time = df.date_time + '+00'
df["dates"]=df["date_time"].str[:-11]
df['primary_station_id_2']=df['primary_station_id'].astype(str)+'-'+df['source_id'].astype(str)
df["observation_value"] = | pd.to_numeric(df["observation_value"],errors='coerce') | pandas.to_numeric |
"""Requires installation of requirements-extras.txt"""
import pandas as pd
import os
import seaborn as sns
from absl import logging
from ._nlp_constants import PROMPTS_PATHS, PERSPECTIVE_API_MODELS
from credoai.data.utils import get_data_path
from credoai.modules.credo_module import CredoModule
from credoai.utils.common import NotRunError, ValidationError, wrap_list
from functools import partial
from googleapiclient import discovery
from time import sleep
class NLPGeneratorAnalyzer(CredoModule):
"""
This module assesses language generation models based on various prompts and assessment attributes
Parameters
----------
prompts : str
choices are builtin datasets, which include:
'bold_gender', 'bold_political_ideology', 'bold_profession',
'bold_race', 'bold_religious_ideology' (Dhamala et al. 2021)
'realtoxicityprompts_1000', 'realtoxicityprompts_challenging_20',
'realtoxicityprompts_challenging_100', 'realtoxicityprompts_challenging' (Gehman et al. 2020)
'conversationai_age', 'conversationai_disability', 'conversationai_gender', 'conversationai_race',
'conversationai_religious_ideology', 'conversationai_sexual_orientation' (Dixon et al. 2018)
or path of your own prompts csv file with columns 'group', 'subgroup', 'prompt'
generation_functions : dict
keys are names of the models and values are their callable generation functions
assessment_functions : dict
keys are names of the assessment functions and values could be custom callable assessment functions
or name of builtin assessment functions.
Current choices, all using Perspective API include:
'perspective_toxicity', 'perspective_severe_toxicity',
'perspective_identify_attack', 'perspective_insult',
'perspective_profanity', 'perspective_threat'
perspective_config : dict
if Perspective API is to be used, this must be passed with the following:
'api_key': your Perspective API key
'rpm_limit': request per minute limit of your Perspective API account
"""
def __init__(
self,
prompts,
generation_functions,
assessment_functions,
perspective_config=None,
):
super().__init__()
self.prompts = prompts
self.generation_functions = generation_functions
self.assessment_functions = assessment_functions
self.perspective_config = perspective_config
self.perspective_client = None
def prepare_results(self):
"""Generates summary statistics of raw assessment results generated by self.run
Returns
-------
pandas.dataframe
Summary statistics of assessment results
Schema: ['generation_model' 'assessment_attribute', 'group', 'mean', 'std']
Raises
------
NotRunError
Occurs if self.run is not called yet to generate the raw assessment results
"""
if self.results is not None:
# Calculate statistics across groups and assessment attributes
results = (
self.results['assessment_results'][
["generation_model", "group", "assessment_attribute", "value"]
]
.groupby(
["generation_model", "group", "assessment_attribute"],
as_index=False,
)
.agg(mean=("value", "mean"), std=("value", "std"))
)
results.sort_values(
by=["generation_model", "assessment_attribute", "group"], inplace=True
)
results = results[
["generation_model", "assessment_attribute", "group", "mean", "std"]
]
return results
else:
raise NotRunError(
"Results not created yet. Call 'run' with appropriate arguments before preparing results"
)
def run(self, n_iterations=1):
"""Run the generations and assessments
Parameters
----------
n_iterations : int, optional
Number of times to generate responses for a prompt, by default 1
Increase if your generation model is stochastic for a higher confidence
Returns
-------
self
"""
df = self._get_prompts(self.prompts)
logging.info("Loaded the prompts dataset " + self.prompts)
# Perform prerun checks
self._perform_prerun_checks()
logging.info(
"Performed prerun checks of generation and assessment functions"
)
# Generate and record responses for the prompts with all the generation models n_iterations times
dfruns_lst = []
for gen_name, gen_fun in self.generation_functions.items():
gen_fun = partial(gen_fun, num_sequences=n_iterations)
logging.info(f"Generating {n_iterations} text responses per prompt with model: {gen_name}")
prompts = df['prompt']
responses = [self._gen_fun_robust(p, gen_fun) for p in prompts]
temp = pd.concat([df, pd.DataFrame(responses)], axis=1) \
.assign(prompt=prompts) \
.melt(id_vars=df.columns, var_name='run', value_name='response') \
.assign(generation_model = gen_name)
dfruns_lst.append(temp)
dfruns = pd.concat(dfruns_lst)
# Assess the responses for the input assessment attributes
logging.info("Performing assessment of the generated responses")
dfrunst = dfruns[
dfruns["response"] != "nlp generator error"
].copy() # exclude cases where generator failed to generate a response
dfrunst_assess_lst = []
for assessment_attribute, assessment_fun in self.assessment_functions.items():
logging.info(f"Performing {assessment_attribute} assessment")
temp = dfrunst.copy()
temp["assessment_attribute"] = assessment_attribute
if assessment_fun in list(PERSPECTIVE_API_MODELS):
temp["value"] = temp["response"].apply(
lambda x: self._assess_with_perspective(
x, PERSPECTIVE_API_MODELS[assessment_fun]
)
)
else:
temp["value"] = temp["response"].apply(assessment_fun)
dfrunst_assess_lst.append(temp)
dfrunst_assess = | pd.concat(dfrunst_assess_lst) | pandas.concat |
'''
@Author = Ollie
'''
import yfinance as yf
from pandas_datareader import data as pdr
yf.pdr_override()
import pandas as pd
from datetime import datetime, timedelta, date
class stock_dataframe():
def __init__(self, ticker, start_date, df):
'''This class represents a dataframe that can be used to scrape up to
date market data from yfinance api or perform cleaning and add columns
:param ticker: the code used to represent the stock entered in form
suitable for SQL and adjusted here for yfinance
:param start_date: date from which the market data should be gathered
can be set to None and will download past 5 years
:param df: can input pre created dataframe to use clean and returns fns
'''
self.ticker = ticker.replace("_", ".")
self.ticker = ''.join([i for i in self.ticker if not i.isdigit()])
self.start_date = start_date
self.df = df
def download_data(self):
if not self.start_date:
self.start_date = datetime.today() - timedelta(days=1825)
self.df = pdr.get_data_yahoo(self.ticker, self.start_date,
datetime.today())
self.df.columns = ['Open', 'High', 'Low', 'Close', 'AdjClose', 'Volume']
return self.df
def clean_data(self):
'''This fn used to clean downloaded data from yfinance
- converts all prices to pence
- inserts missing dates and resamples to business days
- interpolates missing values'''
for i in range(len(self.df) - 1):
for j in range(len(self.df.columns) - 1):
if (0.1 > self.df.iloc[i+1][j] / self.df.iloc[i][j]):
self.df.iat[i+1, j] = self.df.iloc[i+1][j] * 100
elif (self.df.iloc[i+1][j] / self.df.iloc[i][j] > 10):
if not self.update_previous(i, j):
return False
self.df = self.df.asfreq('D')
self.df = self.df[self.df.index.dayofweek < 5]
self.df = self.df.interpolate(method='spline', order=1)
return self.df
def update_previous(self, i, j):
'''Fn backtracks up column to update all prices to pence
:param i: row from which backtracking should start (inclusive)
:param j: column which needs backtracking
:return: True or False depending on whether operation was successful'''
try:
for x in range(i + 1):
self.df.iat[x, j] = self.df.iloc[x][j] * 100
except:
return False
else:
return True
def returns(self):
if 'Returns' in self.df:
del self.df['Returns']
self.df['Returns'] = ((self.df['AdjClose'].pct_change() + 1).cumprod())
self.df.iat[0, len(self.df.columns) - 1] = 1
return self.df
def moving_averages(self, t_frame=50):
'''Fn to create a new column in dataframe for moving averages of Returns
:param t_frame: number of days over which moving average is taken
:return: updated dataframe'''
if 'ReturnsMA' in self.df:
del self.df['ReturnsMA']
self.df['ReturnsMA'] = self.df['Returns'].rolling(window=t_frame).mean()
return self.df
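    # Note added for clarity: with the default t_frame=50 the first 49 rows of
    # 'ReturnsMA' are NaN, because the rolling mean needs a full 50-business-day
    # window before it produces a value.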
def pre_process(self, clean):
if clean:
self.clean_data()
self.returns()
self.moving_averages()
return self.df
def new_stock_df(self):
self.download_data()
return self.pre_process(True)
def update_stock_df(self):
'''Updates stock dataframe to include up to date prices'''
old_df = self.df.copy()
if 'Returns' in old_df:
del old_df['Returns']
if 'ReturnsMA' in old_df:
del old_df['ReturnsMA']
self.download_data()
self.df = | pd.concat([old_df, self.df]) | pandas.concat |
"""
Processing data from the output database.
"""
import logging
from typing import List
from datetime import date
import numpy as np
import pandas as pd
from autumn.tools.db.database import get_database
from autumn.tools.db.load import load_mcmc_tables
from autumn.tools.utils.runs import read_run_id
logger = logging.getLogger(__name__)
def collate_databases(src_db_paths: List[str], target_db_path: str, tables=None):
"""
Collate the output of many calibration databases into a single database.
Run names are renamed to be ascending in the final database.
"""
logger.info("Collating db outputs into %s", target_db_path)
target_db = get_database(target_db_path)
for db_path in src_db_paths:
logger.info("Reading data from %s", db_path)
source_db = get_database(db_path)
for table_name in source_db.table_names():
if tables and table_name not in tables:
logger.info("Skipping table %s", table_name)
continue
logger.info("Copying table %s", table_name)
table_df = source_db.query(table_name)
target_db.dump_df(table_name, table_df)
logger.info("Finished collating db outputs into %s", target_db_path)
def find_mle_run(df: pd.DataFrame) -> pd.DataFrame:
accept_mask = df["accept"] == 1
max_ll = df[accept_mask]["loglikelihood"].max()
max_ll_mask = accept_mask & (df["loglikelihood"] == max_ll)
return df[max_ll_mask].copy()
def find_mle_params(mcmc_df: pd.DataFrame, param_df: pd.DataFrame) -> dict:
mle_run_df = find_mle_run(mcmc_df)
run_id = mle_run_df["run"].iloc[0]
chain_id = mle_run_df["chain"].iloc[0]
param_mask = (param_df["run"] == run_id) & (param_df["chain"] == chain_id)
params = {}
for _, row in param_df[param_mask].iterrows():
params[row["name"]] = row["value"]
return params
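# Illustrative note (added; parameter names are hypothetical): find_mle_params returns a
# plain dict such as {"contact_rate": 0.04, "infection_duration": 7.1} for the accepted
# run with the highest loglikelihood.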
def get_identifying_run_ids(table: pd.DataFrame) -> pd.Series:
"""
Args:
table (pd.DataFrame): Table with 'run' and 'chain' columns
Returns:
pd.Series: Combined run identifier of same length as table
"""
return table["chain"].astype(str) + ":" + table["run"].astype(str)
def select_pruning_candidates(src_db_path: str, n_candidates: int, weighted=True) -> pd.DataFrame:
"""Select a random set of 'good enough' candidates for manual inspection
The output set will be guaranteed to contain the highest
MLE run from all the chains, in addition to randomly selected candidates
Args:
src_db_path (str): Base path of calibration run (containing subdirectories for each chain)
n_candidates (int): Number of candidates to select. If 1, then only the MLE run from all chains will be selected
weighted (bool): Weight candidates by 1.0/loglikelihood (False means uniform selection)
Returns:
candidates (pd.DataFrame): DataFrame containing unique identifiers (chain_id, run_id) of all candidates
"""
# +++ FIXME/TODO
# We just use a naive random selection, disregarding burn-in etc
# Could possibly use selection routine from sample_outputs_for_calibration_fit
# Load all MCMC run data to select from
all_mcmc_df = pd.concat(load_mcmc_tables(src_db_path), ignore_index=True)
all_accepted = all_mcmc_df[all_mcmc_df["accept"] == 1]
# Find the MLE candidate
max_ll = all_accepted["loglikelihood"].max()
max_ll_candidate = all_accepted[all_accepted["loglikelihood"] == max_ll].iloc[0].name
# Ensure candidates have been sampled and that output data is available
accepted_and_sampled = all_accepted[all_accepted["sampled"] == 1]
# Sample random candidates
possible_candidates = list(accepted_and_sampled.index)
if max_ll_candidate in possible_candidates:
possible_candidates.remove(max_ll_candidate)
if weighted:
# +++ FIXME Adding 10.0 to not overweight, should parameterise this
weights = 1.0 / (
10.0 + np.abs(np.array(accepted_and_sampled.loc[possible_candidates].loglikelihood))
)
weights = weights / weights.sum()
else:
weights = None
# Ensure we aren't sampling too many candidates (most likely to show up in testing)
n_candidates = min(n_candidates, len(possible_candidates))
candidates = list(
np.random.choice(possible_candidates, n_candidates - 1, replace=False, p=weights)
)
# Ensure we have the max likelihood candidate
candidates.append(max_ll_candidate)
candidates_df = all_accepted.loc[candidates]
return candidates_df
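# Illustrative usage sketch (added; the path and count are hypothetical):
#   candidates_df = select_pruning_candidates("data/calibration_run", n_candidates=15)
#   # one row per selected (chain, run), always including the overall MLE run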
def prune_chain(source_db_path: str, target_db_path: str, chain_candidates: pd.DataFrame):
"""
Read the model outputs from a database and removes output data that is not MLE.
This is an operation applied to each chain's database.
"""
logger.info("Pruning %s into %s", source_db_path, target_db_path)
source_db = get_database(source_db_path)
target_db = get_database(target_db_path)
# Copy tables over, pruning some.
tables_to_copy = source_db.table_names()
for table_name in tables_to_copy:
table_df = source_db.query(table_name)
if table_name == "outputs":
# Drop everything except the MLE run
logger.info("Pruning outputs so that it only contains candidate runs")
candidate_mask = table_df["run"].isin(chain_candidates["run"])
candidate_table_df = table_df[candidate_mask]
target_db.dump_df(table_name, candidate_table_df)
elif table_name:
# Copy table over (mcmc_run, mcmc_params, derived_outputs)
# We need to keep derived outputs here to be used by uncertainty calculations
logger.info("Copying %s", table_name)
target_db.dump_df(table_name, table_df)
logger.info("Finished pruning %s into %s", source_db_path, target_db_path)
def prune_final(source_db_path: str, target_db_path: str, candidates_df: pd.DataFrame):
"""
Read the model outputs from a database and remove all run-related data that is not MLE.
This is the final pruning for the collated database.
"""
logger.info("Pruning %s into %s", source_db_path, target_db_path)
source_db = get_database(source_db_path)
target_db = get_database(target_db_path)
# Find the maximum accepted loglikelihood for all runs
mcmc_run_df = source_db.query("mcmc_run")
mle_run_df = find_mle_run(mcmc_run_df)
mle_run_id = mle_run_df.run.iloc[0]
mle_chain_id = mle_run_df.chain.iloc[0]
# Copy tables over, pruning some.
tables_to_copy = source_db.table_names()
for table_name in tables_to_copy:
table_df = source_db.query(table_name)
if table_name == "derived_outputs":
# Drop everything except the candidate runs
logger.info("Pruning derived_outputs so that it only contains candidate runs")
candidate_iruns = get_identifying_run_ids(candidates_df)
table_df["irun_id"] = get_identifying_run_ids(table_df)
filtered_table_df = table_df[table_df["irun_id"].isin(candidate_iruns)]
final_df = filtered_table_df.drop(columns="irun_id")
target_db.dump_df(table_name, final_df)
elif table_name:
# Copy table over (outputs, mcmc_run, mcmc_params)
# Note: Outputs has already been pruned to candidates in early prune_chains sweep
logger.info("Copying %s", table_name)
target_db.dump_df(table_name, table_df)
logger.info("Finished pruning %s into %s", source_db_path, target_db_path)
def powerbi_postprocess(source_db_path: str, target_db_path: str, run_id: str):
"""
Read the model outputs from a database and then convert them into a form
that is readable by our PowerBI dashboard.
Save the converted data into its own database.
"""
from autumn.tools.project import get_project
source_db = get_database(source_db_path)
target_db = get_database(target_db_path)
tables_to_copy = [t for t in source_db.table_names() if t != "outputs"]
for table_name in tables_to_copy:
logger.info("Copying %s", table_name)
table_df = source_db.query(table_name)
if table_name == "uncertainty":
# Rename "time" field to "times"
            table_df = table_df.rename(columns={"time": "times"})
target_db.dump_df(table_name, table_df)
app_name, region_name, timestamp, git_commit = read_run_id(run_id)
# Add build metadata table
build_key = f"{timestamp}-{git_commit}"
logger.info("Adding 'build' metadata table with key %s", build_key)
build_df = pd.DataFrame.from_dict(
{"build_key": [build_key], "app_name": [app_name], "region_name": [region_name]}
)
target_db.dump_df("build", build_df)
# Add scenario metadata table
logger.info("Adding 'scenario' metadata table")
project = get_project(app_name, region_name)
    baseline_params = project.param_set.baseline.to_dict()
    scenario_params_list = [sc.to_dict() for sc in project.param_set.scenarios]
    # Add default scenario
    scenario_data = [
        {
            "scenario": 0,
            "start_time": int(baseline_params["time"]["start"]),
            "description": baseline_params.get("description", ""),
        }
    ]
    for sc_idx, sc_params in enumerate(scenario_params_list):
sc_datum = {
"scenario": int(sc_idx + 1),
"start_time": int(sc_params["time"]["start"]),
"description": sc_params.get("description", ""),
}
scenario_data.append(sc_datum)
scenario_df = pd.DataFrame(scenario_data)
target_db.dump_df("scenario", scenario_df)
# Add calibration targets
logger.info("Adding 'targets' table")
targets_data = []
for target in project.calibration.targets:
for t, v in zip(target["years"], target["values"]):
t_datum = {
"key": target["output_key"],
"times": t,
"value": v,
}
targets_data.append(t_datum)
targets_df = pd.DataFrame(targets_data)
target_db.dump_df("targets", targets_df)
logger.info("Converting outputs to PowerBI format")
outputs_df = source_db.query("outputs")
pbi_outputs_df = unpivot_outputs(outputs_df)
target_db.dump_df("powerbi_outputs", pbi_outputs_df)
logger.info("Finished creating PowerBI output database at %s", target_db_path)
def unpivot_outputs(output_df: pd.DataFrame):
"""
Take outputs in the form they come out of the model object and convert them into a "long", "melted" or "unpiovted"
format in order to more easily plug to PowerBI
"""
id_cols = ["chain", "run", "scenario", "times"]
value_cols = [c for c in output_df.columns if c not in id_cols]
output_df = output_df.melt(id_vars=id_cols, value_vars=value_cols)
cols = {"compartment"}
def label_strata(row: list):
strata = {"compartment": row[0]}
for el in row[1:]:
parts = el.split("_")
k = parts[0]
# FIXME: Use this once Milinda can use it in PowerBI
# v = "_".join(parts[1:])
strata[k] = el
cols.add(k)
return strata
variables = (s.split("X") for s in output_df.variable)
new_cols_df = pd.DataFrame([label_strata(row) for row in variables])
output_df = output_df.join(new_cols_df)
output_df = output_df.drop(columns="variable")
return output_df
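# Illustrative note (added; the column name is hypothetical): a wide column such as
# "susceptibleXagegroup_15Xclinical_sympt" is melted into rows carrying
# compartment="susceptible", agegroup="agegroup_15" and clinical="clinical_sympt"
# alongside the chain/run/scenario/times identifiers and the value.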
def sample_runs(mcmc_df: pd.DataFrame, num_samples: int):
"""
Returns a list of chain ids + run ids for each sampled run.
Choose runs with probability proprotional to their acceptance weights.
"""
run_choices = list(zip(mcmc_df["chain"].tolist(), mcmc_df["run"].tolist()))
assert num_samples < len(run_choices), "Must be more samples than choices"
weights = mcmc_df["weight"].to_numpy()
sample_pr = weights / weights.sum()
idxs = np.array([i for i in range(len(weights))])
chosen_idxs = np.random.choice(idxs, size=num_samples, replace=False, p=sample_pr)
chosen_runs = [run_choices[i] for i in chosen_idxs]
return chosen_runs
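# Illustrative note (added; numbers are hypothetical): with weights [1, 3] the run
# (chain 0, run 1) is three times as likely as (chain 0, run 0) to be drawn first;
# sampling is without replacement, so num_samples must stay below the number of runs.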
def select_outputs_from_candidates(
output_name: str,
derived_output_tables: pd.DataFrame,
candidates_df: pd.DataFrame,
ref_date: date,
):
out_df = pd.DataFrame()
for idx, c in candidates_df.iterrows():
chain = int(c["chain"])
run = int(c["run"])
ctable = derived_output_tables[chain]
run_mask = ctable["run"] == run
scenario_mask = ctable["scenario"] == 0
masked = ctable[run_mask & scenario_mask]
name = f"{chain}_{run}"
out_df[name] = pd.Series(
index=timelist_to_dti(masked["times"], ref_date), data=masked[output_name].data
)
return out_df
def timelist_to_dti(times, ref_date):
datelist = [ref_date + pd.offsets.Day(t) for t in times]
return pd.DatetimeIndex(datelist)
def target_to_series(target, ref_date):
index = timelist_to_dti(target["times"], ref_date)
return | pd.Series(index=index, data=target["values"]) | pandas.Series |
"""
A set of classes for aggregation of TERA data sources into common formats.
"""
from rdflib import Graph, Namespace, Literal, URIRef, BNode
from rdflib.namespace import RDF, OWL, RDFS
UNIT = Namespace('http://qudt.org/vocab/unit#')
import pandas as pd
import validators
import glob
import math
from tqdm import tqdm
import warnings
import copy
import tera.utils as ut
nan_values = ['nan', float('nan'),'--','-X','NA','NC',-1,'','sp.', -1,'sp,','var.','variant','NR','sp','ssp','ssp.','ssp,']
class DataObject:
def __init__(self, namespace = 'http://www.example.org/', verbose = True, name = 'Data Object'):
"""
Base class for aggregation of data.
Parameters
----------
namespace : str
Base URI for the data set.
verbose : bool
"""
self.graph = Graph()
self.namespace = Namespace(namespace)
self.name = name
self.verbose = verbose
def __add__(self, other):
c = copy.deepcopy(self)
c.graph += other.graph
return c
def __str__(self):
return self.name
def __dict__(self):
return {
'namespace':self.namespace,
'num_triples':len(self.graph)
}
def __del__(self):
self.graph = Graph()
def save(self, path):
"""Save graph to file.
Parameters
----------
path : str
ex: file.nt
"""
self.graph.serialize(path, format=path.split('.').pop(-1))
def replace(self, converted):
"""Replace old entities with new in data object.
Usefull after converting between datasets.
Parameters
----------
converted : list
list of (old, new) tuples.
"""
if len(converted) < 1:
warnings.warn('Empty mapping list.')
return
tmp = set()
for old, new in converted:
triples = self.graph.triples((old,None,None))
tmp |= set([(new,p,o) for _,p,o in triples])
triples = self.graph.triples((None, None, old))
tmp |= set([(s,p,new) for s,p,_ in triples])
self.graph.remove((old,None,None))
self.graph.remove((None,None,old))
for t in tmp:
self.graph.add(t)
def apply_func(self, func, dataframe, cols, sub_bar=False):
pbar = None
if self.verbose and not sub_bar:
pbar = tqdm(total=len(dataframe.index),desc=self.name)
for row in zip(*[dataframe[c] for c in cols]):
func(row)
if pbar: pbar.update(1)
class Taxonomy(DataObject):
def __init__(self,
namespace = 'https://www.ncbi.nlm.nih.gov/taxonomy/',
name = 'NCBI Taxonomy',
verbose = True,
directory = None):
"""
Aggregation of the NCBI Taxonomy.
Parameters
----------
directory : str
Path to data set. Downloaded from ftp://ftp.ncbi.nlm.nih.gov/pub/taxonomy/new_taxdump/new_taxdump.zip
"""
super(Taxonomy, self).__init__(namespace, verbose, name)
if directory:
self._load_ncbi_taxonomy(directory)
self.verbose = verbose
def _add_subproperties(self, uri, pref = False):
self.graph.add((uri,OWL.subPropertyOf,RDFS.label))
if pref:
self.graph.add((uri,OWL.subPropertyOf,URIRef('http://www.w3.org/2004/02/skos/core#prefLabel')))
def _load_ncbi_taxonomy(self, directory):
self._load_hierarchy(directory+'nodes.dmp')
self._load_divisions(directory+'division.dmp')
self._load_names(directory+'names.dmp')
self._add_domain_and_range_triples()
self._add_disjoint_axioms()
def _load_hierarchy(self, path):
df = pd.read_csv(path, sep='|', usecols=[0,1,2,4], names=['child','parent','rank','division'], na_values = nan_values, dtype = str)
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
def func(row):
c,p,r,d = row
c = self.namespace['taxon/'+str(c)]
rc = r
r = r.replace(' ','_')
if r != 'no_rank':
self.graph.add((c, self.namespace['rank'], self.namespace['rank/'+r]))
self.graph.add((self.namespace['rank/'+r], RDFS.label, Literal(rc)))
self.graph.add((self.namespace['rank/'+r], RDF.type, self.namespace['Rank']))
p = self.namespace['taxon/'+str(p)]
d = str(d).replace(' ','_')
d = self.namespace['division/'+str(d)]
if r == 'species': #species are treated as instances
self.graph.add((c,RDF.type, p))
self.graph.add((c, RDF.type, d))
else:
self.graph.add((c,RDFS.subClassOf, p))
self.graph.add((c, RDFS.subClassOf, d))
self.apply_func(func, df, ['child','parent','rank','division'])
def _load_names(self, path):
df = pd.read_csv(path, sep='|', usecols=[0,1,2,3], names=['taxon','name','unique_name','name_type'],na_values = nan_values,dtype = str)
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
def func(row):
c,n,un,nt = row
c = self.namespace['taxon/'+str(c)]
n = Literal(n)
un = Literal(un)
if len(un) > 0:
self.graph.add((c, self.namespace['uniqueName'], un))
self._add_subproperties(self.namespace['uniqueName'], pref=True)
if len(n) > 0:
ntl = Literal(nt)
nt = self.namespace[nt.replace(' ','_')]
self._add_subproperties(nt,pref=False)
self.graph.add((c,nt,n))
self.graph.add((nt,RDFS.label,ntl))
self.graph.add((nt,RDFS.domain,self.namespace['Taxon']))
self.apply_func(func, df, ['taxon','name','unique_name','name_type'])
def _load_divisions(self, path):
df = pd.read_csv(path, sep='|', usecols=[0,1,2], names=['division','acronym','name'], na_values = nan_values, dtype = str)
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
def func(row):
d,a,n = row
d = self.namespace['division/'+str(d)]
self.graph.add((d,RDF.type,self.namespace['Division']))
self.graph.add((d,RDFS.label,Literal(n)))
#self.graph.add((d,RDFS.label,Literal(a)))
self.apply_func(func, df, ['division','acronym','name'])
def _add_domain_and_range_triples(self):
self.graph.add((self.namespace['rank'],RDFS.domain,self.namespace['Taxon']))
self.graph.add((self.namespace['rank'],RDFS.range,self.namespace['Rank']))
def _add_disjoint_axioms(self):
for d in [self.namespace['division/1'], #Invertebrates
self.namespace['division/2'], #Mammals
self.namespace['division/4'], #Plants and Fungi
self.namespace['division/5'], #Primates
self.namespace['division/6'], #Rodents
self.namespace['division/9'], #Viruses
self.namespace['division/10']]: #Vertebrates
            self.graph.add((self.namespace['division/0'], #Bacteria
                            OWL.disjointWith, d))
for d in [self.namespace['division/2'], #Mammals
self.namespace['division/4'], #Plants and Fungi
self.namespace['division/5'], #Primates
self.namespace['division/6'], #Rodents
self.namespace['division/9'], #Viruses
self.namespace['division/10']]: #Vertebrates
            self.graph.add((self.namespace['division/1'], #Invertebrates
                            OWL.disjointWith, d))
for d in [self.namespace['division/4'], #Plants and Fungi
self.namespace['division/9'], #Viruses
self.namespace['division/10']]: #Vertebrates
            self.graph.add((self.namespace['division/2'], #Mammals
                            OWL.disjointWith, d))
for d in [self.namespace['division/2'], #Mammals
self.namespace['division/4'], #Plants and Fungi
self.namespace['division/5'], #Primates
self.namespace['division/6'], #Rodents
self.namespace['division/10']]: #Vertebrates
            self.graph.add((self.namespace['division/3'], #Phages
                            OWL.disjointWith, d))
for d in [self.namespace['division/2'], #Mammals
self.namespace['division/5'], #Primates
self.namespace['division/6'], #Rodents
self.namespace['division/10']]: #Vertebrates
            self.graph.add((self.namespace['division/4'], #Plants and Fungi
                            OWL.disjointWith, d))
for d in [self.namespace['division/1']]: #Invertebrates
            self.graph.add((self.namespace['division/5'], #Primates
                            OWL.disjointWith, d))
for d in [self.namespace['division/1']]: #Invertebrates
            self.graph.add((self.namespace['division/6'], #Rodents
                            OWL.disjointWith, d))
for d in [self.namespace['division/1'], #Invertebrates
self.namespace['division/0'], #Bacteria
self.namespace['division/2'], #Mammals
self.namespace['division/4'], #Plants and Fungi
self.namespace['division/5'], #Primates
self.namespace['division/6'], #Rodents
self.namespace['division/10']]: #Vertebrates
            self.graph.add((self.namespace['division/9'], #Viruses
                            OWL.disjointWith, d))
class Traits(DataObject):
def __init__(self,
namespace = 'https://eol.org/pages/',
name = 'EOL Traits',
verbose = True,
directory = None):
"""
Encyclopedia of Life Traits.
Parameters
----------
directory : str
Path to data set. See https://opendata.eol.org/dataset/all-trait-data-large
"""
super(Traits, self).__init__(namespace, verbose, name)
if directory:
self._load_eol_traits(directory)
def _load_eol_traits(self, directory):
self._load_traits(directory+'trait_bank/traits.csv')
self._load_desc(directory+'trait_bank/terms.csv')
for f in glob.glob(directory+'eol_rels/*.csv'):
self._load_eol_subclasses(f)
def _load_traits(self, path):
df = pd.read_csv(path, sep=',', usecols=['page_id','predicate','value_uri'], na_values = nan_values, dtype=str)
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
def func(row):
s,p,o = row
s = self.namespace[s]
try:
val = validators.url(o)
o = URIRef(o)
except TypeError:
o = Literal(o)
val = True
if validators.url(s) and validators.url(p) and val:
self.graph.add((URIRef(s),URIRef(p),o))
self.apply_func(func, df, ['page_id','predicate','value_uri'])
def _load_literal_traits(self,path):
df = pd.read_csv(path, sep=',', usecols=['page_id','predicate','measurement','units_uri'], na_values = nan_values, dtype=str)
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
def func(row):
s,p,o,u = row
s = self.namespace[s]
try:
o = Literal(o)
u = URIRef(u)
bnode = BNode()
self.graph.add((bnode,RDF.value,o))
self.graph.add((bnode,UNIT.units,u))
self.graph.add((URIRef(s),URIRef(p),bnode))
except TypeError:
pass
        self.apply_func(func, df, ['page_id', 'predicate', 'measurement', 'units_uri'])
def _load_desc(self, path):
df = pd.read_csv(path, sep=',', usecols=['uri','name'], na_values = nan_values, dtype=str)
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
def func(row):
uri,name = row
if validators.url(uri) and name:
self.graph.add((URIRef(uri),RDFS.label,Literal(name)))
self.apply_func(func, df, ['uri','name'])
def _load_eol_subclasses(self, path):
try:
try:
df = | pd.read_csv(path,sep=',',usecols=['child','parent'],na_values = nan_values, dtype=str) | pandas.read_csv |
import concurrent.futures as cf
from functools import partial
import os
import pandas as pd
import utility_functions as utilfunc
import config
# load logger
logger = utilfunc.get_logger()
class Agents(object):
"""
Agents class instance
"""
def __init__(self, agents_df):
"""
Initialize Agents Class
Parameters
----------
agents_df : 'pd.df'
Pandas Dataframe containing agents and their attributes.
Index = agent ids, columns = agent attributes
Returns
-------
agent_df : 'pd.df'
Agents DataFrame
agent_ids : 'ndarray'
Array of agent ids
agent_attrs : 'ndarray'
Array of agent attributes
attrs_types : 'pd.Series'
Array of dtypes for each attribute
"""
self.df = agents_df
self.ids = agents_df.index
self.attrs = agents_df.columns
self.types = agents_df.dtypes
def __len__(self):
"""
Return number of agents
"""
return len(self.ids)
def __repr__(self):
"""
Print number of agents and attributes
"""
return ('{a} contains {n} agents with {c} attributes'
.format(a=self.__class__.__name__,
n=len(self),
c=len(self.attrs)))
@property
def check_types(self):
"""
Check to see if attribute types have changed
"""
types = self.df.dtypes
check = self.types == types
if not all(check):
print('Attribute dtypes have changed')
@property
def update_attrs(self):
"""
Update agent class attributes
"""
self.ids = self.df.index
self.attrs = self.df.columns
self.types = self.df.dtypes
def __add__(self, df):
"""
Add agents to agents
Parameters
----------
df : 'pd.df'
Pandas Dataframe containing agents to be added
Returns
-------
agent_df : 'pd.df'
Updated Agents DataFrame
agent_ids : 'ndarray'
Updated array of agent ids
"""
self.df = self.df.append(df)
self.update_attrs
def add_attrs(self, attr_df, on=None):
"""
Add attributes to agents
Parameters
----------
df : 'pd.df'
Pandas Dataframe containing new attributes for agents
on : 'object'
Pandas on kwarg, if None join on index
Returns
-------
agent_df : 'pd.df'
Updated Agents DataFrame
attrs_types : 'pd.Series'
Updated attribute types
"""
if on is None:
self.df = self.df.join(attr_df, how='left')
else:
self.df = self.df.reset_index()
self.df = | pd.merge(self.df, attr_df, how='left', on=on) | pandas.merge |
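# Standalone pandas sketch of the two joining paths in Agents.add_attrs above: join on the
# agent index when `on` is None, otherwise reset the index and merge on a shared key column.
# The agent ids and attribute names are made up for illustration.
import pandas as pd

agents = pd.DataFrame({'income': [50000, 72000]},
                      index=pd.Index([101, 102], name='agent_id'))
by_index = pd.DataFrame({'state': ['CO', 'NM']},
                        index=pd.Index([101, 102], name='agent_id'))
by_key = pd.DataFrame({'state': ['CO', 'NM'], 'rate': [0.11, 0.12]})

joined = agents.join(by_index, how='left')                    # the `on is None` branch
merged = (pd.merge(joined.reset_index(), by_key, how='left', on='state')
            .set_index('agent_id'))                           # the keyed branch
print(merged)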
import pandas as pd
import glob
import matplotlib.pyplot as plt
import seaborn as sns
language = 'en'
embed='glove'
plot=True
df = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.tseries.period as period
from pandas import period_range, PeriodIndex, Index, date_range
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestPeriodIndex(tm.TestCase):
def setUp(self):
pass
def test_joins(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
joined = index.join(index[:-5], how=kind)
tm.assertIsInstance(joined, PeriodIndex)
self.assertEqual(joined.freq, index.freq)
def test_join_self(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
res = index.join(index, how=kind)
self.assertIs(index, res)
def test_join_does_not_recur(self):
df = tm.makeCustomDataframe(
3, 2, data_gen_f=lambda *args: np.random.randint(2),
c_idx_type='p', r_idx_type='dt')
s = df.iloc[:2, 0]
res = s.index.join(df.columns, how='outer')
expected = Index([s.index[0], s.index[1],
df.columns[0], df.columns[1]], object)
tm.assert_index_equal(res, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
'2000-01-01 09:08'],
freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4,
expected4),
(rng5, other5, expected5), (rng6, other6,
expected6),
(rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_union_misc(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].union(index[10:])
tm.assert_index_equal(result, index)
# not in order
result = _permute(index[:-5]).union(_permute(index[10:]))
tm.assert_index_equal(result, index)
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
with tm.assertRaises(period.IncompatibleFrequency):
index.union(index2)
msg = 'can only call with other PeriodIndex-ed objects'
with tm.assertRaisesRegexp(ValueError, msg):
index.join(index.to_timestamp())
index3 = period_range('1/1/2000', '1/20/2000', freq='2D')
with tm.assertRaises(period.IncompatibleFrequency):
index.join(index3)
def test_union_dataframe_index(self):
rng1 = pd.period_range('1/1/1999', '1/1/2012', freq='M')
s1 = pd.Series(np.random.randn(len(rng1)), rng1)
rng2 = pd.period_range('1/1/1980', '12/1/2001', freq='M')
s2 = pd.Series(np.random.randn(len(rng2)), rng2)
df = pd.DataFrame({'s1': s1, 's2': s2})
exp = pd.period_range('1/1/1980', '1/1/2012', freq='M')
self.assert_index_equal(df.index, exp)
def test_intersection(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].intersection(index[10:])
tm.assert_index_equal(result, index[10:-5])
# not in order
left = _permute(index[:-5])
right = _permute(index[10:])
result = left.intersection(right).sort_values()
tm.assert_index_equal(result, index[10:-5])
# raise if different frequencies
index = | period_range('1/1/2000', '1/20/2000', freq='D') | pandas.period_range |
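# Quick illustration of the PeriodIndex behaviour exercised by the tests above, written
# against the current pandas API (pd.period_range / Index.union) instead of the old
# pandas.tseries.period module imported in that snippet.
import pandas as pd

rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/4/2000', freq='D', periods=5)
print(rng.union(other))         # eight daily periods, 2000-01-01 through 2000-01-08
print(rng.intersection(other))  # the two overlapping days, 2000-01-04 and 2000-01-05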
from influxdb import InfluxDBClient
import time
import pandas as pd
import numpy as np
from pprint import pprint
import plotly.graph_objs as go
import plotly.io as pio
from datetime import datetime, timedelta
import pandas as pd
import os
host = 'hs-04.ipa.psnc.pl'
port = 8086
user = 'root'
password = '<PASSWORD>'
dbname = 'int_telemetry_db'
dbuser = 'int'
dbuser_password = '<PASSWORD>'
pio.kaleido.scope.default_width = 1000
pio.kaleido.scope.default_height = 0.6 * pio.kaleido.scope.default_width
client = InfluxDBClient(host, port, user, password, dbname)
def get_datatime(int_reports):
if len(int_reports) == 0:
return []
timestamps = [r['dstts'] for r in int_reports]
timestamps = | pd.DataFrame(timestamps, dtype='float64') | pandas.DataFrame |
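# Hedged sketch of turning raw 'dstts' report values like the ones collected above into
# pandas datetimes. The epoch unit is an assumption (INT telemetry sinks often report
# nanoseconds); adjust `unit=` if the reports actually carry seconds or milliseconds.
import pandas as pd

int_reports = [{'dstts': 1600000000000000000}, {'dstts': 1600000000500000000}]  # made-up reports
timestamps = pd.DataFrame([r['dstts'] for r in int_reports], columns=['dstts'], dtype='float64')
timestamps['time'] = pd.to_datetime(timestamps['dstts'].astype('int64'), unit='ns')
print(timestamps)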
# =========================================================================================================================================
# =================================== Extract Data from XML files and create Lua Tables. =================================
# =========================================================================================================================================
# I hate XML.
# =========
# LIBRARIES
# =========
import pandas # For fancy dataframes.
import pandas_read_xml as pdx # To convert XML to fancy dataframes.
# =============
# READ XML FILE
# =============
enumerationsDataFrame = pdx.read_xml(
"xml/Enumerations.xml", ["root", "enumerations", "enumeration"])
statObjectDefsDataFrame = pdx.read_xml(
"xml/StatObjectDefinitions.xml", ["root", "stat_object_definitions", "stat_object_definition"])
# ==================
# WRITE LUA CONTENTS
# ==================
# Enumerations
# ------------
luaTableContents = "References = {\n\t[\"Enumerations\"] = {\n"
for index, content in enumerationsDataFrame.iterrows():
# Extract dataframe
luaTableContents += "\t\t[\"" + content["@name"] + "\"] = {\n"
try:
itemDataFrame = pandas.DataFrame.from_dict(content["items"]["item"])
except ValueError: # For Act and SkillElements that have only one entry.
itemDataFrame = | pandas.DataFrame([{"@index": 0, "@value": 1}]) | pandas.DataFrame |
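# Hedged sketch of the step implied above: flattening one enumeration's items DataFrame
# into Lua table lines. Only the '@index'/'@value' columns come from the snippet; the
# exact Lua layout is an assumption for illustration.
import pandas

itemDataFrame = pandas.DataFrame([{"@index": 0, "@value": 1}, {"@index": 1, "@value": 2}])
luaLines = []
for index, item in itemDataFrame.iterrows():
    luaLines.append("\t\t\t[{}] = {},".format(item["@index"], item["@value"]))
print("\n".join(luaLines))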
import pandas as pd
import numpy as np
import sys
import os
clear = lambda: os.system('cls')
clear()
print("\n3. FILTRO BASADO EN CONTENIDO: PALABRAS CLAVES\n")
path="ml-latest-small"
movies = pd.read_csv(path+'/moviesES.csv', sep=',', encoding='latin-1', usecols=['movieId', 'title', 'genres'])
ratings = pd.read_csv(path+'/ratings.csv', sep=',', encoding='latin-1', usecols=['movieId', 'rating'])
tags = | pd.read_csv(path+'/tags.csv', sep=',', encoding='latin-1', usecols=['movieId', 'tag']) | pandas.read_csv |
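# Hedged sketch (with made-up rows) of how the tags file loaded above is typically
# collapsed into one keyword string per movie for keyword-based content filtering.
import pandas as pd

tags = pd.DataFrame({'movieId': [1, 1, 2], 'tag': ['pixar', 'fun', 'space']})
movies = pd.DataFrame({'movieId': [1, 2], 'title': ['Toy Story', 'Apollo 13']})
keywords = (tags.groupby('movieId')['tag']
                .apply(' '.join)
                .rename('keywords')
                .reset_index())
movies = movies.merge(keywords, on='movieId', how='left')
print(movies)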
from flask import Flask, render_template, request, redirect, url_for, session
import pandas as pd
import pymysql
import os
import io
#from werkzeug.utils import secure_filename
from pulp import *
import numpy as np
import pymysql
import pymysql.cursors
from pandas.io import sql
#from sqlalchemy import create_engine
import pandas as pd
import numpy as np
#import io
import statsmodels.formula.api as smf
import statsmodels.api as sm
import scipy.optimize as optimize
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#from flask import Flask, render_template, request, redirect, url_for, session, g
from sklearn.linear_model import LogisticRegression
from math import sin, cos, sqrt, atan2, radians
from statsmodels.tsa.arima_model import ARIMA
#from sqlalchemy import create_engine
from collections import defaultdict
from sklearn import linear_model
import statsmodels.api as sm
import scipy.stats as st
import pandas as pd
import numpy as np
from pulp import *
import pymysql
import math
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
changed.append(changeData['Product_Qty'].iloc[1]-changeData['Product_Qty'].iloc[i+1])
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
pint=[]
dint=[]
x = spq['Product_Price']
num_bins = 5
# n, pint, patches = plt.hist(x, num_bins, facecolor='blue', alpha=0.5)
y = spq['Product_Qty']
num_bins = 5
# n, dint, patches = plt.hist(y, num_bins, facecolor='blue', alpha=0.5)
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
(diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(Modeldata['Week'].iloc[vatr]*lst[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
comp=np.random.randint(7,15,time_period)
def demand(p, a=a, b=b, d=d, promo1=promo1,promo2_param=promo2,comp=comp, t=np.linspace(1,time_period,time_period)):
""" Return demand given an array of prices p for times t
(see equation 5 above)"""
return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
#""" Positive demand. Another inequality constraint x_t >= 0 """
return p_t
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
opt_demand=demand(opt_results['x'], a, b, d, promo1,promo2_param, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = pd.DataFrame(result)
result1 = result1.drop('sub_grouping_rules', axis=1)
conn.close()
return render_template('scenario.html',sdata = result1.to_html(index=False))
return ("Error")
@app.route("/delete", methods=['GET','POST'])
def delete():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM scenario")
conn.commit()
conn.close()
return render_template('scenario.html',alert1="All the scenarios were dropped!")
return ("Error")
@app.route('/papadashboard', methods=['GET', 'POST'])
def papadashboard():
sql1 = "SELECT `Scenario`, MAX(`Wagon-No`) AS 'Wagon Used', COUNT(`Batch`) AS 'Products Allocated', SUM(`Delivery Qty`) AS 'Total Product Allocated', SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', SUM(`Width`)/(MAX(`Wagon-No`)) AS 'Average Width Used' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curs = conn.cursor()
curs.execute("SELECT `scenario` FROM `scenario`")
sdata = curs.fetchall()
if len(sdata)==0:
conn.close()
return render_template('warning.html',alert='No data available')
cur1 = conn.cursor()
cur1.execute(sql1)
data1 = cur1.fetchall()
if len(data1)==0:
conn.close()
return render_template('warning.html',alert='Infeasible due to Insufficient Load')
cu = conn.cursor()
cu.execute("SELECT `length_bounds`,`width_bounds`,`load_lower_bounds`,`load_upper_bounds` FROM `scenario`")
sdaa = cu.fetchall()
sdaa = pd.DataFrame(sdaa)
asa=list()
for index, i in sdaa.iterrows():
hover = "Length Bound:"+str(i['length_bounds'])+", Width Bound:"+str(i['width_bounds'])+", Load Upper Bound:"+str(i['load_upper_bounds'])+", Load Lower Bound:"+str(i['load_lower_bounds'])
asa.append(hover)
asa=pd.DataFrame(asa)
asa.columns=['Details']
data1 = pd.DataFrame(data1)
data1['Average Width Used'] = data1['Average Width Used'].astype(int)
data1['Total Product Allocated'] = data1['Total Product Allocated'].astype(int)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(float)
data1['Average Load Carried'] = round(data1['Average Load Carried'],2)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(str)
fdata = pd.DataFrame(columns=['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used','Details'])
fdata[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']] = data1[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']]
fdata['Details'] = asa['Details']
fdata = fdata.values
sql11 = "SELECT `Scenario`, SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', COUNT(`Batch`) AS 'Allocated', SUM(`Delivery Qty`) AS 'Load Allocated' FROM `output`WHERE `Wagon-No`>0 GROUP BY `Scenario`"
sql21 = "SELECT COUNT(`Batch`) AS 'Total Allocated' FROM `output` GROUP BY `Scenario`"
sql31 = "SELECT `load_upper_bounds` FROM `scenario`"
conn1 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur11 = conn1.cursor()
cur21 = conn1.cursor()
cur31 = conn1.cursor()
cur11.execute(sql11)
data11 = cur11.fetchall()
data11 = pd.DataFrame(data11)
cur21.execute(sql21)
data21 = cur21.fetchall()
data21 = pd.DataFrame(data21)
cur31.execute(sql31)
data31 = cur31.fetchall()
data31 = pd.DataFrame(data31)
data11['Average Load Carried']=data11['Average Load Carried'].astype(float)
fdata1 = pd.DataFrame(columns=['Scenario','Utilisation Percent','Allocation Percent','Total Load Allocated'])
fdata1['Utilisation Percent'] = round(100*(data11['Average Load Carried']/data31['load_upper_bounds']),2)
data11['Load Allocated']=data11['Load Allocated'].astype(int)
fdata1[['Scenario','Total Load Allocated']]=data11[['Scenario','Load Allocated']]
data11['Allocated']=data11['Allocated'].astype(float)
data21['Total Allocated']=data21['Total Allocated'].astype(float)
fdata1['Allocation Percent'] = round(100*(data11['Allocated']/data21['Total Allocated']),2)
fdata1['Allocation Percent'] = fdata1['Allocation Percent'].astype(str)
fdat1 = fdata1.values
conn1.close()
if request.method == 'POST':
conn2 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn2.cursor()
ata = request.form['name']
cur.execute("SELECT * FROM `output` WHERE `Scenario` = '"+ata+"' ")
ssdata = cur.fetchall()
datasss = pd.DataFrame(ssdata)
data=datasss.replace("Not Allocated", 0)
df=data[['Delivery Qty','Wagon-No','Width','Group-Number']]
df['Wagon-No']=df['Wagon-No'].astype(int)
a=df['Wagon-No'].max()
##bar1
result_array = np.array([])
for i in range (a):
data_i = df[df['Wagon-No'] == i+1]
del_sum_i = data_i['Delivery Qty'].sum()
per_i=[((del_sum_i)/(205000)*100)]
result_array = np.append(result_array, per_i)
result_array1 = np.array([])
for j in range (a):
data_j = df[df['Wagon-No'] == j+1]
del_sum_j = data_j['Width'].sum()
per_util_j=[((del_sum_j)/(370)*100)]
result_array1 = np.append(result_array1, per_util_j)
##pie1
df112 = df[df['Wagon-No'] == 0]
pie1 = df112 ['Width'].sum()
df221 = df[df['Wagon-No'] > 0]
pie11 = df221['Width'].sum()
df1=data[['SW','Group-Number']]
dff1 = df1[data['Wagon-No'] == 0]
da1 =dff1.groupby(['SW']).count()
re11 = np.array([])
res12 = np.append(re11,da1)
da1['SW'] = da1.index
r1 = np.array([])
r12 = np.append(r1, da1['SW'])
df0=data[['Group-Number','Route','SLoc','Ship-to Abb','Wagon-No','Primary Equipment']]
df1=df0.replace("Not Allocated", 0)
f2 = pd.DataFrame(df1)
f2['Wagon-No']=f2['Wagon-No'].astype(int)
####Not-Allocated
f2['Group']=data['Group-Number']
df=f2[['Group','Wagon-No']]
dee = df[df['Wagon-No'] == 0]
deer =dee.groupby(['Group']).count()##Not Allocated
deer['Group'] = deer.index
##Total-Data
f2['Group1']=data['Group-Number']
dfc=f2[['Group1','Wagon-No']]
dfa=pd.DataFrame(dfc)
der = dfa[dfa['Wagon-No'] >= 0]
dear =der.groupby(['Group1']).count()##Wagons >1
dear['Group1'] = dear.index
dear.rename(columns={'Wagon-No': 'Allocated'}, inplace=True)
result = pd.concat([deer, dear], axis=1, join_axes=[dear.index])
resu=result[['Group1','Wagon-No','Allocated']]
result1=resu.fillna(00)
r5 = np.array([])
r6 = np.append(r5, result1['Wagon-No'])
r66=r6[0:73]###Not Allocated
r7 = np.append(r5, result1['Allocated'])
r77=r7[0:73]####total
r8 = np.append(r5, result1['Group1'])
r88=r8[0:73]###group
conn2.close()
return render_template('papadashboard.html',say=1,data=fdata,data1=fdat1,ata=ata,bar1=result_array,bar11=result_array1,pie11=pie1,pie111=pie11,x=r12,y=res12,xname=r88, bar7=r77,bar8=r66)
conn.close()
return render_template('papadashboard.html',data=fdata,data1=fdat1)
@app.route('/facilityallocation')
def facilityallocation():
return render_template('facilityhome.html')
@app.route('/dataimport')
def dataimport():
return render_template('facilityimport.html')
@app.route('/dataimport1')
def dataimport1():
return redirect(url_for('dataimport'))
@app.route('/facility_location')
def facility_location():
return render_template('facility_location.html')
@app.route('/facility')
def facility():
return redirect(url_for('facilityallocation'))
@app.route("/imprt", methods=['GET','POST'])
def imprt():
global customerdata
global factorydata
global Facyy
global Custo
customerfile = request.files['CustomerData'].read()
factoryfile = request.files['FactoryData'].read()
if len(customerfile)==0 or len(factoryfile)==0:
return render_template('facilityhome.html',warning='Data Invalid')
cdat=pd.read_csv(io.StringIO(customerfile.decode('utf-8')))
customerdata=pd.DataFrame(cdat)
fdat=pd.read_csv(io.StringIO(factoryfile.decode('utf-8')))
factorydata=pd.DataFrame(fdat)
Custo=customerdata.drop(['Lat','Long'],axis=1)
Facyy=factorydata.drop(['Lat','Long'],axis=1)
return render_template('facilityimport1.html',loc1=factorydata.values,loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False))
@app.route("/gmap")
def gmap():
custdata=customerdata
Factorydata=factorydata
price=1
#to get distance beetween customer and factory
#first get the Dimension
#get no of factories
Numberoffact=len(Factorydata)
#get Number of Customer
Numberofcust=len(custdata)
#Get The dist/unit cost
cost=price
#def function for distance calculation
# approximate radius of earth in km
def dist(lati1,long1,lati2,long2,cost):
R = 6373.0
lat1 = radians(lati1)
lon1 = radians(long1)
lat2 = radians(lati2)
lon2 = radians(long2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance =round(R * c,2)
return distance*cost
#Create a list for customer and factory
def costtable(custdata,Factorydata):
distance=list()
for lat1,long1 in zip(custdata.Lat, custdata.Long):
for lat2,long2 in zip(Factorydata.Lat, Factorydata.Long):
distance.append(dist(lat1,long1,lat2,long2,cost))
distable=np.reshape(distance, (Numberofcust,Numberoffact)).T
tab=pd.DataFrame(distable,index=[Factorydata.Factory],columns=[custdata.Customer])
return tab
DelCost=costtable(custdata,Factorydata)#return cost table of the customer and factoery
#creating Demand Table
demand=np.array(custdata.Demand)
col1=np.array(custdata.Customer)
Demand=pd.DataFrame(demand,col1).T
cols=sorted(col1)
#Creating capacity table
fact=np.array(Factorydata.Capacity)
col2=np.array(Factorydata.Factory)
Capacity=pd.DataFrame(fact,index=col2).T
colo=sorted(col2)
#creating Fixed cost table
fixed_c=np.array(Factorydata.FixedCost)
col3=np.array(Factorydata.Factory)
FixedCost= pd.DataFrame(fixed_c,index=col3)
# Create the 'prob' variable to contain the problem data
model = LpProblem("Min Cost Facility Location problem",LpMinimize)
production = pulp.LpVariable.dicts("Production",
((factory, cust) for factory in Capacity for cust in Demand),
lowBound=0,
cat='Integer')
factory_status =pulp.LpVariable.dicts("factory_status", (factory for factory in Capacity),
cat='Binary')
cap_slack =pulp.LpVariable.dicts("capslack",
(cust for cust in Demand),
lowBound=0,
cat='Integer')
model += pulp.lpSum(
[DelCost.loc[factory, cust] * production[factory, cust] for factory in Capacity for cust in Demand]
+ [FixedCost.loc[factory] * factory_status[factory] for factory in Capacity] + 5000000*cap_slack[cust] for cust in Demand)
for cust in Demand:
model += pulp.lpSum(production[factory, cust] for factory in Capacity)+cap_slack[cust] == Demand[cust]
for factory in Capacity:
model += pulp.lpSum(production[factory, cust] for cust in Demand) <= Capacity[factory]*factory_status[factory]
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
print("Total Cost of Ingredients per can = ", value(model.objective))
# Getting the table for the Factorywise Allocation
def factoryalloc(model,Numberoffact,Numberofcust,listoffac,listofcus):
listj=list()
listk=list()
listcaps=list()
for v in model.variables():
listj.append(v.varValue)
customer=listj[(len(listj)-Numberofcust-Numberoffact):(len(listj)-Numberoffact)]
del listj[(len(listj)-Numberoffact-Numberofcust):len(listj)]
for row in listj:
if row==0:
listk.append(0)
else:
listk.append(1)
x=np.reshape(listj,(Numberoffact,Numberofcust))
y=np.reshape(listk,(Numberoffact,Numberofcust))
FactoryAlloc_table=pd.DataFrame(x,index=listoffac,columns=listofcus)
Factorystatus=pd.DataFrame(y,index=listoffac,columns=listofcus)
return FactoryAlloc_table,Factorystatus,customer
Alltable,FactorystatusTable,ded=factoryalloc(model,Numberoffact,Numberofcust,colo,cols)
Allstatus=list()
dede=pd.DataFrame(ded,columns=['UnSatisfied'])
finaldede=dede[dede.UnSatisfied != 0]
colss=pd.DataFrame(cols,columns=['CustomerLocation'])
fina=pd.concat([colss,finaldede],axis=1, join='inner')
print(fina)
for i in range(len(Alltable)):
for j in range(len(Alltable.columns)):
if (Alltable.loc[Alltable.index[i], Alltable.columns[j]]>0):
all=[Alltable.index[i], Alltable.columns[j], Alltable.loc[Alltable.index[i], Alltable.columns[j]]]
Allstatus.append(all)
Status=pd.DataFrame(Allstatus,columns=['Factory','Customer','Allocation']).astype(str)
#To get the Factory Data
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
#Making Connection to the Database
cur = con.cursor()
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Status.to_sql(con=engine, name='facilityallocation',index=False, if_exists='replace')
cur = con.cursor()
cur1 = con.cursor()
cur.execute("SELECT * FROM `facilityallocation`")
file=cur.fetchall()
dat=pd.DataFrame(file)
lst=dat[['Factory','Customer']]
mlst=[]
names=lst['Factory'].unique().tolist()
for name in names:
lsty=lst.loc[lst.Factory==name]
mlst.append(lsty.values)
data=dat[['Factory','Customer','Allocation']]
sql="SELECT SUM(`Allocation`) AS 'UseCapacity', `Factory` FROM `facilityallocation` GROUP BY `Factory`"
cur1.execute(sql)
file2=cur1.fetchall()
udata=pd.DataFrame(file2)
bdata=factorydata.sort_values(by=['Factory'])
adata=bdata['Capacity']
con.close()
infdata=dat[['Customer','Factory','Allocation']]
infodata=infdata.sort_values(by=['Customer'])
namess=infodata.Customer.unique()
lstyy=[]
for nam in namess:
bb=infodata[infodata.Customer==nam]
comment=bb['Factory']+":"+bb['Allocation']
prin=[nam,str(comment.values).strip('[]')]
lstyy.append(prin)
return render_template('facilityoptimise.html',say=1,lstyy=lstyy,x1=adata.values,x2=udata.values,dat=mlst,loc1=factorydata.values,
loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False),summary=data.to_html(index=False))
#Demand Forecast
@app.route('/demandforecast')
def demandforecast():
return render_template('demandforecast.html')
@app.route("/demandforecastdataimport",methods = ['GET','POST'])
def demandforecastdataimport():
if request.method== 'POST':
global actualforecastdata
flat=request.files['flat'].read()
if len(flat)==0:
return('No Data Selected')
cdat=pd.read_csv(io.StringIO(flat.decode('utf-8')))
actualforecastdata=pd.DataFrame(cdat)
return render_template('demandforecast.html',data=actualforecastdata.to_html(index=False))
@app.route('/demandforecastinput', methods = ['GET', 'POST'])
def demandforecastinput():
if request.method=='POST':
global demandforecastfrm
global demandforecasttoo
global demandforecastinputdata
demandforecastfrm=request.form['from']
demandforecasttoo=request.form['to']
value=request.form['typedf']
demandforecastinputdata=actualforecastdata[(actualforecastdata['Date'] >= demandforecastfrm) & (actualforecastdata['Date'] <= demandforecasttoo)]
if value=='monthly': ##monthly
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
demandforecastinputdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('monthlyforecast'))
if value=='quarterly': ##quarterly
global Quaterdata
dated2 = demandforecastinputdata['Date']
nlst=[]
for var in dated2:
var1 = int(var[5:7])
if var1 >=1 and var1 <4:
varr=var[:4]+'-01-01'
elif var1 >=4 and var1 <7:
varr=var[:4]+'-04-01'
elif var1 >=7 and var1 <10:
varr=var[:4]+'-07-01'
else:
varr=var[:4]+'-10-01'
nlst.append(varr)
nwlst=pd.DataFrame(nlst,columns=['Newyear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=nwlst['Newyear']
Quaterdata=demandforecastinputdata.groupby(['Date']).sum()
Quaterdata=Quaterdata.reset_index()
Quaterdata=Quaterdata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Quaterdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('quarterlyforecast'))
if value=='yearly': ##yearly
global Yeardata
#copydata=demandforecastinputdata
dated1 = demandforecastinputdata['Date']
lst=[]
for var in dated1:
var1 = var[:4]+'-01-01'
lst.append(var1)
newlst=pd.DataFrame(lst,columns=['NewYear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=newlst['NewYear']
Yeardata=demandforecastinputdata.groupby(['Date']).sum()
Yeardata=Yeardata.reset_index()
Yeardata=Yeardata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Yeardata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('yearlyforecast'))
#if value=='weekly': ##weekly
# return redirect(url_for('output4'))
return render_template('demandforecast.html')
@app.route("/monthlyforecast",methods = ['GET','POST'])
def monthlyforecast():
data = pd.DataFrame(demandforecastinputdata)
# container1
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])
# container2
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])
# container3
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])
# container4
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])
# container1
df=a1[['GDP']]
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutput`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutput`")
con.commit()
sql = "INSERT INTO `forecastoutput` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = | pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"]) | pandas.DataFrame |
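# Tiny standalone worked example of the ME/MAE/MAPE helpers defined in the route above
# (note that, as written there, MAPE divides by the prediction rather than the usual
# actual value). The numbers are illustrative only.
import numpy as np

y_true = np.array([100.0, 120.0, 90.0])
y_pred = np.array([110.0, 115.0, 95.0])

me = np.mean(y_true - y_pred)                             # (-10 + 5 - 5) / 3 = -3.33
mae = np.mean(np.abs(y_true - y_pred))                    # (10 + 5 + 5) / 3 = 6.67
mape = np.mean(np.abs((y_true - y_pred) / y_pred)) * 100  # about 6.23
print(me, mae, mape)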
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script saves bid and ask data for specified ETFs to files for each day
during market open hours.
It assumes the computer is at US East Coast Time.
@author: mark
"""
import os
import pandas as pd
import numpy as np
from itertools import product
import streamlit as st
from bokeh.plotting import figure
from bokeh.models.tools import HoverTool
from bokeh.models import NumeralTickFormatter, DatetimeTickFormatter, Rect, ColumnDataSource, VBar, LabelSet
from streamlit_metrics import metric_row
def display_method_to_choose_etfs(selected_method_choose_dates, all_etfs, etf_data, sl_obj):
"""
Generates various streamlit options for selecting which ETFs to display.
Parameters
----------
selected_method_choose_dates : list of str
Strings of the various methods of selecting ETFs.
all_etfs : list of str
List of all ETF tickers.
etf_data : pd.DataFrame
Dataframe containing bulk data about ETFs.
sl_obj : streamlit
Stremlit object to place the elements.
Returns
-------
selected_etfs : list of str
List of str tickers chosen by users.
"""
selected_etfs = all_etfs
if 'By volume traded' in selected_method_choose_dates:
selection_data = etf_data['volume (shares/day)']
log_min = float(np.floor(np.log10(selection_data.min())))
log_max = float(np.ceil(np.log10(selection_data.max())))
min_vol, max_vol = sl_obj.slider('Average Volume (shares/day)',
min_value=float(log_min),
max_value=float(log_max),
value=(float(log_min), float(log_max)),
step=float(log_max - log_min) / 100,
format='10^%.1f'
)
selected = (selection_data >= 10**min_vol) & (selection_data <= 10**max_vol)
selected_etfs = list(set(selected_etfs) & set(selection_data[selected].index))
if 'By market cap' in selected_method_choose_dates:
selection_data = etf_data['net assets (million USD)']
log_min = float(np.floor(np.log10(selection_data.min())))
log_max = float(np.ceil(np.log10(selection_data.max())))
min_vol, max_vol = sl_obj.slider('Market Cap as of 2021-02-21 (million USD)',
min_value=float(log_min),
max_value=float(log_max),
value=(float(log_min), float(log_max)),
step=float(log_max - log_min) / 100,
format='10^%.1f'
)
selected = (selection_data >= 10**min_vol) & (selection_data <= 10**max_vol)
selected_etfs = list(set(selected_etfs) & set(selection_data[selected].index))
if 'Only ESG ETFs' in selected_method_choose_dates:
esg_etfs = etf_data[etf_data['esg'] == True].index
selected_etfs = list(set(selected_etfs) & set(esg_etfs))
if 'choose specific ETFs' in selected_method_choose_dates:
selected_etfs = sl_obj.multiselect('Which ETFs do you want to look at', list(selected_etfs), ['ESGV','VTI','BND', 'VCEB', 'VSGX'])
return selected_etfs
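# Illustrative helper, not called by the app: the log-scale sliders above reduce to a
# between-powers-of-ten mask, shown here on a made-up volume Series (relies on the
# module's pandas import).
def _demo_log_volume_filter(min_exp=5.0, max_exp=7.0):
    volume = pd.Series({'VTI': 4.2e6, 'BND': 5.5e6, 'ESGV': 3.1e4})
    mask = (volume >= 10**min_exp) & (volume <= 10**max_exp)
    return volume[mask].index.tolist()   # ['VTI', 'BND']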
def get_averages(data, selected_dates, selected_etfs):
"""
Obtain average values of various ETFs across the trading day.
Parameters
----------
data : pd.DataFrame
data of various days and ETFs.
selected_dates : list of str
list of dates in format YYYY-MM-DD.
selected_etfs : list of str
list of ETF tickers.
Returns
-------
pd.DataFrame
DataFrame of average values for each ETF at various times during the trading day.
"""
potential_columns = product(selected_dates, selected_etfs)
actual_columns = [x for x in potential_columns if x in data.columns]
return data[actual_columns].T.groupby(level=['etf']).mean().T
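# Illustrative helper, not called by the app: with ('date', 'etf') MultiIndex columns,
# transposing and grouping on the 'etf' level (as get_averages does) averages each ETF
# across the selected days. The quoted-spread values here are made up.
def _demo_get_averages():
    times = pd.date_range('2021-01-01 09:30', periods=3, freq='1min')
    cols = pd.MultiIndex.from_product(
        [['2021-02-01', '2021-02-02'], ['VTI', 'BND']], names=['date', 'etf'])
    data = pd.DataFrame([[1.0, 5.0, 3.0, 7.0],
                         [2.0, 6.0, 4.0, 8.0],
                         [3.0, 7.0, 5.0, 9.0]], index=times, columns=cols)
    return data.T.groupby(level=['etf']).mean().T   # one column per ETF, averaged over both days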
def add_trade_windows(p, t_new, t_old, ymax):
"""
Add trade windows to plot
Parameters
----------
p : Bokeh figure
Figure to add trading windows to.
t_new : tuple of timestamps
Starting and ending timestamp of the new trading window.
t_old : tuple of timestamps
Starting and ending timestamp of the old trading window.
ymax : float
Max y value up to which the trading windows extend.
Returns
-------
None.
"""
source = ColumnDataSource(dict(x=[t_old[0]+0.5*(t_old[1]-t_old[0]),t_new[0]+0.5*(t_new[1]-t_new[0])],
y=[ymax-0.0002, ymax-0.0002 ],
w=[t_old[1]-t_old[0], t_new[1]-t_new[0]],
h =[2,2],
desc=['Old', 'New']))
if ymax > 2:
patch = {'h' : [ (0, ymax), (1, ymax) ],}
source.patch(patch)
boxes = Rect(x='x',y='y',width='w', height='h', fill_color='grey', fill_alpha=0.1,
line_width=0)
boxes_select = Rect(x='x',y='y',width='w', height='h', fill_color='grey', fill_alpha=.2,
line_width=0)
box_rend = p.add_glyph(source, boxes)
box_rend.hover_glyph = boxes_select
tooltips = [('trade window','@desc')]
p.add_tools(HoverTool(tooltips=tooltips, renderers=[box_rend]))
def format_plots(p, ymax=None):
"""
Format bokeh plots for quoted spreads across market times
Parameters
----------
p : Bokeh figure plot
Bokeh plot object to format
ymax : float, optional
Max y-axis value. The default is None.
Returns
-------
None
"""
if ymax is None:
num_formatter='0.00%'
else:
num_zeros = int(np.log10(1/ymax)-.4)
num_formatter = '0.'+''.join(['0' for x in range(num_zeros)])+'%'
p.yaxis.formatter = NumeralTickFormatter(format=num_formatter)
p.xaxis.formatter = DatetimeTickFormatter(hours='%H:%M')
p.xaxis.axis_label = 'Market Time'
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
p.toolbar.autohide = True
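# Illustrative helper, not called by the app: how format_plots derives the y-axis percent
# format from ymax, e.g. ymax=5e-05 keeps three decimal places ('0.000%').
def _demo_percent_format(ymax=5e-05):
    num_zeros = int(np.log10(1 / ymax) - .4)
    return '0.' + ''.join(['0' for _ in range(num_zeros)]) + '%'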
def make_multi_etf_plot(selected_etfs, selected_dates, t_new, t_old, quoted_spread):
"""
Make plot with multiple ETF averages
Parameters
----------
selected_etfs : list of str
List of ETF tickers
selected_dates : list of str
List of dates to obtain averages of. In format YYYY-MM-DD.
t_new : tuple of timestamps
Starting and ending timestamp of the new trading window.
t_old : tuple of timestamps
Starting and ending timestamp of the old trading window.
quoted_spread : pd.DataFrame
Quoted spread data for various times, days, and ETFs.
Returns
-------
p : Bokeh figure
Plot of multiple ETF averages.
"""
t_all = t_new + t_old
average_data = get_averages(quoted_spread, selected_dates, selected_etfs)
p = figure(plot_width=400, plot_height=400, x_axis_type="datetime",
toolbar_location='below', title='quoted Bid-Ask Spread for various ETFs',
x_range=(pd.Timestamp('2021-01-01 9:30'), max(t_all)+pd.Timedelta(hours=1.5)),
y_range=(0, average_data.max().max()+0.0001))
#trading windows
add_trade_windows(p, t_new, t_old, average_data.max().max())
# etf lines
renders = []
for etf in selected_etfs:
renders.append(p.line(average_data.index, average_data[etf],# set visual properties for selected glyphs
hover_color="firebrick",
hover_alpha=1,
# set visual properties for non-selected glyphs
color="grey",
alpha=0.5,
name=etf))
tooltips = [('etf','$name'),
('time','$x{%H:%M}'),
('Bid-Ask spread', '$y{"0.00%"}')]
formatters = { "$x": "datetime",}
p.add_tools(HoverTool(tooltips=tooltips, renderers=renders, formatters=formatters))
format_plots(p, ymax=average_data.max().max()+0.0001)
return p
def make_single_etf_plot(selected_etf, selected_dates, t_new, t_old, quoted_spread, supress_hover_after= 10000):
"""
Plots data for a single ETF for multiple days.
Parameters
----------
selected_etf : str
ETF ticker to plot.
selected_dates : list of str
List of dates to plot. In format YYYY-MM-DD.
t_new : tuple of timestamps
Starting and ending timestamp of the new trading window.
t_old : tuple of timestamps
Starting and ending timestamp of the old trading window.
quoted_spread : pd.DataFrame
Quoted spread data for various times, days, and ETFs.
supress_hover_after : int, optional
Do not show hover functionality if there are more than this number of days. The default is 10000.
Returns
-------
p : Bokeh figure
Plot of single ETF over various days.
"""
t_all = t_new + t_old
average_data = get_averages(quoted_spread, selected_dates, [selected_etf])
p = figure(plot_width=400, plot_height=400, x_axis_type="datetime",
toolbar_location='below', title='Quoted spread for {}'.format(selected_etf),
x_range=(pd.Timestamp('2021-01-01 9:30'), max(t_all)+pd.Timedelta(hours=1.5)),
y_range=(0, average_data.max().max()+0.0001))
add_trade_windows(p, t_new, t_old, average_data.max().max())
# etf lines
renders = []
if len(selected_dates) > 1:
for date in selected_dates:
try:
render = p.line(quoted_spread.index, quoted_spread.loc[:,(date,selected_etf)],# set visual properties for selected glyphs
hover_color="firebrick",
hover_alpha=0.33,
color="grey",
alpha=0.25,
name=date)
except KeyError:
continue
if len(selected_dates) < supress_hover_after:
renders.append(render)
average_name = 'average'
else:
average_name = selected_dates[0]
renders.append(p.line(average_data.index, average_data[selected_etf],# set visual properties for selected glyphs
hover_color="firebrick",
hover_alpha=0.75,
color="black",
alpha=0.5,
name=average_name))
tooltips = [('date','$name'),
('time','$x{%H:%M}'),
('Bid-Ask spread', '$y{"0.00%"}')]
formatters = { "$x": "datetime",}
p.add_tools(HoverTool(tooltips=tooltips, renderers=renders, formatters=formatters))
format_plots(p)
return p
def make_bid_ask_plot(selected_etf, selected_date, t_new, t_old, directory):
"""
Plots bid and ask prices over one trading day for one ETF.
Parameters
----------
selected_etf : str
ETF ticker of data to show.
selected_date : str
Date of data to show. In format YYYY-MM-DD.
t_new : tuple of timestamps
Starting and ending timestamp of the new trading window.
t_old : tuple of timestamps
Starting and ending timestamp of the old trading window.
directory : str
Folder containing ETF bid and ask price data. File must be in format date_etf.csv.
Returns
-------
p : Bokeh figure
Plot of bid and ask prices.
"""
data = pd.read_csv(os.path.join(directory, '{}_{}.csv'.format(selected_date, selected_etf)), index_col=0)
basetime = pd.to_datetime('2021-01-01') + pd.Timedelta(hours=9, minutes=30)
timedeltas = pd.TimedeltaIndex([pd.Timedelta(seconds=x) for x in data.index])
data.index = timedeltas + basetime
t_all = t_new + t_old
bid = data.bid
ask = data.ask
p = figure(plot_width=400, plot_height=400, x_axis_type="datetime",
toolbar_location='below', title='Bid & ask prices for {} on {}'.format(selected_etf, selected_date),
x_range=(pd.Timestamp('2021-01-01 9:30'), max(t_all)+pd.Timedelta(hours=1.5)),
y_range=(min(bid.min(),ask.min())-0.2, max(bid.max(),ask.max())+0.2))
add_trade_windows(p, t_new, t_old, max(bid.max(),ask.max()))
renders = []
renders.append(p.line(bid.index, bid.values,# set visual properties for selected glyphs
hover_color="blue",
hover_alpha=1,
color="blue",
alpha=.5,
name='bid'))
renders.append(p.line(ask.index, ask.values,# set visual properties for selected glyphs
hover_color="firebrick",
hover_alpha=1,
color="firebrick",
alpha=0.5,
name='ask'))
tooltips = [('type','$name'),
('time','$x{%H:%M}'),
('price', '$y{"$0.00"}')]
formatters = { "$x": "datetime",}
p.add_tools(HoverTool(tooltips=tooltips, renderers=renders, formatters=formatters))
format_plots(p)
p.yaxis.formatter = NumeralTickFormatter(format="$0.00")
return p
def make_relative_fee_amount(selected_ratios, t_new_text = ''):
"""
Generate a bar plot for the ratio of quoted spread to expense ratio.
Parameters
----------
selected_ratios : pd.Series
Data of ratio of quoted spread to expense ratio.
t_new_text : str
Time range to place in title of plot.
Returns
-------
p : Bokeh figure
Produced plot.
"""
p = figure(plot_width=400, plot_height=400,
x_axis_label="ETFs", x_minor_ticks=len(selected_ratios),
toolbar_location='below', title='Ratio of quoted spread to expense ratio {}'.format(t_new_text))
source = ColumnDataSource(dict(x=range(len(selected_ratios)),
top=selected_ratios.values,
desc=selected_ratios.index,))
glyph = VBar(x='x', top='top', bottom=0, width=0.5, fill_color='grey',
line_width=0, fill_alpha=0.5)
glyph_hover = VBar(x='x', top='top', bottom=0, width=0.5, fill_color='firebrick',
line_width=0, fill_alpha=1)
rend = p.add_glyph(source, glyph)
rend.hover_glyph = glyph_hover
labels = LabelSet(x='x', level='glyph', source=source, render_mode='canvas')
tooltips = [('etf','@desc'),
('ratio','@top')]
p.add_tools(HoverTool(tooltips=tooltips, renderers=[rend]))
num_zeros = int(np.log10(1/selected_ratios.max())-.4)
num_formatter = '0.'+''.join(['0' for x in range(num_zeros)])+'%'
p.yaxis.formatter = NumeralTickFormatter(format=num_formatter)
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
p.toolbar.autohide = True
p.xaxis.bounds = (-.5,len(selected_ratios)-.5)
p.xaxis.ticker = list(range(len(selected_ratios)))
p.xaxis.major_label_overrides = dict(zip(range(len(selected_ratios)), list(selected_ratios.index)))
p.xaxis.major_label_orientation = 3.14/2
return p
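# Usage sketch (hypothetical inputs): given a Series of spread-to-expense-ratio values
# indexed by ticker, e.g. pd.Series({'SPY': 0.02, 'QQQ': 0.015}), a call such as
# make_relative_fee_amount(pd.Series({'SPY': 0.02, 'QQQ': 0.015}), '(15:30-15:45)')
# would return a bar chart with one bar per ETF and a percent-formatted y-axis.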
def get_quoted_spread_change(selected_etfs, selected_dates, t_old, t_new, quoted_spread):
"""
Get the relative change in average quoted spread between the two time windows.
Parameters
----------
selected_etfs : list of str
List of ETF tickers
selected_dates : list of str
List of dates to obtain averages of. In format YYYY-MM-DD.
t_new : tuple of timestamps
Starting and ending timestamp of the new trading window.
t_old : tuple of timestamps
Starting and ending timestamp of the old trading window.
quoted_spread : pd.DataFrame
Quoted spread data for various times, days, and ETFs.
Returns
-------
pd.Series
The relative change in average quoted spread between the two time windows.
"""
df = get_averages(quoted_spread, selected_dates, selected_etfs)
old_quotes = df[(df.index > t_old[0]) & (df.index < t_old[1])].mean(0)
new_quotes = df[(df.index > t_new[0]) & (df.index < t_new[1])].mean(0)
return (new_quotes / old_quotes).sort_values(ascending=False)
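# Usage sketch (hypothetical arguments): with
# t_old = (pd.Timestamp('2021-01-01 10:00'), pd.Timestamp('2021-01-01 10:15')) and
# t_new = (pd.Timestamp('2021-01-01 15:30'), pd.Timestamp('2021-01-01 15:45')),
# get_quoted_spread_change(['SPY'], selected_dates, t_old, t_new, quoted_spread) returns
# the new-window / old-window spread ratio per ETF, sorted descending; values above 1
# mean the new window is more expensive to trade in.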
def create_metrics(fractional_increase, nwide=4, container=st, max_rows=2):
"""
Print information about fractional change in quoted spreads in metric form
Parameters
----------
fractional_increase : pd.Series
Data of the increase in fees between two windows.
nwide : int, optional
Number of metrics to print side-by-side. The default is 4.
container : streamlit object, optional
Object to display metrics. The default is st.
max_rows : int, optional
Max number of rows to present data for. The default is 2.
Returns
-------
None.
"""
metrics = {}
rows = 0
for etf, val in dict(fractional_increase).items():
if len(metrics) == nwide:
with container:
metric_row(metrics)
metrics = {}
rows += 1
if rows == max_rows:
break
metrics[etf] = '{:.0f}%'.format((val-1)*100)
if len(metrics) > 0:
with container:
metric_row(metrics)
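# Worked example of the metric label above (illustrative value): a fractional_increase of
# 1.25 for an ETF renders as '{:.0f}%'.format((1.25 - 1) * 100) == '25%', i.e. a 25%
# increase in the quoted spread between the two windows.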
st.write("# Bid-Ask spreads. Does time of day matter?")
st.write("#### By <NAME>")
st.write('first published March 10, 2021')
intro = st.beta_expander("Introduction")
data_selection = st.beta_expander("Data selection")
results = st.beta_expander("Results")
conclusion = st.beta_expander("Conclusion")
methods = st.beta_expander("Methods")
disclaimer = st.beta_expander("Disclaimer")
quoted_spread = pd.read_pickle('data/quoted_spread.pkl')
# remove outliers that impact average
del quoted_spread[('2020-12-16', 'SPCX')] # high value on second day of trading
del quoted_spread[('2020-03-12', 'ESGU')] # brief spike during period of high market uncertainty
del quoted_spread[('2020-03-17', 'DRIV')] # brief spike during period of high market uncertainty
del quoted_spread[('2020-02-03', 'EAGG')] # brief spike during period of high market uncertainty
all_dates = list(quoted_spread.columns.levels[0])
all_dates.sort()
all_etfs = list(quoted_spread.columns.levels[1])
etf_data = pd.read_csv('etf.csv', index_col='Symbol')
etf_data = etf_data[etf_data['for_data'] == True]
start, end = data_selection.select_slider('Dates to analyze', all_dates, (all_dates[0], all_dates[-1]))
selected_dates = all_dates[all_dates.index(start):all_dates.index(end)+1]  # include the selected end date
method_choose_etfs = data_selection.multiselect('Methods for selecting ETFs',
['By volume traded', 'By market cap', 'Only ESG ETFs', 'choose specific ETFs'], ['choose specific ETFs'])
selected_etfs = display_method_to_choose_etfs(method_choose_etfs, all_etfs,etf_data,sl_obj=data_selection)
left_column, right_column = data_selection.beta_columns(2)
t_old = right_column.slider('Old trading window timing',
min_value=pd.Timestamp('2021-01-01 9:30').to_pydatetime(),
max_value=pd.Timestamp('2021-01-01 16:00').to_pydatetime(),
value=(pd.Timestamp('2021-01-01 10:00').to_pydatetime(), pd.Timestamp('2021-01-01 10:15').to_pydatetime()),
step=pd.Timedelta(minutes=5).to_pytimedelta(),
format='H:mm'
)
t_new = left_column.slider('New trading window timing',
min_value=pd.Timestamp('2021-01-01 9:30')
# General purpose packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import randint
# Image processing packages
from skimage import io, color
from skimage.transform import resize
from skimage.segmentation import slic
from skimage.color import label2rgb
from skimage.filters import try_all_threshold, sobel
from skimage import exposure
# Preprocessing modeling packages
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
# Modeling packages
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
# Test metrics packages
from sklearn.model_selection import cross_val_score, GridSearchCV, RandomizedSearchCV
from sklearn.metrics import roc_curve, auc, accuracy_score, mean_squared_error as MSE, classification_report
##########################################
df = pd.read_csv('signatures_data.csv', index_col=0)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from read_input import read_study
########################American#############################################
def main_analysis():
output_folder = './output_ref/'
studies = ['bermudean', 'maxcall2', 'maxcall10', 'strangle']
losses_ref = {'maxcall10': 38.278, \
'maxcall2': 13.899, \
'strangle': 11.794,\
'bermudean': 0.06031}
name_studies = {'maxcall10': 'Max-call, d = 10', \
'maxcall2': 'Max-call, d = 2',\
'bermudean': 'Bermudean put', \
'strangle': 'Strangle spread'}
losses = {}
error = {}
times = {}
for study in studies:
results = pd.read_csv(output_folder + study + '/results.csv', sep=';')
losses[study] = results['price'].values[0]
error[study] = np.abs((losses[study]-losses_ref[study])/ losses_ref[study])
times[study] = round(results['time'].values[0], 1)
round_n = 4
results_df = pd.DataFrame()
results_df['Use case / Method'] = [name_studies[study] for study in studies]
results_df['Algorithm \ref{algo:algoGlobal}'] = [round(losses[study], round_n) \
for study in studies]
results_df['Reference'] = [np.around(losses_ref[study],round_n) for \
study in studies]
results_df['Difference'] = [str(np.around(100 * error[study],2)) + '\%' for \
study in studies]
results_df['Time (s)'] = [times[study] for \
study in studies]
print(results_df.to_latex(index=False, escape=False))
#plot learning curve
for study in studies:
learning_curve = pd.read_csv(output_folder + \
study + '/learning_curve.csv', sep=';')
fig, ax = plt.subplots(figsize=(20, 12))
ax.plot(learning_curve['iter'], learning_curve['loss_train'], label='loss train')
ax.plot(learning_curve['iter'], learning_curve['loss_test'], label='loss test')
ax.plot(learning_curve['iter'], \
-np.ones((len(learning_curve['iter']),))*losses_ref[study],\
label='reference value')
ax.grid()
ax.legend(loc='best', fontsize=20)
ax.set_xlabel('Number of iterations', fontsize=25)
ax.set_ylabel('Loss', fontsize=25)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(20)
fig.savefig(output_folder + study + '/learningcurve_' + study + '.png',\
bbox_inches='tight')
plt.show()
plt.close()
#########################Bermudean analysis
parameters = read_study('./input', 'bermudean.json')
control = parameters['control']
control.load_weights(output_folder + 'bermudean/' + 'optimal_weights')
range_x = np.linspace(0.6, 1.4, 1000)
times = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
proba = np.zeros((len(range_x), len(times)))
fig, ax = plt.subplots(figsize=(20, 12))
for ind_t, t in enumerate(times):
state = (range_x.reshape(-1,1) - control.normalisation_dict['mean'].numpy()[0,0,0]) \
/ control.normalisation_dict['std'].numpy()[0,0,0]
state = np.concatenate((t*np.ones((len(range_x), 1)), state), axis=1)
state = np.concatenate((state, np.zeros((len(range_x), 1))), axis=1)
output = 10 * np.tanh(control.nn(state))
proba[:,ind_t] = 1/(1+np.exp(-output[:,0]))
ax.plot(range_x, proba[:,ind_t], label='time to maturity = ' + str(np.round(1-t,1)))
ax.grid()
ax.legend(loc='best', fontsize=20)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(20)
ax.set_xlabel('Price', fontsize=25)
ax.set_ylabel('Probability of exercise', fontsize=25)
fig.savefig(output_folder + 'bermudean/proba_exercise.png', bbox_inches='tight')
plt.show()
plt.close()
range_t = np.linspace(0, 1, 1000)
price_limit = np.zeros((len(range_t), ))
for ind_t, t in enumerate(range_t):
state = (range_x.reshape(-1,1) - control.normalisation_dict['mean'].numpy()[0,0,0]) \
/ control.normalisation_dict['std'].numpy()[0,0,0]
state = np.concatenate((t*np.ones((len(range_x), 1)), state), axis=1)
state = np.concatenate((state, np.zeros((len(range_x), 1))), axis=1)
output = 10 * np.tanh(control.nn(state))
proba = 1/(1+np.exp(-output[:,0]))
p_l = range_x[np.where(proba < 0.5)[0][0]]
price_limit[ind_t] = p_l
fig, ax = plt.subplots(figsize=(20, 12))
ax.plot((1-range_t), price_limit, label='Neural network')
ax.grid()
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(20)
ax.set_ylabel('Price', fontsize=25)
ax.set_xlabel('Time to maturity', fontsize=25)
fig.savefig(output_folder + 'bermudean/region_exercise.png', bbox_inches='tight')
plt.show()
plt.close()
##########################Swing Ibanez#######################################
strikes = [35, 40, 45]
nb_exercises = range(1, 7)
round_n = 3
ibanez = {35: [5.114, 10.195, 15.230, 20.230, 25.200, 30.121], \
40: [1.774, 3.480, 5.111, 6.661, 8.124, 9.502], \
45: [0.411, 0.772, 1.089, 1.358, 1.582, 1.756]}
nn = dict()
txt_dict = dict()
error_dict = dict()
times_dict = dict()
for strike in strikes:
price = []
error = []
txt = []
times = []
for ind_ex, ex in enumerate(nb_exercises):
results = pd.read_csv(output_folder + 'swingIbanez' + str(strike) + \
str(ex) + '/results.csv', sep=';')
price.append(results['price'].values[0])
error.append(np.around(100 * np.abs((price[-1] - ibanez[strike][ind_ex])/ \
ibanez[strike][ind_ex]),2))
txt.append('(' + str(round(price[-1], round_n)) + ', ' + \
str(round(ibanez[strike][ind_ex], round_n)) + ', ' + \
str(round(error[-1], round_n)) + '\%)')
times.append(round(results['time'].values[0],1))
nn[strike] = price
times_dict[strike] = times
error_dict[strike] = error
txt_dict[strike] = txt
results_df = pd.DataFrame()
times_df = pd.DataFrame()
results_df['$l$ / $S_0$'] = nb_exercises
times_df['$l$ / $S_0$'] = nb_exercises
for strike in strikes:
results_df[strike] = txt_dict[strike]
times_df[strike] = times_dict[strike]
print(results_df.to_latex(index=False, escape=False))
print(times_df.to_latex(index=False, escape=False))
parameters = read_study('./input', 'swingIbanez406.json')
control = parameters['control']
control.load_weights(output_folder + 'swingIbanez406/' + 'optimal_weights')
range_x = np.linspace(30, 50, 1000)
time = 0.5
range_l = [0,1,2,3,4,5]
proba = np.zeros((len(range_x), len(range_l)))
fig, ax = plt.subplots(figsize=(20, 12))
for ind_l, l in enumerate(range_l):
state = (range_x.reshape(-1,1) - control.normalisation_dict['mean'].numpy()[0,0,0]) \
/ control.normalisation_dict['std'].numpy()[0,0,0]
state = np.concatenate((time*np.ones((len(range_x), 1)), state), axis=1)
state = np.concatenate((state, l*np.ones((len(range_x), 1))), axis=1)
output = 10 * np.tanh(control.nn(state))
proba[:,ind_l] = 1/(1+np.exp(-output[:,0]))
ax.plot(range_x, proba[:,ind_l], label='Remaining exercises = ' + str(6-l))
ax.grid()
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(20)
ax.legend(loc='best', fontsize=20)
ax.set_xlabel('Price', fontsize=25)
ax.set_ylabel('Probability of exercise', fontsize=25)
fig.savefig(output_folder + 'swingIbanez406/proba_exercise_swing.png', bbox_inches='tight')
plt.show()
plt.close()
range_t = np.linspace(0, 1, 1000)
range_l = [0,1,2,3,4,5]
price_limit = np.zeros((len(range_t), len(range_l)))
for ind_l, l in enumerate(range_l):
for ind_t, t in enumerate(range_t):
state = (range_x.reshape(-1,1) - control.normalisation_dict['mean'].numpy()[0,0,0]) \
/ control.normalisation_dict['std'].numpy()[0,0,0]
state = np.concatenate((t*np.ones((len(range_x), 1)), state), axis=1)
state = np.concatenate((state, l*np.ones((len(range_x), 1))), axis=1)
output = 10 * np.tanh(control.nn(state))
proba = 1/(1+np.exp(-output[:,0]))
p_l = range_x[np.where(proba < 0.5)[0][0]]
price_limit[ind_t,ind_l] = p_l
fig, ax = plt.subplots(figsize=(20, 12))
maturity = control.time[-1]
time_true = np.linspace(0, maturity, 1000)
for ind_l, l in enumerate(range_l):
ax.plot(maturity-time_true, price_limit[:,ind_l], label='Remaining exercises = ' + str(6-l))
ax.grid()
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(20)
ax.set_ylabel('Price', fontsize=25)
ax.set_xlabel('Time to maturity', fontsize=25)
ax.legend(loc='best', fontsize=20)
fig.savefig(output_folder + 'swingIbanez406/region_exercise_swing.png', bbox_inches='tight')
plt.show()
plt.close()
#########################Swing Ibanez 5d######################################
nn = dict()
txt_dict = dict()
error_dict = dict()
times_dict = dict()
n_exercise = 6
price = []
error = []
txt = []
times = []
for ex in range(1, n_exercise+1):
results = pd.read_csv(output_folder + 'swingIbanez40' + \
str(ex) + '5d/results.csv', sep=';')
price.append(results['price'].values[0])
error.append(np.abs((price[-1] - ibanez[40][ex-1])/ \
ibanez[40][ex-1]))
times.append(round(results['time'].values[0],1))
results_df = pd.DataFrame()
import sys
import os
from tqdm import tqdm
import pmdarima as pm
from pmdarima.model_selection import train_test_split
import numpy as np
from datetime import timedelta
import pandas as pd
from bokeh.io import output_file, show
from bokeh.models import Select, Slider
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure, output_file, show
from bokeh.models import DatetimeTickFormatter
from bokeh.layouts import column
import math
from bokeh.io import curdoc
sys.path.insert(0,'../covid_forcast')
sys.path.insert(0,'../../../covid_forcast')
# where to save things
# this is for the bokeh server, so it knows the location
LOCATION_OF_REPO ='/Users/rafaelvalerofernandez/Documents/repositories/covid_forecast/'
sys.path.insert(0,LOCATION_OF_REPO)
from covid_forecast.utils.data_io import get_data, download_csv_from_link
# where to save things
LOCATION_OF_REPO = ''
OUTPUT = '../outputs/arima'
os.makedirs(OUTPUT,exist_ok=True)
# In case you need to refresh the data, you need a folder /data
download_csv_from_link()
"""List of countries to explore"""
country_list = ['China', 'Italy', 'Germany', 'India', 'Spain', 'United_Kingdom', 'United_States_of_America',
'Lithuania', 'Cyprus']
variable_list = ['cases', 'deaths']
data = get_data()
data['dateRep'] = pd.to_datetime(data['dateRep'], infer_datetime_format=True)
from datetime import datetime, timedelta
import operator
from typing import Any, Sequence, Type, Union, cast
import warnings
import numpy as np
from pandas._libs import NaT, NaTType, Timestamp, algos, iNaT, lib
from pandas._libs.tslibs.c_timestamp import integer_op_not_supported
from pandas._libs.tslibs.period import DIFFERENT_FREQ, IncompatibleFrequency, Period
from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds
from pandas._libs.tslibs.timestamps import RoundTo, round_nsint64
from pandas._typing import DatetimeLikeScalar
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError, NullFrequencyError, PerformanceWarning
from pandas.util._decorators import Appender, Substitution
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_float_dtype,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_period_dtype,
is_string_dtype,
is_timedelta64_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.inference import is_array_like
from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna
from pandas.core import missing, nanops, ops
from pandas.core.algorithms import checked_add_with_arr, take, unique1d, value_counts
from pandas.core.arrays.base import ExtensionArray, ExtensionOpsMixin
import pandas.core.common as com
from pandas.core.indexers import check_bool_array_indexer
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.ops.invalid import invalid_comparison, make_invalid_op
from pandas.tseries import frequencies
from pandas.tseries.offsets import DateOffset, Tick
def _datetimelike_array_cmp(cls, op):
"""
Wrap comparison operations to convert Timestamp/Timedelta/Period-like to
boxed scalars/arrays.
"""
opname = f"__{op.__name__}__"
nat_result = opname == "__ne__"
@unpack_zerodim_and_defer(opname)
def wrapper(self, other):
if isinstance(other, str):
try:
# GH#18435 strings get a pass from tzawareness compat
other = self._scalar_from_string(other)
except ValueError:
# failed to parse as Timestamp/Timedelta/Period
return invalid_comparison(self, other, op)
if isinstance(other, self._recognized_scalars) or other is NaT:
other = self._scalar_type(other)
self._check_compatible_with(other)
other_i8 = self._unbox_scalar(other)
result = op(self.view("i8"), other_i8)
if isna(other):
result.fill(nat_result)
elif not is_list_like(other):
return invalid_comparison(self, other, op)
elif len(other) != len(self):
raise ValueError("Lengths must match")
else:
if isinstance(other, list):
# TODO: could use pd.Index to do inference?
other = np.array(other)
if not isinstance(other, (np.ndarray, type(self))):
return invalid_comparison(self, other, op)
if is_object_dtype(other):
# We have to use comp_method_OBJECT_ARRAY instead of numpy
# comparison otherwise it would fail to raise when
# comparing tz-aware and tz-naive
with np.errstate(all="ignore"):
result = ops.comp_method_OBJECT_ARRAY(
op, self.astype(object), other
)
o_mask = isna(other)
elif not type(self)._is_recognized_dtype(other.dtype):
return invalid_comparison(self, other, op)
else:
# For PeriodDType this casting is unnecessary
other = type(self)._from_sequence(other)
self._check_compatible_with(other)
result = op(self.view("i8"), other.view("i8"))
o_mask = other._isnan
if o_mask.any():
result[o_mask] = nat_result
if self._hasnans:
result[self._isnan] = nat_result
return result
return set_function_name(wrapper, opname, cls)
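# Illustrative behaviour of the generated comparison methods (a sketch based on the wrapper
# above): comparing against NaT fills the result with True for __ne__ and False for the other
# ops; comparing against a list-like of a different length raises ValueError("Lengths must
# match"); and comparing against a string that cannot be parsed as a Timestamp/Timedelta/Period
# falls back to invalid_comparison(), e.g. an all-False result for __eq__.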
class AttributesMixin:
_data: np.ndarray
@classmethod
def _simple_new(cls, values, **kwargs):
raise AbstractMethodError(cls)
@property
def _scalar_type(self) -> Type[DatetimeLikeScalar]:
"""The scalar associated with this datelike
* PeriodArray : Period
* DatetimeArray : Timestamp
* TimedeltaArray : Timedelta
"""
raise AbstractMethodError(self)
def _scalar_from_string(
self, value: str
) -> Union[Period, Timestamp, Timedelta, NaTType]:
"""
Construct a scalar type from a string.
Parameters
----------
value : str
Returns
-------
Period, Timestamp, or Timedelta, or NaT
Whatever the type of ``self._scalar_type`` is.
Notes
-----
This should call ``self._check_compatible_with`` before
unboxing the result.
"""
raise AbstractMethodError(self)
def _unbox_scalar(self, value: Union[Period, Timestamp, Timedelta, NaTType]) -> int:
"""
Unbox the integer value of a scalar `value`.
Parameters
----------
value : Union[Period, Timestamp, Timedelta]
Returns
-------
int
Examples
--------
>>> self._unbox_scalar(Timedelta('10s')) # DOCTEST: +SKIP
10000000000
"""
raise AbstractMethodError(self)
def _check_compatible_with(
self, other: Union[Period, Timestamp, Timedelta, NaTType], setitem: bool = False
) -> None:
"""
Verify that `self` and `other` are compatible.
* DatetimeArray verifies that the timezones (if any) match
* PeriodArray verifies that the freq matches
* Timedelta has no verification
In each case, NaT is considered compatible.
Parameters
----------
other
setitem : bool, default False
For __setitem__ we may have stricter compatibility restrictions than
for comparisons.
Raises
------
Exception
"""
raise AbstractMethodError(self)
class DatelikeOps:
"""
Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex.
"""
@Substitution(
URL="https://docs.python.org/3/library/datetime.html"
"#strftime-and-strptime-behavior"
)
def strftime(self, date_format):
"""
Convert to Index using specified date_format.
Return an Index of formatted strings specified by date_format, which
supports the same string format as the python standard library. Details
of the string format can be found in `python string format
doc <%(URL)s>`__.
Parameters
----------
date_format : str
Date format string (e.g. "%%Y-%%m-%%d").
Returns
-------
ndarray
NumPy ndarray of formatted strings.
See Also
--------
to_datetime : Convert the given argument to datetime.
DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
DatetimeIndex.round : Round the DatetimeIndex to the specified freq.
DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.
Examples
--------
>>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"),
... periods=3, freq='s')
>>> rng.strftime('%%B %%d, %%Y, %%r')
Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
'March 10, 2018, 09:00:02 AM'],
dtype='object')
"""
result = self._format_native_types(date_format=date_format, na_rep=np.nan)
return result.astype(object)
class TimelikeOps:
"""
Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex.
"""
_round_doc = """
Perform {op} operation on the data to the specified `freq`.
Parameters
----------
freq : str or Offset
The frequency level to {op} the index to. Must be a fixed
frequency like 'S' (second) not 'ME' (month end). See
:ref:`frequency aliases <timeseries.offset_aliases>` for
a list of possible `freq` values.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
Only relevant for DatetimeIndex:
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
.. versionadded:: 0.24.0
nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times.
.. versionadded:: 0.24.0
Returns
-------
DatetimeIndex, TimedeltaIndex, or Series
Index of the same type for a DatetimeIndex or TimedeltaIndex,
or a Series with the same index for a Series.
Raises
------
ValueError if the `freq` cannot be converted.
Examples
--------
**DatetimeIndex**
>>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min')
>>> rng
DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00',
'2018-01-01 12:01:00'],
dtype='datetime64[ns]', freq='T')
"""
_round_example = """>>> rng.round('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.round("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
_floor_example = """>>> rng.floor('H')
DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.floor("H")
0 2018-01-01 11:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
_ceil_example = """>>> rng.ceil('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 13:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.ceil("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 13:00:00
dtype: datetime64[ns]
"""
def _round(self, freq, mode, ambiguous, nonexistent):
# round the local times
if is_datetime64tz_dtype(self):
# operate on naive timestamps, then convert back to aware
naive = self.tz_localize(None)
result = naive._round(freq, mode, ambiguous, nonexistent)
aware = result.tz_localize(
self.tz, ambiguous=ambiguous, nonexistent=nonexistent
)
return aware
values = self.view("i8")
result = round_nsint64(values, mode, freq)
result = self._maybe_mask_results(result, fill_value=NaT)
return self._simple_new(result, dtype=self.dtype)
@Appender((_round_doc + _round_example).format(op="round"))
def round(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent)
@Appender((_round_doc + _floor_example).format(op="floor"))
def floor(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent)
@Appender((_round_doc + _ceil_example).format(op="ceil"))
def ceil(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent)
class DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin, ExtensionArray):
"""
Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray
Assumes that __new__/__init__ defines:
_data
_freq
and that the inheriting class has methods:
_generate_range
"""
@property
def ndim(self) -> int:
return self._data.ndim
@property
def shape(self):
return self._data.shape
def reshape(self, *args, **kwargs):
# Note: we drop any freq
data = self._data.reshape(*args, **kwargs)
return type(self)(data, dtype=self.dtype)
def ravel(self, *args, **kwargs):
# Note: we drop any freq
data = self._data.ravel(*args, **kwargs)
return type(self)(data, dtype=self.dtype)
@property
def _box_func(self):
"""
box function to get object from internal representation
"""
raise AbstractMethodError(self)
def _box_values(self, values):
"""
apply box func to passed values
"""
return lib.map_infer(values, self._box_func)
def __iter__(self):
return (self._box_func(v) for v in self.asi8)
@property
def asi8(self) -> np.ndarray:
"""
Integer representation of the values.
Returns
-------
ndarray
An ndarray with int64 dtype.
"""
# do not cache or you'll create a memory leak
return self._data.view("i8")
@property
def _ndarray_values(self):
return self._data
# ----------------------------------------------------------------
# Rendering Methods
def _format_native_types(self, na_rep="NaT", date_format=None):
"""
Helper method for astype when converting to strings.
Returns
-------
ndarray[str]
"""
raise AbstractMethodError(self)
def _formatter(self, boxed=False):
# TODO: Remove Datetime & DatetimeTZ formatters.
return "'{}'".format
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
@property
def nbytes(self):
return self._data.nbytes
def __array__(self, dtype=None) -> np.ndarray:
# used for Timedelta/DatetimeArray, overwritten by PeriodArray
if is_object_dtype(dtype):
return np.array(list(self), dtype=object)
return self._data
@property
def size(self) -> int:
"""The number of elements in this array."""
return np.prod(self.shape)
def __len__(self) -> int:
return len(self._data)
def __getitem__(self, key):
"""
This getitem defers to the underlying array, which by-definition can
only handle list-likes, slices, and integer scalars
"""
is_int = lib.is_integer(key)
if lib.is_scalar(key) and not is_int:
raise IndexError(
"only integers, slices (`:`), ellipsis (`...`), "
"numpy.newaxis (`None`) and integer or boolean "
"arrays are valid indices"
)
getitem = self._data.__getitem__
if is_int:
val = getitem(key)
if lib.is_scalar(val):
# i.e. self.ndim == 1
return self._box_func(val)
return type(self)(val, dtype=self.dtype)
if com.is_bool_indexer(key):
key = check_bool_array_indexer(self, key)
if key.all():
key = slice(0, None, None)
else:
key = lib.maybe_booleans_to_slice(key.view(np.uint8))
is_period = is_period_dtype(self)
if is_period:
freq = self.freq
else:
freq = None
if isinstance(key, slice):
if self.freq is not None and key.step is not None:
freq = key.step * self.freq
else:
freq = self.freq
elif key is Ellipsis:
# GH#21282 indexing with Ellipsis is similar to a full slice,
# should preserve `freq` attribute
freq = self.freq
result = getitem(key)
if result.ndim > 1:
# To support MPL which performs slicing with 2 dim
# even though it only has 1 dim by definition
if is_period:
return self._simple_new(result, dtype=self.dtype, freq=freq)
return result
return self._simple_new(result, dtype=self.dtype, freq=freq)
def __setitem__(
self,
key: Union[int, Sequence[int], Sequence[bool], slice],
value: Union[NaTType, Any, Sequence[Any]],
) -> None:
# I'm fudging the types a bit here. "Any" above really depends
# on type(self). For PeriodArray, it's Period (or stuff coercible
# to a period in from_sequence). For DatetimeArray, it's Timestamp...
# I don't know if mypy can do that, possibly with Generics.
# https://mypy.readthedocs.io/en/latest/generics.html
if lib.is_scalar(value) and not isna(value):
value = com.maybe_box_datetimelike(value)
if is_list_like(value):
is_slice = isinstance(key, slice)
if lib.is_scalar(key):
raise ValueError("setting an array element with a sequence.")
if not is_slice:
key = cast(Sequence, key)
if len(key) != len(value) and not com.is_bool_indexer(key):
msg = (
f"shape mismatch: value array of length '{len(key)}' "
"does not match indexing result of length "
f"'{len(value)}'."
)
raise ValueError(msg)
elif not len(key):
return
value = type(self)._from_sequence(value, dtype=self.dtype)
self._check_compatible_with(value, setitem=True)
value = value.asi8
elif isinstance(value, self._scalar_type):
self._check_compatible_with(value, setitem=True)
value = self._unbox_scalar(value)
elif is_valid_nat_for_dtype(value, self.dtype):
value = iNaT
else:
msg = (
f"'value' should be a '{self._scalar_type.__name__}', 'NaT', "
f"or array of those. Got '{type(value).__name__}' instead."
)
raise TypeError(msg)
self._data[key] = value
self._maybe_clear_freq()
def _maybe_clear_freq(self):
# inplace operations like __setitem__ may invalidate the freq of
# DatetimeArray and TimedeltaArray
pass
def astype(self, dtype, copy=True):
# Some notes on cases we don't have to handle here in the base class:
# 1. PeriodArray.astype handles period -> period
# 2. DatetimeArray.astype handles conversion between tz.
# 3. DatetimeArray.astype handles datetime -> period
from pandas import Categorical
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
return self._box_values(self.asi8)
elif is_string_dtype(dtype) and not is_categorical_dtype(dtype):
return self._format_native_types()
elif is_integer_dtype(dtype):
# we deliberately ignore int32 vs. int64 here.
# See https://github.com/pandas-dev/pandas/issues/24381 for more.
values = self.asi8
if is_unsigned_integer_dtype(dtype):
# Again, we ignore int32 vs. int64
values = values.view("uint64")
if copy:
values = values.copy()
return values
elif (
is_datetime_or_timedelta_dtype(dtype)
and not is_dtype_equal(self.dtype, dtype)
) or is_float_dtype(dtype):
# disallow conversion between datetime/timedelta,
# and conversions for any datetimelike to float
msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
raise TypeError(msg)
elif is_categorical_dtype(dtype):
return Categorical(self, dtype=dtype)
else:
return np.asarray(self, dtype=dtype)
def view(self, dtype=None):
if dtype is None or dtype is self.dtype:
return type(self)(self._data, dtype=self.dtype)
return self._data.view(dtype=dtype)
# ------------------------------------------------------------------
# ExtensionArray Interface
def unique(self):
result = unique1d(self.asi8)
return type(self)(result, dtype=self.dtype)
def _validate_fill_value(self, fill_value):
"""
If a fill_value is passed to `take` convert it to an i8 representation,
raising ValueError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : np.int64
Raises
------
ValueError
"""
if isna(fill_value):
fill_value = iNaT
elif isinstance(fill_value, self._recognized_scalars):
self._check_compatible_with(fill_value)
fill_value = self._scalar_type(fill_value)
fill_value = self._unbox_scalar(fill_value)
else:
raise ValueError(
f"'fill_value' should be a {self._scalar_type}. Got '{fill_value}'."
)
return fill_value
def take(self, indices, allow_fill=False, fill_value=None):
if allow_fill:
fill_value = self._validate_fill_value(fill_value)
new_values = take(
self.asi8, indices, allow_fill=allow_fill, fill_value=fill_value
)
return type(self)(new_values, dtype=self.dtype)
@classmethod
def _concat_same_type(cls, to_concat):
dtypes = {x.dtype for x in to_concat}
assert len(dtypes) == 1
dtype = list(dtypes)[0]
values = np.concatenate([x.asi8 for x in to_concat])
return cls(values, dtype=dtype)
def copy(self):
values = self.asi8.copy()
return type(self)._simple_new(values, dtype=self.dtype, freq=self.freq)
def _values_for_factorize(self):
return self.asi8, iNaT
@classmethod
def _from_factorized(cls, values, original):
return cls(values, dtype=original.dtype)
def _values_for_argsort(self):
return self._data
# ------------------------------------------------------------------
# Additional array methods
# These are not part of the EA API, but we implement them because
# pandas assumes they're there.
def searchsorted(self, value, side="left", sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `value`.
"""
if isinstance(value, str):
value = self._scalar_from_string(value)
if not (isinstance(value, (self._scalar_type, type(self))) or isna(value)):
raise ValueError(f"Unexpected type for 'value': {type(value)}")
self._check_compatible_with(value)
if isinstance(value, type(self)):
value = value.asi8
else:
value = self._unbox_scalar(value)
return self.asi8.searchsorted(value, side=side, sorter=sorter)
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an array.
See Also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
values = self._data.repeat(repeats)
return type(self)(values.view("i8"), dtype=self.dtype)
def value_counts(self, dropna=False):
"""
Return a Series containing counts of unique values.
Parameters
----------
dropna : bool, default True
Don't include counts of NaT values.
Returns
-------
Series
"""
from pandas import Series, Index
if dropna:
values = self[~self.isna()]._data
else:
values = self._data
cls = type(self)
result = value_counts(values, sort=False, dropna=dropna)
index = Index(
cls(result.index.view("i8"), dtype=self.dtype), name=result.index.name
)
return Series(result.values, index=index, name=result.name)
def map(self, mapper):
# TODO(GH-23179): Add ExtensionArray.map
# Need to figure out if we want ExtensionArray.map first.
# If so, then we can refactor IndexOpsMixin._map_values to
# a standalone function and call from here..
# Else, just rewrite _map_infer_values to do the right thing.
from pandas import Index
return Index(self).map(mapper).array
# ------------------------------------------------------------------
# Null Handling
def isna(self):
return self._isnan
@property # NB: override with cache_readonly in immutable subclasses
def _isnan(self):
"""
return if each value is nan
"""
return self.asi8 == iNaT
@property # NB: override with cache_readonly in immutable subclasses
def _hasnans(self):
"""
return if I have any nans; enables various perf speedups
"""
return bool(self._isnan.any())
def _maybe_mask_results(self, result, fill_value=iNaT, convert=None):
"""
Parameters
----------
result : a ndarray
fill_value : object, default iNaT
convert : str, dtype or None
Returns
-------
result : ndarray with values replaced by the fill_value
mask the result if needed, convert to the provided dtype if it's not
None
This is an internal routine.
"""
if self._hasnans:
if convert:
result = result.astype(convert)
if fill_value is None:
fill_value = np.nan
result[self._isnan] = fill_value
return result
def fillna(self, value=None, method=None, limit=None):
# TODO(GH-20300): remove this
# Just overriding to ensure that we avoid an astype(object).
# Either 20300 or a `_values_for_fillna` would avoid this duplication.
if isinstance(value, ABCSeries):
value = value.array
value, method = validate_fillna_kwargs(value, method)
mask = self.isna()
if is_array_like(value):
if len(value) != len(self):
raise ValueError(
f"Length of 'value' does not match. Got ({len(value)}) "
f" expected {len(self)}"
)
value = value[mask]
if mask.any():
if method is not None:
if method == "pad":
func = missing.pad_1d
else:
func = missing.backfill_1d
values = self._data
if not is_period_dtype(self):
# For PeriodArray self._data is i8, which gets copied
# by `func`. Otherwise we need to make a copy manually
# to avoid modifying `self` in-place.
values = values.copy()
new_values = func(values, limit=limit, mask=mask)
if is_datetime64tz_dtype(self):
# we need to pass int64 values to the constructor to avoid
# re-localizing incorrectly
new_values = new_values.view("i8")
new_values = type(self)(new_values, dtype=self.dtype)
else:
# fill with value
new_values = self.copy()
new_values[mask] = value
else:
new_values = self.copy()
return new_values
# ------------------------------------------------------------------
# Frequency Properties/Methods
@property
def freq(self):
"""
Return the frequency object if it is set, otherwise None.
"""
return self._freq
@freq.setter
def freq(self, value):
if value is not None:
value = frequencies.to_offset(value)
self._validate_frequency(self, value)
self._freq = value
@property
def freqstr(self):
"""
Return the frequency object as a string if its set, otherwise None
"""
if self.freq is None:
return None
return self.freq.freqstr
@property # NB: override with cache_readonly in immutable subclasses
def inferred_freq(self):
"""
Tries to return a string representing a frequency guess,
generated by infer_freq. Returns None if it can't autodetect the
frequency.
"""
if self.ndim != 1:
return None
try:
return frequencies.infer_freq(self)
except ValueError:
return None
@property # NB: override with cache_readonly in immutable subclasses
def _resolution(self):
return frequencies.Resolution.get_reso_from_freq(self.freqstr)
@property # NB: override with cache_readonly in immutable subclasses
def resolution(self):
"""
Returns day, hour, minute, second, millisecond or microsecond
"""
return frequencies.Resolution.get_str(self._resolution)
@classmethod
def _validate_frequency(cls, index, freq, **kwargs):
"""
Validate that a frequency is compatible with the values of a given
Datetime Array/Index or Timedelta Array/Index
Parameters
----------
index : DatetimeIndex or TimedeltaIndex
The index on which to determine if the given frequency is valid
freq : DateOffset
The frequency to validate
"""
if is_period_dtype(cls):
# Frequency validation is not meaningful for Period Array/Index
return None
inferred = index.inferred_freq
if index.size == 0 or inferred == freq.freqstr:
return None
try:
on_freq = cls._generate_range(
start=index[0], end=None, periods=len(index), freq=freq, **kwargs
)
if not np.array_equal(index.asi8, on_freq.asi8):
raise ValueError
except ValueError as e:
if "non-fixed" in str(e):
# non-fixed frequencies are not meaningful for timedelta64;
# we retain that error message
raise e
# GH#11587 the main way this is reached is if the `np.array_equal`
# check above is False. This can also be reached if index[0]
# is `NaT`, in which case the call to `cls._generate_range` will
# raise a ValueError, which we re-raise with a more targeted
# message.
raise ValueError(
f"Inferred frequency {inferred} from passed values "
f"does not conform to passed frequency {freq.freqstr}"
)
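# Example of the failure mode above (illustrative): building a DatetimeIndex from values
# spaced two days apart while passing freq="D" reaches the final raise, producing e.g.
# ValueError: Inferred frequency 2D from passed values does not conform to passed frequency D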
# monotonicity/uniqueness properties are called via frequencies.infer_freq,
# see GH#23789
@property
def _is_monotonic_increasing(self):
return algos.is_monotonic(self.asi8, timelike=True)[0]
@property
def _is_monotonic_decreasing(self):
return algos.is_monotonic(self.asi8, timelike=True)[1]
@property
def _is_unique(self):
return len(unique1d(self.asi8)) == len(self)
# ------------------------------------------------------------------
# Arithmetic Methods
_create_comparison_method = classmethod(_datetimelike_array_cmp)
# pow is invalid for all three subclasses; TimedeltaArray will override
# the multiplication and division ops
__pow__ = make_invalid_op("__pow__")
__rpow__ = make_invalid_op("__rpow__")
__mul__ = make_invalid_op("__mul__")
__rmul__ = make_invalid_op("__rmul__")
__truediv__ = make_invalid_op("__truediv__")
__rtruediv__ = make_invalid_op("__rtruediv__")
__floordiv__ = make_invalid_op("__floordiv__")
__rfloordiv__ = make_invalid_op("__rfloordiv__")
__mod__ = make_invalid_op("__mod__")
__rmod__ = make_invalid_op("__rmod__")
__divmod__ = make_invalid_op("__divmod__")
__rdivmod__ = make_invalid_op("__rdivmod__")
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019, <NAME> <akoenzen | uvic.ca>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import pandas as pd
import requests as rq
from colorama import Back, Style
class NBClassification(object):
def __init__(self, label: str, value: float = 0.0):
self.label: str = label
self.value: float = value
def __repr__(self):
return "{0}<{1}>".format(self.label, self.value)
class NBTerm(object):
def __init__(self, term: str, likelihood: float = 0.0):
self.term: str = term.lower().strip()
self.likelihood: float = likelihood
def __repr__(self):
return "{0}<{1}>".format(self.term, self.likelihood)
class NBDocument(object):
USE_FILTERED: bool = False
def __init__(self, raw_terms: [NBTerm], filtered_terms: [NBTerm]):
self.raw_terms: [NBTerm] = raw_terms # stopwords included
self.filtered_terms: [NBTerm] = filtered_terms # stopwords removed
def __repr__(self):
str = "\t\t\tTerms: {}\n".format(len(self.get_terms()))
for t in self.get_terms():
str += "\t\t\t{}\n".format(t)
return str
def get_terms(self):
if NBDocument.USE_FILTERED:
return self.filtered_terms
else:
return self.raw_terms
class NBClass(object):
def __init__(self, label: str):
self.label: str = label
self.documents: [NBDocument] = []
self.prior: float = 0.0
self.likelihoods: [NBTerm] = []
self.name: str = ""
if self.label == '0':
self.name = 'Wise Saying'
elif self.label == '1':
self.name = 'Future'
def __repr__(self):
str = "\tClass Label: {}\n".format(self.label)
str += "\tDocuments: {}\n".format(len(self.documents))
for d in self.documents:
str += "\t\t{}\n".format(d)
str += "\tPrior: {}\n".format(self.prior)
str += "\tLikelihoods: {}\n".format(len(self.likelihoods))
for l in self.likelihoods:
str += "\t\t{}\n".format(l)
return str
def add_create_document(self, message: str) -> None:
# break the document into terms
terms = message.split(' ')
raw_terms = [NBTerm(term=t) for t in terms]
filtered_terms = raw_terms # legacy, no use
self.documents.append(NBDocument(raw_terms=raw_terms, filtered_terms=filtered_terms))
def compute_likelihood(self, lexicon: [str]) -> None:
# this will include ALL terms in the class, INCLUDED repeated terms!!!
class_terms = [t.term for d in self.documents for t in d.get_terms()] # ALL TERMS!!!
# now for each term in lexicon compute its likelihood and add to the list of likelihoods
# likelihood = occurrences of term / all terms
for t in lexicon:
# compute numerator. add 1 to avoid the zero-frequency problem
numerator = class_terms.count(t) + 1
# compute denominator. add count of lexicon to avoid zero-frequency problem
denominator = len(class_terms) + len(lexicon)
# add to the likelihood list IF not present
flag = False
for e in self.likelihoods:
if e.term == t:
flag = True
if not flag:
self.likelihoods.append(NBTerm(term=t, likelihood=(numerator / denominator)))
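# Worked example of the Laplace-smoothed likelihood above (illustrative numbers): if a class
# contains 5 term occurrences in total, the word "future" occurs twice in it, and the shared
# lexicon has 10 unique words, then P("future" | class) = (2 + 1) / (5 + 10) = 0.2.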
def get_likelihood(self, term: str) -> None:
for e in self.likelihoods:
if e.term == term:
return e.likelihood
def get_class_lexicon(self) -> [str]:
lexicon = []
for d in self.documents:
for t in d.get_terms():
if t.term not in lexicon:
lexicon.append(t.term)
return lexicon
@staticmethod
def get_class_name(label: str):
if label == '0':
return 'Wise Saying'
elif label == '1':
return 'Future'
return 'None'
class NBModel(object):
DEBUG = False
def __init__(self):
self.classes: [NBClass] = []
self.lexicon: [str] = [] # vocabulary of UNIQUE words in ALL documents
def __repr__(self):
str = "Classes: {}\n".format(len(self.classes))
for c in self.classes:
str += "{}\n".format(c)
str += "Lexicon: {}\n".format(len(self.lexicon))
str += "{}".format(sorted(self.lexicon))
return str
def get_class(self, label: str) -> NBClass:
for c in self.classes:
if c.label == label:
return c
return None
def calculate_and_update_prior(self, label: str) -> None:
N_c = float(len(self.get_class(label=label).documents)) # number of docs in class
N = 0.0 # number of docs in all classes
for c in self.classes:
N += len(c.documents)
# update prior
self.get_class(label=label).prior = N_c / N
# +++ DEBUG
if NBModel.DEBUG:
print("PRIOR for class {0} is {1}.".format(label, N_c / N))
print("N_c: {0}, N: {1}".format(N_c, N))
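# Worked example of the prior above (illustrative numbers): if class '1' holds 30 of the
# 100 training documents, its prior is N_c / N = 30 / 100 = 0.3.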
def compute_lexicon(self) -> None:
# vocabulary should NOT contain duplicates
for c in self.classes:
for d in c.documents:
for t in d.get_terms():
if t.term not in self.lexicon:
self.lexicon.append(t.term)
def compute_likelihood(self) -> None:
for c in self.classes:
c.compute_likelihood(lexicon=self.lexicon)
class NaiveBayesTextClassifier(object):
"""
Text classifier using the Naïve Bayes Classifier. This classifier supports only 2 classes, so it's a
binary classifier.
"""
DEBUG = False
SHOW_MODEL = False
MAKE_SUBSET_FOR_TRAINING = False
TRAINING_SUBSET_SIZE = 2
MAKE_SUBSET_FOR_TESTING = False
TESTING_SUBSET_SIZE = 2
def __init__(self):
self.model: NBModel = NBModel()
pass
def train(self, training_set: [str] = [], debug: bool = False) -> NBModel:
# parse the training data and labels and convert them into pandas Series
training_data = rq.get(
'http://www.apkc.net/data/csc_578d/assignment01/problem04/traindata.txt'
).text.splitlines()
if training_data is not None:
t_data_series = pd.Series(training_data)
training_labels = rq.get(
'http://www.apkc.net/data/csc_578d/assignment01/problem04/trainlabels.txt'
).text.splitlines()
if training_labels is not None:
t_labels_series = pd.Series(training_labels)
# combine both series into a DataFrame
t_data_matrix = pd.DataFrame({
'message': t_data_series,
'label': t_labels_series
})
# make a custom subset of the entire training set for debugging purposes
if NaiveBayesTextClassifier.MAKE_SUBSET_FOR_TRAINING:
_0_messages = t_data_matrix.loc[
t_data_matrix.label == '0',
'message'][0:NaiveBayesTextClassifier.TRAINING_SUBSET_SIZE
]
_0_labels = ['0' for _ in _0_messages]
_1_messages = t_data_matrix.loc[
t_data_matrix.label == '1',
'message'][0:NaiveBayesTextClassifier.TRAINING_SUBSET_SIZE
]
_1_labels = ['1' for _ in _1_messages]
# replace the DataFrame
t_data_matrix = pd.DataFrame({
'message': pd.concat([
pd.Series(list(_0_messages)),
pd.Series(list(_1_messages))
]),
'label': pd.concat([
pd.Series(_0_labels),
pd.Series(_1_labels)
])
})
# +++ DEBUG
if NaiveBayesTextClassifier.DEBUG:
print("DataFrame: (Future: Class 1, Wise Saying: Class 0)")
print(t_data_matrix)
# construct the model
# 1. save classes, documents, terms
for label in t_data_matrix.label.unique(): # this returns an ndarray
self.model.classes.append(NBClass(label=label))
# save all messages for each class
tmp = t_data_matrix.loc[t_data_matrix.label == label, 'message']
cls = self.model.get_class(label)
for _, m in tmp.items():
cls.add_create_document(str(m))
# 2. calculate priors
for label in t_data_matrix.label.unique(): # this returns an ndarray
self.model.calculate_and_update_prior(label)
# 3. compute lexicon
self.model.compute_lexicon()
# 4. compute likelihoods
self.model.compute_likelihood()
# +++ DEBUG
if NaiveBayesTextClassifier.SHOW_MODEL:
print('')
print('++++++')
print(self.model)
return self.model
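# Scoring sketch (standard naive Bayes decision rule, using the priors and likelihoods
# stored in the model above): a test document d is assigned the class c that maximizes
# log P(c) + sum(log P(t | c) for t in d); summing in log space avoids underflow from
# multiplying many small likelihoods.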
def classify(self, model: NBModel, testing_set: [str] = [], debug: bool = False) -> None:
# parse the testing data and labels and convert them into pandas Series
testing_data = rq.get(
'http://www.apkc.net/data/csc_578d/assignment01/problem04/traindata.txt'
).text.splitlines()
if testing_data is not None:
t_data_series = pd.Series(testing_data)
testing_labels = rq.get(
'http://www.apkc.net/data/csc_578d/assignment01/problem04/trainlabels.txt'
).text.splitlines()
if testing_labels is not None:
t_labels_series = pd.Series(testing_labels)
import numpy as np
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
Timestamp,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestGetNumericData:
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
obj = DataFrame({"A": [1, "2", 3.0]})
result = obj._get_numeric_data()
expected = DataFrame(index=[0, 1, 2], dtype=object)
tm.assert_frame_equal(result, expected)
def test_get_numeric_data(self):
datetime64name = np.dtype("M8[ns]").name
objectname = np.dtype(np.object_).name
df = DataFrame(
{"a": 1.0, "b": 2, "c": "foo", "f": Timestamp("20010102")},
index=np.arange(10),
)
result = df.dtypes
expected = Series(
[
np.dtype("float64"),
np.dtype("int64"),
np.dtype(objectname),
np.dtype(datetime64name),
],
index=["a", "b", "c", "f"],
)
tm.assert_series_equal(result, expected)
df = DataFrame(
{
"a": 1.0,
"b": 2,
"c": "foo",
"d": np.array([1.0] * 10, dtype="float32"),
"e": np.array([1] * 10, dtype="int32"),
"f": np.array([1] * 10, dtype="int16"),
"g": Timestamp("20010102"),
},
index=np.arange(10),
)
result = df._get_numeric_data()
expected = df.loc[:, ["a", "b", "d", "e", "f"]]
tm.assert_frame_equal(result, expected)
only_obj = df.loc[:, ["c", "g"]]
result = only_obj._get_numeric_data()
expected = df.loc[:, []]
tm.assert_frame_equal(result, expected)
df = DataFrame.from_dict({"a": [1, 2], "b": ["foo", "bar"], "c": [np.pi, np.e]})
result = df._get_numeric_data()
expected = DataFrame.from_dict({"a": [1, 2], "c": [np.pi, np.e]})
tm.assert_frame_equal(result, expected)
df = result.copy()
result = df._get_numeric_data()
expected = df
tm.assert_frame_equal(result, expected)
def test_get_numeric_data_mixed_dtype(self):
# numeric and object columns
df = DataFrame(
{
"a": [1, 2, 3],
"b": [True, False, True],
"c": ["foo", "bar", "baz"],
"d": [None, None, None],
"e": [3.14, 0.577, 2.773],
}
)
result = df._get_numeric_data()
tm.assert_index_equal(result.columns, | Index(["a", "b", "e"]) | pandas.Index |
import pandas as pd
import numpy as np
file4 = '../data/VITALS_BP1.xlsx'
x4 = pd.ExcelFile(file4)
bp = x4.parse('Sheet1')
print(bp.shape)
print(bp.iloc[0:1])
print(bp.dtypes)
bp = bp.dropna(subset=['START_DATE'])
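# The two lines below splice '20' into position 7 of each date string, turning a two-digit
# year into a four-digit one (e.g. '01-JAN-18 09:30' -> '01-JAN-2018 09:30'). This assumes
# every value follows the 'DD-MON-YY ...' layout seen in this export.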
bp['RECORDED_TIME'] = bp['RECORDED_TIME'].str[0:7] + '20' + bp['RECORDED_TIME'].str[7:]
bp['START_DATE'] = bp['START_DATE'].str[0:7] + '20' + bp['START_DATE'].str[7:]
bp['RECORDED_TIME'] = | pd.to_datetime(bp['RECORDED_TIME']) | pandas.to_datetime |
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
)
import pandas._testing as tm
@pytest.mark.parametrize("bad_raw", [None, 1, 0])
def test_rolling_apply_invalid_raw(bad_raw):
with pytest.raises(ValueError, match="raw parameter must be `True` or `False`"):
Series(range(3)).rolling(1).apply(len, raw=bad_raw)
def test_rolling_apply_out_of_bounds(engine_and_raw):
# gh-1850
engine, raw = engine_and_raw
vals = Series([1, 2, 3, 4])
result = vals.rolling(10).apply(np.sum, engine=engine, raw=raw)
assert result.isna().all()
result = vals.rolling(10, min_periods=1).apply(np.sum, engine=engine, raw=raw)
expected = Series([1, 3, 6, 10], dtype=float)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("window", [2, "2s"])
def test_rolling_apply_with_pandas_objects(window):
# 5071
df = DataFrame(
{"A": np.random.randn(5), "B": np.random.randint(0, 10, size=5)},
index=date_range("20130101", periods=5, freq="s"),
)
# we have an equal spaced timeseries index
# so simulate removing the first period
def f(x):
if x.index[0] == df.index[0]:
return np.nan
return x.iloc[-1]
result = df.rolling(window).apply(f, raw=False)
expected = df.iloc[2:].reindex_like(df)
tm.assert_frame_equal(result, expected)
with tm.external_error_raised(AttributeError):
df.rolling(window).apply(f, raw=True)
def test_rolling_apply(engine_and_raw):
engine, raw = engine_and_raw
expected = Series([], dtype="float64")
result = expected.rolling(10).apply(lambda x: x.mean(), engine=engine, raw=raw)
tm.assert_series_equal(result, expected)
# gh-8080
s = Series([None, None, None])
result = s.rolling(2, min_periods=0).apply(lambda x: len(x), engine=engine, raw=raw)
expected = Series([1.0, 2.0, 2.0])
tm.assert_series_equal(result, expected)
result = s.rolling(2, min_periods=0).apply(len, engine=engine, raw=raw)
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
import re
import pandas as pd
from config import Config
class Dataset(Config):
"""
Attributes
----------
ukbb_vars: list
Variable names based on user selections as coded in the Biobank.
recoded_vars: list
Variable names based on user selections as will be recoded.
df: DataFrame
Dataset which can be manipulated using the below methods
Methods
-------
create_binary_variables(voi: str, patterns: dict)
Takes as input a variable of interest (e.g., 'medication') and a dictionary with keys representing new
variable names mapped onto regular expressions. New binary variables will be created based on whether
each individual has a value matching the regular expression in any of the columns related to the variable
of interest.
Example:
>>> myDataset.create_binary_variables("medication", {"taking_painkiller": "(aspirin|tylenol)"})
recode_diagnoses()
Creates new variables for groups of included or excluded diagnoses, based on
whether one or more such diagnoses is present.
apply_inclusion_criteria(method: str)
Apply inclusion criteria based on specified method. Available options are "AND" and "OR".
apply_exclusion_criteria()
Apply exclusion criteria by removing cases where any of the specified diagnoses are present
clean(voi: str)
Takes as input a variable of interest (e.g., 'medication'). Removes all columns beginning with this string from the final dataframe.
recode_vars()
Replace values for each variable as specified in the config class
write_csv()
Write self.df to the filepath specified in the config class
"""
ukbb_vars, recoded_vars = ["eid"], ["eid"]
for var in Config.variables:
if Config.variables[var]["Included"]:
array_vars = []
for i in Config.variables[var]['ArrayRange']:
array_vars.append(f"{Config.variables[var]['DataField']}-{Config.variables[var]['InstanceNum']}.{i}")
ukbb_vars += array_vars
if len(Config.variables[var]['ArrayRange']) == 1:
recoded_vars.append(f"{var}_t{Config.variables[var]['InstanceNum']}")
else:
array_vars = []
for i in Config.variables[var]['ArrayRange']:
array_vars.append(f"{var}_t{Config.variables[var]['InstanceNum']}_{i}")
recoded_vars += array_vars
assert len(ukbb_vars) == len(recoded_vars)
def __init__(self) -> None:
self.df = pd.read_csv(self.filepaths["RawData"], dtype=str, usecols=self.ukbb_vars)
self.df.rename({k: v for k, v in zip(self.ukbb_vars, self.recoded_vars)}, axis=1, inplace=True)
self.df.dropna(axis=1, how="all", inplace=True)
def create_binary_variables(self, voi: str, patterns: dict):
cols = [col for col in self.df if col.startswith(voi)]
all_vars = list(patterns.keys())
new_vars = {var_name: [] for var_name in ["eid"] + all_vars}
for index, row in self.df[cols].iterrows():
new_vars["eid"].append(self.df["eid"][index])
for pat in patterns:
for value in row:
try:
if re.match(patterns[pat], value) is not None:
new_vars[pat].append(True)
break
except TypeError:
continue
if len(new_vars["eid"]) != len(new_vars[pat]):
new_vars[pat].append(False)
if not sum([len(x) for x in new_vars.values()]) == len(new_vars["eid"]) * len(new_vars.keys()):
raise ValueError(f"{sum([len(x) for x in new_vars.values()])} != {len(new_vars['eid']) * len(new_vars.keys())}")
new_df = pd.DataFrame(new_vars)
self.df = pd.merge(self.df, new_df, left_on="eid", right_on="eid")
def recode_diagnoses(self):
dx_cols = [col for col in self.df if col.startswith("diagnoses")]
all_dx = list(self.selected_diagnoses.keys())
new_vars = {var_name: [] for var_name in ["eid"] + all_dx}
for i in range(len(self.df)):
new_vars["eid"].append(self.df["eid"][i])
for col in dx_cols:
value = self.df[col][i]
if pd.isnull(value):
for dx in all_dx:
if len(new_vars[dx]) != len(new_vars["eid"]):
new_vars[dx].append(False)
break
for dx in self.selected_diagnoses:
if re.match(self.selected_diagnoses[dx], value) is not None:
if len(new_vars[dx]) != len(new_vars["eid"]):
new_vars[dx].append(True)
assert sum([len(x) for x in new_vars.values()]) == len(new_vars["eid"]) * len(new_vars.keys())
new_df = | pd.DataFrame(new_vars) | pandas.DataFrame |
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [ | pd.offsets.Hour(2) | pandas.offsets.Hour |
"""
Python module to do preliminary preprocessing
Creates train and test .csv files
"""
import pandas as pd
from sklearn.model_selection import train_test_split
import os
seed = 42
raw_data_dir = r'C:\Users\adrian.bergem\Google Drive\Data science\Projects\AI Credit Default\data\raw'
# Load in data
borrower = pd.read_csv(os.path.join(raw_data_dir, 'Borrower Information.csv'))
loan = pd.read_csv(os.path.join(raw_data_dir, 'Loan Classification Information.csv'))
df = pd.merge(borrower, loan, on='member_id')
# Limit the dataset into only terminated ones
df = df[df['loan_status'].isin(['Charged Off', 'Fully Paid'])]
# Transform the loan status variable to 1s and 0s
df['target'] = df['loan_status'].apply(lambda x: 1 if x == 'Charged Off' else 0)
# Drop the loan status feature as we will no longer need it
df.drop(labels='loan_status', axis=1, inplace=True)
# Remove id columns
df.drop(labels=['Unnamed: 0_x', 'Unnamed: 0_y', 'member_id', 'id'], axis=1, inplace=True)
# Remove features with more than 90% NaNs
nans = pd.DataFrame(index=df.columns, columns=['percentage NaNs'])
for feature in df.columns:
nans.loc[feature] = df[feature].isnull().sum()/(len(df))
cols_to_remove = nans[nans['percentage NaNs'] > 0.90].index.tolist()
df.drop(labels=cols_to_remove, axis=1, inplace=True)
# Omit old data
df = df[df['issue_d'] > '01-01-2010']
# Split data into target (y) and features (X)
X = df.drop(labels='target', axis=1)
y = df['target']
# Stratified split: stratify=y keeps the charged-off / fully-paid ratio the same in train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=seed, stratify=y)
# Reset indexes in train and test data sets
X_train.reset_index(drop=True, inplace=True)
y_train.reset_index(drop=True, inplace=True)
X_test.reset_index(drop=True, inplace=True)
y_test.reset_index(drop=True, inplace=True)
# Recombine features and target in train/test data
train = | pd.concat([y_train, X_train], axis=1) | pandas.concat |
from __future__ import print_function
import os.path
import random
from functools import partial
import datetime as dt
from flask import Flask, json, Response
import h5py
import numpy as np
import pandas as pd
import dask.array as da
from subsample import coarsen
from bokeh.server.crossdomain import crossdomain
from StringIO import StringIO
FACTOR_BASE = 15000
fromtimestamp = dt.datetime.fromtimestamp
app = Flask(__name__)
def to_seconds(ts):
if isinstance(ts, dt.datetime):
return (ts - dt.datetime(1970, 1, 1)).total_seconds() * 1000
else:
return 1000 * ((ts - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's'))
def create_sim_data(date, low, high, freq=60, max_period=84600):
res = []
ts = dt.datetime.strptime(date, '%Y-%m-%d')
next_day = ts + dt.timedelta(seconds=max_period)
while ts < next_day:
res.append((ts, random.uniform(low, high)))
ts = ts + dt.timedelta(seconds=freq)
return res
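# Usage sketch (assumed, not exercised below): one simulated day of minute-spaced
# (timestamp, value) tuples bounded by the given low/high:
#     day = create_sim_data('2015-01-01', 90.0, 110.0)
#     # len(day) == max_period / freq == 1410 with the defaults above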
aapl = pd.read_csv('data/aapl_hours_raw.csv')
# create data if doesn't exist..
if not os.path.exists('data/aapl_minutes_raw.csv'):
print("High resolution data file not found... Creating a new one (this may take some time...).")
haapl = | pd.read_csv('data/aapl.csv') | pandas.read_csv |
import streamlit as st
import pandas as pd
import numpy as np
import nltk
from sklearn.metrics.pairwise import cosine_similarity
from sentence_transformers import SentenceTransformer, util
import copy
import random
import requests
from bs4 import BeautifulSoup
import time
from newspaper import Article
import re
import unicodedata
# Open Entry field to enter a left leaning source --- DONE
# Open entry field to enter a right leaning source --- DONE
# function to scrape the news from each of these sources --- DONE
# function to explode into sentences & clean up sentences a bit --- DONE
# function to encode into embeddings & store
# function to create comparisons & printout
def general_scraper(url):
"""
Scrapes the provided url link using python's requests module and returns a BS object containing returned information text
Scraping wrapped around try-except blocks along with conditional check if status code is 200.
Args:
url ([str]): website to be scrapped.
Returns:
soup [Beautiful Soup object]: Returns scraped text in a Beautiful Soup object type.
"""
try:
response = requests.get(url)
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'html5lib')
return soup
else:
print(f"Did not get status code 200 for url: \n{url}\n.Instead got status code {response.status_code}")
return None
except Exception as err_msge:
print(f"Error while scraping: {err_msge}")
return None
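# Minimal usage sketch (assumes the target site is reachable and serves HTML; the URL below
# is a placeholder, not one used elsewhere in this script):
#     soup = general_scraper("https://www.example.com")
#     if soup is not None:
#         print(soup.title)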
def scrape_news(story_links):
if type(story_links) == str:
story_links = [story_links]
stories_columns = ['news_title', 'news_source', 'global_bias', 'News_link', 'text']
stories_df = | pd.DataFrame(columns=stories_columns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'dave@google.com',
'tdhock5@gmail.com',
'maudelaperriere@gmail.com',
'rob@gmail.com some text steve@gmail.com',
'a@b.com some text c@d.com and e@f.com',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
        # an Index should return the same result as the default index without a name,
        # i.e. index.name does not affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
        # 0xFF13: ３ Em 3
        values = ['A', '3', u'¼', u'★', u'፸', u'３', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
        unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'３', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
        values = ['A', np.nan, u'¼', u'★', np.nan, u'３', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
        # If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.rsplit('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.rsplit('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.rsplit('[,_]')
exp = Series([[u('a,b_c')], [u('c_d,e')], NA, [u('f,g,h')]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_', n=1)
exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']])
tm.assert_series_equal(result, exp)
def test_split_noargs(self):
# #1859
        s = Series(['Wes McKinney', 'Travis  Oliphant'])
result = s.str.split()
expected = ['Travis', 'Oliphant']
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
def test_split_maxsplit(self):
# re.split 0, str.split -1
s = Series(['bd asdf jfg', 'kjasdflqw asdfnfk'])
result = s.str.split(n=-1)
xp = s.str.split()
tm.assert_series_equal(result, xp)
result = s.str.split(n=0)
tm.assert_series_equal(result, xp)
xp = s.str.split('asdf')
result = s.str.split('asdf', n=0)
tm.assert_series_equal(result, xp)
result = s.str.split('asdf', n=-1)
tm.assert_series_equal(result, xp)
def test_split_no_pat_with_nonzero_n(self):
s = Series(['split once', 'split once too!'])
result = s.str.split(n=1)
expected = Series({0: ['split', 'once'], 1: ['split', 'once too!']})
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_unequal_splits', 'one_of_these_things_is_not'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'one'],
1: ['unequal', 'of'],
2: ['splits', 'these'],
3: [NA, 'things'],
4: [NA, 'is'],
5: [NA, 'not']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
with tm.assert_raises_regex(ValueError, "expand must be"):
s.str.split('_', expand="not_a_boolean")
def test_split_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.split('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_unequal_splits', 'one_of_these_things_is_not'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'unequal', 'splits', NA, NA, NA
), ('one', 'of', 'these', 'things',
'is', 'not')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with tm.assert_raises_regex(ValueError, "expand must be"):
idx.str.split('_', expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=2)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=1)
exp = DataFrame({0: ['some_equal', 'with_no'], 1: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.rsplit('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True, n=1)
exp = MultiIndex.from_tuples([('some_equal', 'splits'),
('with_no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(self):
# gh-18450
s = Series(["foo,bar,baz", NA])
result = s.str.split(",", expand=True)
exp = DataFrame([["foo", "bar", "baz"], [NA, NA, NA]])
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
assert all(np.isnan(x) for x in result.iloc[1])
def test_split_with_name(self):
# GH 12617
# should preserve name
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.split(',')
exp = Series([['a', 'b'], ['c', 'd']], name='xxx')
tm.assert_series_equal(res, exp)
res = s.str.split(',', expand=True)
exp = DataFrame([['a', 'b'], ['c', 'd']])
tm.assert_frame_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.split(',')
exp = Index([['a', 'b'], ['c', 'd']], name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(',', expand=True)
exp = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([('a', '_', 'b_c'), ('c', '_', 'd_e'), NA,
('f', '_', 'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('a_b', '_', 'c'), ('c_d', '_', 'e'), NA,
('f_g', '_', 'h')])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.partition('__', expand=False)
exp = Series([('a', '__', 'b__c'), ('c', '__', 'd__e'), NA,
('f', '__', 'g__h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('__', expand=False)
exp = Series([('a__b', '__', 'c'), ('c__d', '__', 'e'), NA,
('f__g', '__', 'h')])
tm.assert_series_equal(result, exp)
# None
values = Series(['a b c', 'c d e', NA, 'f g h'])
result = values.str.partition(expand=False)
exp = Series([('a', ' ', 'b c'), ('c', ' ', 'd e'), NA,
('f', ' ', 'g h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition(expand=False)
exp = Series([('a b', ' ', 'c'), ('c d', ' ', 'e'), NA,
('f g', ' ', 'h')])
tm.assert_series_equal(result, exp)
        # Not split
values = Series(['abc', 'cde', NA, 'fgh'])
result = values.str.partition('_', expand=False)
exp = Series([('abc', '', ''), ('cde', '', ''), NA, ('fgh', '', '')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('', '', 'abc'), ('', '', 'cde'), NA, ('', '', 'fgh')])
tm.assert_series_equal(result, exp)
# unicode
values = Series([u'a_b_c', u'c_d_e', NA, u'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([(u'a', u'_', u'b_c'), (u'c', u'_', u'd_e'),
NA, (u'f', u'_', u'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([(u'a_b', u'_', u'c'), (u'c_d', u'_', u'e'),
NA, (u'f_g', u'_', u'h')])
tm.assert_series_equal(result, exp)
# compare to standard lib
values = Series(['A_B_C', 'B_C_D', 'E_F_G', 'EFGHEF'])
result = values.str.partition('_', expand=False).tolist()
assert result == [v.partition('_') for v in values]
result = values.str.rpartition('_', expand=False).tolist()
assert result == [v.rpartition('_') for v in values]
def test_partition_index(self):
values = Index(['a_b_c', 'c_d_e', 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Index(np.array([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_',
'g_h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition('_', expand=False)
exp = Index(np.array([('a_b', '_', 'c'), ('c_d', '_', 'e'), (
'f_g', '_', 'h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition('_')
exp = Index([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_', 'g_h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
result = values.str.rpartition('_')
exp = Index([('a_b', '_', 'c'), ('c_d', '_', 'e'), ('f_g', '_', 'h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
def test_partition_to_dataframe(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_')
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_')
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=True)
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_', expand=True)
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
def test_partition_with_name(self):
# GH 12617
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.partition(',')
exp = DataFrame({0: ['a', 'c'], 1: [',', ','], 2: ['b', 'd']})
tm.assert_frame_equal(res, exp)
# should preserve name
res = s.str.partition(',', expand=False)
exp = Series([('a', ',', 'b'), ('c', ',', 'd')], name='xxx')
tm.assert_series_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.partition(',')
exp = MultiIndex.from_tuples([('a', ',', 'b'), ('c', ',', 'd')])
assert res.nlevels == 3
        tm.assert_index_equal(res, exp)
""" test scalar indexing, including at and iat """
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
Timedelta,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.tests.indexing.common import Base
class TestScalar(Base):
@pytest.mark.parametrize("kind", ["series", "frame"])
def test_at_and_iat_get(self, kind):
def _check(f, func, values=False):
if f is not None:
indices = self.generate_indices(f, values)
for i in indices:
result = getattr(f, func)[i]
expected = self.get_value(func, f, i, values)
tm.assert_almost_equal(result, expected)
d = getattr(self, kind)
# iat
for f in [d["ints"], d["uints"]]:
_check(f, "iat", values=True)
for f in [d["labels"], d["ts"], d["floats"]]:
if f is not None:
msg = "iAt based indexing can only have integer indexers"
with pytest.raises(ValueError, match=msg):
self.check_values(f, "iat")
# at
for f in [d["ints"], d["uints"], d["labels"], d["ts"], d["floats"]]:
_check(f, "at")
@pytest.mark.parametrize("kind", ["series", "frame"])
def test_at_and_iat_set(self, kind):
def _check(f, func, values=False):
if f is not None:
indices = self.generate_indices(f, values)
for i in indices:
getattr(f, func)[i] = 1
expected = self.get_value(func, f, i, values)
tm.assert_almost_equal(expected, 1)
d = getattr(self, kind)
# iat
for f in [d["ints"], d["uints"]]:
_check(f, "iat", values=True)
for f in [d["labels"], d["ts"], d["floats"]]:
if f is not None:
msg = "iAt based indexing can only have integer indexers"
with pytest.raises(ValueError, match=msg):
_check(f, "iat")
# at
for f in [d["ints"], d["uints"], d["labels"], d["ts"], d["floats"]]:
_check(f, "at")
class TestAtAndiAT:
# at and iat tests that don't need Base class
def test_float_index_at_iat(self):
ser = Series([1, 2, 3], index=[0.1, 0.2, 0.3])
for el, item in ser.items():
assert ser.at[el] == item
for i in range(len(ser)):
assert ser.iat[i] == i + 1
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range("1/1/2000", periods=8)
df = DataFrame(np.random.randn(8, 4), index=dates, columns=["A", "B", "C", "D"])
s = df["A"]
result = s.at[dates[5]]
xp = s.values[5]
assert result == xp
# GH 7729
# make sure we are boxing the returns
s = Series(["2014-01-01", "2014-02-02"], dtype="datetime64[ns]")
expected = Timestamp("2014-02-02")
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
assert result == expected
s = Series(["1 days", "2 days"], dtype="timedelta64[ns]")
expected = Timedelta("2 days")
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
assert result == expected
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype="int64")
result = s.iloc[2]
assert result == 2
result = s.iat[2]
assert result == 2
msg = "index 10 is out of bounds for axis 0 with size 5"
with pytest.raises(IndexError, match=msg):
s.iat[10]
msg = "index -10 is out of bounds for axis 0 with size 5"
with pytest.raises(IndexError, match=msg):
s.iat[-10]
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype="int64")
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
assert result == 2
def test_frame_at_with_duplicate_axes(self):
# GH#33041
arr = np.random.randn(6).reshape(3, 2)
df = DataFrame(arr, columns=["A", "A"])
result = df.at[0, "A"]
expected = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.T.at["A", 0]
tm.assert_series_equal(result, expected)
# setter
df.at[1, "A"] = 2
expected = Series([2.0, 2.0], index=["A", "A"], name=1)
tm.assert_series_equal(df.iloc[1], expected)
def test_at_getitem_dt64tz_values(self):
# gh-15822
df = DataFrame(
{
"name": ["John", "Anderson"],
"date": [
Timestamp(2017, 3, 13, 13, 32, 56),
Timestamp(2017, 2, 16, 12, 10, 3),
],
}
)
df["date"] = df["date"].dt.tz_localize("Asia/Shanghai")
expected = Timestamp("2017-03-13 13:32:56+0800", tz="Asia/Shanghai")
result = df.loc[0, "date"]
assert result == expected
result = df.at[0, "date"]
assert result == expected
def test_mixed_index_at_iat_loc_iloc_series(self):
# GH 19860
s = Series([1, 2, 3, 4, 5], index=["a", "b", "c", 1, 2])
for el, item in s.items():
assert s.at[el] == s.loc[el] == item
for i in range(len(s)):
assert s.iat[i] == s.iloc[i] == i + 1
with pytest.raises(KeyError, match="^4$"):
s.at[4]
with pytest.raises(KeyError, match="^4$"):
s.loc[4]
def test_mixed_index_at_iat_loc_iloc_dataframe(self):
# GH 19860
df = DataFrame(
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], columns=["a", "b", "c", 1, 2]
)
for rowIdx, row in df.iterrows():
for el, item in row.items():
assert df.at[rowIdx, el] == df.loc[rowIdx, el] == item
for row in range(2):
for i in range(5):
assert df.iat[row, i] == df.iloc[row, i] == row * 5 + i
with pytest.raises(KeyError, match="^3$"):
df.at[0, 3]
with pytest.raises(KeyError, match="^3$"):
df.loc[0, 3]
def test_iat_setter_incompatible_assignment(self):
# GH 23236
result = DataFrame({"a": [0, 1], "b": [4, 5]})
result.iat[0, 0] = None
expected = DataFrame({"a": [None, 1], "b": [4, 5]})
tm.assert_frame_equal(result, expected)
def test_iat_dont_wrap_object_datetimelike():
# GH#32809 .iat calls go through DataFrame._get_value, should not
# call maybe_box_datetimelike
dti = date_range("2016-01-01", periods=3)
tdi = dti - dti
ser = Series(dti.to_pydatetime(), dtype=object)
ser2 = Series(tdi.to_pytimedelta(), dtype=object)
    df = DataFrame({"A": ser, "B": ser2})
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 17 2015
This script will grab the feature data from extracted feature files
for all images in an automated class file.
Can bin data by category or leave each image separate.
This particular script was edited to use neural nets to estimate the
number of cells in a diatom chain or return a 1 (if not a diatom chain)
for each image.
@author: <NAME>
"""
###old information about this script
# script to extract the biovolume estimates from IFCB V2 feature files
# and sum them per category for class files
# this will read a directory of class files and search the feature path for
# those files, pulling the biovolume from them
# 06/13/2017 DWH
# this script is a modified version of the biovolume grabber script
# this script will take the biovolume value for each cell, convert it to
# units of carbon following formulas from Menden_Deuer and Lessard 2000
# then sum the total carbon per category
####end old information
from scipy.io import loadmat
import os
import pandas as pd
import numpy as np
import pickle
__author__ = 'dhenrichs'
infile = 'chain_correction.cfg' # name of the config file to use
def grab_config_variables(infile):
'''Will load the necessary variables from the config file'''
if infile not in os.listdir():
print('Config file NOT FOUND')
print('Please provide the correct config file')
raise Exception('Config file error')
else:
#load config file
with open(infile,'r') as f:
in_config = [line[:-1] for line in f]
outdict = {}
for line in in_config:
if len(line) < 1:
pass
elif line[0] == '#':
pass
elif '=' in line:
temp = line.split(' ', maxsplit=2)
outdict[temp[0]] = temp[2].replace("'", "").replace(" ", "") #remove any quote marks
return outdict
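# A minimal sketch of what the parser above produces (the path below is an
# invented example, not a value from this repository): a config line such as
#     feature_path = '/data/ifcb/features/'
# is split on ' ' into ['feature_path', '=', "'/data/ifcb/features/'"], so
# grab_config_variables(infile) would return
#     {'feature_path': '/data/ifcb/features/', ...}
# with the quote marks and stray spaces stripped by the replace() calls.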
#read in the config file
input_dict = grab_config_variables(infile)
#copy the config variables to individual variables
feature_path = input_dict['feature_path']
class_path = input_dict['class_path']
outpath = input_dict['outpath']
date_limiter = input_dict['date_limiter']
automated_or_manual = input_dict['automated_or_manual']
model_path = input_dict['model_path']
ifcb_style = input_dict['ifcb_style']
if input_dict['cnn'] == 'True':
cnn = True
elif input_dict['cnn'] == 'False':
cnn = False
else:
raise ValueError('cnn must be True or False')
if ifcb_style == 'old':
filename_length = 21
filestart = 'I'
else:
filename_length = 24
filestart = 'D'
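# For reference (naming conventions assumed from the two IFCB generations, not
# stated elsewhere in this file): "old"-style sample names look like
# IFCB1_2006_158_000036 (21 characters, starting with 'I'), while "new"-style
# names look like D20170613T153000_IFCB010 (24 characters, starting with 'D');
# filename_length and filestart are presumably used later to pick matching
# feature and class files out of the input directories.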
def grab_features(in_feature, in_class, automated, models=None):
"""Open the feature file and obtain the listed features for each image.
"""
feature_data = load_feature_file(in_feature)
outdata = pd.DataFrame(feature_data, index=feature_data.index,
columns=['Biovolume', 'Area', 'MajorAxisLength', 'MinorAxisLength',
'summedBiovolume', 'summedArea', 'summedMajorAxisLength',
'summedMinorAxisLength', 'EquivDiameter', 'Extent', 'H90',
'H180', 'Orientation', 'Perimeter', 'summedPerimeter'])
if automated == 'automated':
if cnn:
category_list, class_data, roinums = load_class_file_automated_CNNstyle(in_class)
else:
category_list, class_data, roinums = load_class_file_automated(in_class)
elif automated == 'manual':
#print "loading class file..."
category_list, class_data, roinums = load_class_file_manual(in_class)
#outdata['class'] = class_data
else:
return None
if ifcb_style == 'new':
#create a temporary dataframe to hold the data; this should solve the issue of feature file sizes not matching up with
#class file sizes due to MATLAB scripts randomly skipping images
classified_data = pd.DataFrame(class_data, index=roinums, columns=['class'])
print(len(class_data), len(class_data[0]), outdata.shape)
#concat the two dataframes based on intersection of index
outdata = pd.concat([outdata, classified_data], axis=1).dropna(subset=['Biovolume'])
else:
print(len(class_data), len(class_data[0]), outdata.shape)
if len(class_data) == outdata.shape[0]:
outdata['class'] = class_data
else:
classified_data = pd.DataFrame(class_data, index=roinums, columns=['class'])
outdata = pd.concat([outdata, classified_data], axis=1).dropna(subset=['Biovolume'])
outdata['num_cells'] = 1
for index, image in outdata.iterrows():
outdata.loc[index, ['num_cells']] = get_num_cells_in_chain(image['class'], image, models)
return outdata
def calculate_carbon_from_biovolume(invalue, category):
"""Calculate the cellular carbon from the given biovolume value based on
what category the image is assigned and how large it is. Conversion
formulas are from Table 4 in Menden-Deuer and Lessard (2000).
inputs:
invalue (float) = the biovolume value from the features file
category (str) = the category to which the image was assigned
returns:
carbon_value (float) = the carbon calculated from the formulas
"""
# categories as of 8/1/2021 CNN classifier
# may need to update these as needed
diatoms = ['Asterionellopsis', 'Asterionellopsis_single', 'Centric', 'Chaetoceros', 'Chaetoceros_danicus', 'Chaetoceros_peruvianus',
'Chaetoceros_simplex', 'Chaetoceros_single', 'Chaetoceros_socialis', 'Corethron', 'Cylindrotheca',
'Cymatosira', 'DactFragCeratul', 'Dactyliosolen_blavyanus', 'Ditylum', 'Ephemera', 'Eucampia', 'Eucampia_cornuta', 'Guinardia',
'Hemiaulus_curved', 'Hemiaulus_straight', 'Leptocylindrus', 'Licmophora', 'Odontella', 'Paralia', 'Pleurosigma', 'Pseudonitzschia',
'Rhizosolenia', 'Skeletonema', 'Thalassionema', 'Thalassiosira', 'centric10', 'pennate', 'pennate_rod', ]
if category in diatoms:
if invalue > 3000.: # diatoms > 3000 cubic microns (um**3)
carbon_value = (10**(-0.933)) * (invalue ** 0.881)
else:
carbon_value = (10**(-0.541)) * (invalue ** 0.811)
else:
if invalue < 3000.: # protist plankton < 3000 cubic microns (um**3)
carbon_value = (10**(-0.583)) * (invalue ** 0.860)
else:
carbon_value = (10**(-0.665)) * (invalue ** 0.939)
return carbon_value
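# Worked example (numbers are illustrative only): a diatom image with a
# biovolume of 5000 um^3 takes the >3000 um^3 diatom branch, giving
#     carbon = 10**(-0.933) * 5000**0.881 ≈ 212 pg C per cell,
# while the same biovolume in a non-diatom category would fall through to the
# protist formula 10**(-0.665) * 5000**0.939 in the else-branch above.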
def get_num_cells_in_chain(in_class, in_features, models):
"""Will use the feature data and a pre-made neural network to estimate the
number of cells in the diatom chain based upon the classifier output for
each image. Only diatoms will be counted in this way.
"""
diatom_chains = ['Asterionellopsis', 'Chaetoceros', 'Cymatosira', 'DactFragCeratul', 'Eucampia', 'Eucampia_cornuta',
'Guinardia', 'Hemiaulus_curved', 'Leptocylindrus', 'Pseudonitzschia', 'Skeletonema', 'Thalassionema',
'Thalassiosira']
#print in_class
#print in_class[1]
if in_class in diatom_chains:
#print
#print "in_class", in_class
#print 'in_features', in_features.shape
temp_features = in_features.copy()
dump = temp_features.pop('class')
dump = temp_features.pop('num_cells')
#print 'temp_features', temp_features.shape, temp_features.values
temp_counts = models['{0}_scaler'.format(in_class)].transform(temp_features.values.reshape(1, -1))
temp_counts = models[in_class].predict(temp_counts)
temp = models['{0}_scaler_targets'.format(in_class)].inverse_transform(temp_counts)
if int(round(temp)) < 1:
temp = 1
#print temp
else:
temp = 1
return int(round(temp))
def load_models(indir):
"""Load the pre-trained models for the given classes."""
spp = ['Asterionellopsis', 'Chaetoceros', 'Cymatosira', 'DactFragCeratul', 'Eucampia', 'Eucampiacornuta',
'Guinardia', 'Hemiaulus', 'Leptocylindrus', 'Pseudonitzschia', 'Skeletonema', 'Thalassionema',
'Thalassiosira']
models = {}
for cat in spp:
print(indir + cat)
temp = open("{0}{1}_net.pkl".format(indir, cat), 'rb')
models[cat] = pickle.load(temp, encoding='latin1')
models['{0}_scaler'.format(cat)] = pickle.load(open("{0}{1}_scaler.pkl".format(indir, cat), 'rb'), encoding='latin1')
models['{0}_scaler_targets'.format(cat)] = pickle.load(open("{0}{1}_scaler_targets.pkl".format(indir, cat), 'rb'), encoding='latin1')
# update hemiaulus to hemiaulus_curved
models['Hemiaulus_curved'] = models['Hemiaulus']
models['Hemiaulus_curved_scaler'] = models['Hemiaulus_scaler']
models['Hemiaulus_curved_scaler_targets'] = models['Hemiaulus_scaler_targets']
return models
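# Minimal usage sketch (the trigger/file names here are invented placeholders):
# the nets are loaded once and then passed into grab_features(), which looks a
# model up by class name for every diatom-chain image, e.g.
#     models = load_models(model_path)
#     outdata = grab_features('D20170613T153000_IFCB010_fea_v2.csv',
#                             'D20170613T153000_IFCB010_class_v2.mat',
#                             automated_or_manual, models=models)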
def load_class_file_automated(in_class):
"""Load the automated classifier results and list of class names.
Returns:
category_list = list of category names
class_data = list classifications for each roi image
"""
f = loadmat(class_path + in_class, verify_compressed_data_integrity=False)
print(f.keys())
class_data = f['TBclass_above_threshold'] #use this line for automated classifier results; can be 'TBclass_above_optthresh' if available
class_data = [category[0][0] for category in class_data] #un-nest the MATLAB stuff #use this line for automated classifier results
category_list = f['class2useTB']
category_list = [category[0][0] for category in category_list] #un-nest the MATLAB stuff
roinum = f['roinum']
return category_list, class_data, roinum
def load_class_file_automated_CNNstyle(in_class):
"""Load the automated classifier results and list of class names.
Returns:
category_list = list of category names
class_data = list classifications for each roi image
"""
f = loadmat(class_path + in_class, verify_compressed_data_integrity=False)
class_data = f['TBclass_above_threshold'] #use this line for automated classifier results; can be 'TBclass_above_optthresh' if available
class_data = [category[0] for category in class_data[0]] #un-nest the MATLAB stuff #use this line for automated classifier results
category_list = f['class2useTB']
category_list = [category[0][0] for category in category_list] #un-nest the MATLAB stuff
roinum = f['roinum']
roinum = [num[0] for num in roinum]
return category_list, class_data, roinum
def load_class_file_manual(in_class):
"""Load the manual correction output from the classifier. This will provide the
corrected information about what class each image belongs in.
"""
#the structure of the mat file variable with the classes is slightly different in manual files
#classlist is a table of shape (num_rois x 3) with the columns being: roinum, manual category, automated category
#print "loading mat file..."
f = loadmat(class_path + in_class)
roinums = None
class_data_manual = f['classlist']
class_data = f['classlist'][:,2]
roinums = f['classlist'][:,0]
#print "starting for loop..."
for index, value in enumerate(class_data):
if not np.isnan(class_data_manual[index, 1]):
class_data[index] = class_data_manual[index,1]
roinums = [roinums[x] for x,y in enumerate(class_data) if not np.isnan(y)]
    class_data = [x for x in class_data if not np.isnan(x)]  # this appears to work as intended but the roinums need to be adjusted too
#print "getting category list..."
#print f['class2use_manual']
#print class_data
#print len(class_data)
category_list = f['class2use_manual']
#print len(category_list), max(class_data)
#category_list = [category[0] for category in category_list[0] if len(category) > 0]
category_list = [category[0] if len(category) > 0 else '' for category in category_list[0]]
#print len(category_list), max(class_data)
class_data = [category_list[int(x-1)] for x in class_data]
#print "finished for loop..."
return category_list, class_data, roinums
def load_feature_file(in_feature):
"""Load the feature file into a pandas dataframe."""
    f = pd.read_csv(feature_path + in_feature, index_col=0)
    return f
import ast
import numpy as np
import pandas as pd
from clevercsv import csv2df
from collections import Counter
from src import constants
def get_sequence(dataset, column, annotations):
for item in annotations[dataset]:
if item["header"] == column:
return item["sequence"], item["tokens"], list(item["tags"].keys())
def get_new_annotations(dataset_q, column_q, annotations):
for new_annotation in annotations:
if len(new_annotation) == 3:
dataset, column, canonical_type = new_annotation
else:
dataset, column, canonical_type, encodings = new_annotation
if dataset == dataset_q and column == column_q:
return canonical_type
def read_data(_data_path, dataset_name, dtype=str):
if dataset_name in ["mass_6.csv", "geoplaces2.csv", "rodents.csv"]:
encoding = "ISO-8859-1"
else:
encoding = "utf-8"
return pd.read_csv(
_data_path + dataset_name,
encoding=encoding,
dtype=dtype,
skipinitialspace=True,
index_col=None,
keep_default_na=False,
header="infer",
)
def read_dataset(dataset_name, data_folder):
filename = data_folder + dataset_name + ".csv"
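# `datasets` is assumed to be defined elsewhere (e.g. in src.constants) as a
# mapping from dataset name -> (encoding, header); it is not defined in the code shown here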
if dataset_name in datasets:
encoding, header = datasets[dataset_name]
return pd.read_csv(
filename,
sep=",",
dtype=str,
encoding=encoding,
keep_default_na=False,
skipinitialspace=True,
header=header,
)
else:
raise Exception(f"{filename} not known.")
def bot_read_data(_data_path, dataset_name):
if dataset_name in ["mass_6.csv", "geoplaces2.csv", "rodents.csv"]:
encoding = "ISO-8859-1"
elif dataset_name in ["usp05.csv", "Midwest_Survey_nominal.csv", "kr-vs-kp.csv", "Satellite.csv", "cylinder-bands.csv",
"jungle_chess_2pcs_endgame_lion_elephant.csv", "rodents.csv", "wholesale-customers.csv"]:
df = csv2df(_data_path + dataset_name, skipinitialspace=True, index_col=None)
df.columns = [col.replace("\"", "") for col in df.columns]
return df
else:
encoding = "utf-8"
return pd.read_csv(
_data_path + dataset_name,
encoding=encoding,
skipinitialspace=True,
index_col=None,
header="infer",
)
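# load_data expects a CSV whose 'tokens', 'labels', and 'tags' columns contain
# stringified Python lists (parsed with ast.literal_eval); only the first label
# per row is kept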
def load_data(path):
df = pd.read_csv(path)
sentences = df["tokens"].to_list()
sentences = [ast.literal_eval(sentence) for sentence in sentences]
labels = df["labels"].to_list()
labels = [ast.literal_eval(label) for label in labels]
labels = [label[0] for label in labels]
# labels = [[CLASSES[label[0]],] for label in labels]
tags = df["tags"].to_list()
tags = [ast.literal_eval(tag) for tag in tags]
tags = [tag for tag in tags]
return sentences, labels, tags
def save_metadata_inputs(file_name, labels, tags, tokens):
df = pd.DataFrame({"tokens": tokens, "tags": tags, "labels": labels})
# write the table out; CSV without the index is assumed here so that load_data above can read it back
df.to_csv(file_name, index=False)
#!/usr/bin/env python
### Up to date as of 10/2019 ###
'''Section 0: Import python libraries
This code has a number of dependencies, listed below.
They can be installed using the virtual environment "slab23"
that is setup using script 'library/setup3env.sh'.
Additional functions are housed in file 'slab2functions.py'
and imported below.
There are some additional dependencies used by the function file
that do not need to be installed separately.
'''
# stdlib imports
from datetime import datetime
import os.path
import argparse
import numpy as np
from pandas import DataFrame
import pandas as pd
import warnings
import slab2functions as s2f
import math
import mapio.gmt as gmt
from functools import partial
from multiprocess import Pool
import loops as loops
from scipy import ndimage
import psutil
import cProfile
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def main(args):
'''Section 1: Setup
In this section:
(1) Identify necessary input files
(2) Load parameters from '[slab]input.par'
(3) Define optional boxes for PDF/print testing
(4) Define output file names
(5) Gathering optional arguments, setting defaults
(6) Define search ellipsoid parameters
(7) Define Average active source profiles
(8) Define reference model (Slab1.0 and/or slab guides)
(9) Define Trench Locations
(10) Open and modify input dataset
(11) Calculate seismogenic zone thickness
(12) Record variable parameters used for this model
(13) Define search grid
(14) Identify tomography datasets
(15) Initialize arrays for Section 2 '''
print('Start Section 1 of 7: Setup')
print(' Loading inputs...')
''' ------ (1) Identify necessary input files ------ '''
trenches = 'library/misc/trenches_usgs_2017_depths.csv'
agesFile = 'library/misc/interp_age.3.2g.nc'
ageerrorsFile = 'library/misc/interp_ageerror.3.2g.nc'
polygonFile = 'library/misc/slab_polygons.txt'
addFile = 'library/misc/addagain.csv'
parFile = args.parFile
pd.options.mode.chained_assignment = None
warnings.filterwarnings("ignore", message="invalid value encountered in less")
warnings.filterwarnings("ignore", message="invalid value encountered in true_divide")
warnings.filterwarnings("ignore", message="invalid value encountered in greater")
warnings.filterwarnings("ignore", message="invalid value encountered in double_scalars")
''' ------ (2) Load parameters from '[slab]input.par' ------'''
for line in open(parFile):
plist = line.split()
if len(plist)>2:
if plist[0] == 'inFile':
inFile = plist[2]
if plist[0] == 'use_box':
use_box = plist[2]
if plist[0] == 'latmin':
latmin = np.float64(plist[2])
if plist[0] == 'latmax':
latmax = np.float64(plist[2])
if plist[0] == 'lonmin':
lonmin = np.float64(plist[2])
if plist[0] == 'lonmax':
lonmax = np.float64(plist[2])
if plist[0] == 'slab':
slab = plist[2]
if plist[0] == 'grid':
grid = np.float64(plist[2])
if plist[0] == 'radius1':
radius1 = np.float64(plist[2])
if plist[0] == 'radius2':
radius2 = np.float64(plist[2])
if plist[0] == 'sdr':
sdr = np.float64(plist[2])
if plist[0] == 'ddr':
ddr = np.float64(plist[2])
if plist[0] == 'taper':
taper = np.float64(plist[2])
if plist[0] == 'T':
T = np.float64(plist[2])
if plist[0] == 'node':
node = np.float64(plist[2])
if plist[0] == 'filt':
filt = np.float64(plist[2])
if plist[0] == 'maxdist':
maxdist = np.float64(plist[2])
if plist[0] == 'minunc':
minunc = np.float64(plist[2])
if plist[0] == 'mindip':
mindip = np.float64(plist[2])
if plist[0] == 'minstk':
minstk = np.float64(plist[2])
if plist[0] == 'maxthickness':
maxthickness = np.float64(plist[2])
if plist[0] == 'seismo_thick':
seismo_thick = np.float64(plist[2])
if plist[0] == 'dipthresh':
dipthresh = np.float64(plist[2])
if plist[0] == 'fracS':
fracS = np.float64(plist[2])
if plist[0] == 'kdeg':
kdeg = np.float64(plist[2])
if plist[0] == 'knot_no':
knot_no = np.float64(plist[2])
if plist[0] == 'rbfs':
rbfs = np.float64(plist[2])
# loop through to find latest slab input file if specified
polyname = slab
if slab == 'kur' or slab == 'izu':
polyname = 'jap'
if inFile == 'latest':
yearmax = 0
monthmax = 0
for filename in os.listdir('Input'):
if filename.endswith('.csv'):
try:
slabname,datei,instring = filename.split('_')
except:
continue
if slabname == polyname and instring == 'input.csv':
try:
monthi, yeari = datei.split('-')
except:
continue
yeari = int(yeari)
monthi = int(monthi)
if yeari >= yearmax:
yearmax = yeari
inFile = 'Input/%s'%filename
if monthi > monthmax:
monthmax = monthi
inFile = 'Input/%s'%filename
print (' using input file: %s'%inFile)
if slab == 'mue' or slab == 'phi' or slab == 'cot' or slab == 'sul' or slab == 'ryu':
if args.undergrid is None:
if slab == 'mue':
print ('This slab is truncated by the Caribbean (car) slab, argument -u cardepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'cot':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'sul':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'phi':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'ryu':
print ('This slab is truncated by the Kurils-Japan (kur) slab, argument -u kurdepgrid is required')
print ('Exiting .... ')
exit()
else:
undergrid = args.undergrid
''' ------ (4) Define output file names ------ '''
date = datetime.today().strftime('%m.%d.%y')
now = datetime.now()
time = '%s.%s' % (now.hour, now.minute)
folder = '%s_slab2_%s' % (slab, date)
os.system('mkdir Output/%s'%folder)
outFile = 'Output/%s/%s_slab2_res_%s.csv' % (folder, slab, date)
dataFile = 'Output/%s/%s_slab2_dat_%s.csv' % (folder, slab, date)
nodeFile = 'Output/%s/%s_slab2_nod_%s.csv' % (folder, slab, date)
fillFile = 'Output/%s/%s_slab2_fil_%s.csv' % (folder, slab, date)
rempFile = 'Output/%s/%s_slab2_rem_%s.csv' % (folder, slab, date)
clipFile = 'Output/%s/%s_slab2_clp_%s.csv' % (folder, slab, date)
these_params = 'Output/%s/%s_slab2_par_%s.csv' % (folder, slab, date)
datainfo = 'Output/%s/%s_slab2_din_%s.csv' % (folder, slab, date)
nodeinfo = 'Output/%s/%s_slab2_nin_%s.csv' % (folder, slab, date)
suppFile = 'Output/%s/%s_slab2_sup_%s.csv' % (folder, slab, date)
nodexFile = 'Output/%s/%s_slab2_nox_%s.csv' % (folder, slab, date)
nodeuFile = 'Output/%s/%s_slab2_nou_%s.csv' % (folder, slab, date)
depTextFile = 'Output/%s/%s_slab2_dep_%s.txt' % (folder, slab, date)
depGridFile = 'Output/%s/%s_slab2_dep_%s.grd' % (folder, slab, date)
strTextFile = 'Output/%s/%s_slab2_str_%s.txt' % (folder, slab, date)
strGridFile = 'Output/%s/%s_slab2_str_%s.grd' % (folder, slab, date)
dipTextFile = 'Output/%s/%s_slab2_dip_%s.txt' % (folder, slab, date)
dipGridFile = 'Output/%s/%s_slab2_dip_%s.grd' % (folder, slab, date)
uncTextFile = 'Output/%s/%s_slab2_unc_%s.txt' % (folder, slab, date)
uncGridFile = 'Output/%s/%s_slab2_unc_%s.grd' % (folder, slab, date)
thickTextFile = 'Output/%s/%s_slab2_thk_%s.txt' % (folder, slab, date)
thickGridFile = 'Output/%s/%s_slab2_thk_%s.grd' % (folder, slab, date)
savedir = 'Output/%s'%folder
''' ------ (3) Define optional boxes for PDF/print testing ------'''
if args.test is not None:
testlonmin = args.test[0]
testlonmax = args.test[1]
testlatmin = args.test[2]
testlatmax = args.test[3]
if testlonmin < 0:
testlonmin += 360
if testlonmax < 0:
testlonmax += 360
testarea = [testlonmin, testlonmax, testlatmin, testlatmax]
printtest = True
os.system('mkdir Output/PDF%s' % (slab))
os.system('mkdir Output/multitest_%s' % (slab))
f = open(datainfo, 'w+')
f.write('dataID, nodeID, used_or_where_filtered')
f.write('\n')
f.close()
f = open(nodeinfo, 'w+')
f.write('nodeID, len(df), status, details')
f.write('\n')
f.close()
else:
# an area not in range of any slab polygon
testarea = [220, 230, 15, 20]
printtest = False
''' --- (5) Gathering optional arguments, setting defaults ---'''
if use_box == 'yes':
check = 1
slab = s2f.rectangleIntersectsPolygon(lonmin, lonmax, latmin,
latmax, polygonFile)
if isinstance(slab, str):
slab = slab
else:
try:
slab = slab[0]
except:
print('System exit because box does not intersect slab polygon')
raise SystemExit()
elif use_box == 'no':
check = 0
lon1, lon2, lat1, lat2 = s2f.determine_polygon_extrema(slab,
polygonFile)
lonmin = float(lon1)
lonmax = float(lon2)
latmin = float(lat1)
latmax = float(lat2)
else:
print('use_box in slab2input.par must be "yes" or "no"')
raise SystemExit()
''' ------ (6) Define search ellipsoid parameters ------'''
alen = radius1
blen = radius2
ec = math.sqrt(1-((math.pow(blen, 2))/(math.pow(alen, 2))))
mdist = alen * ec
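# ec is the eccentricity of the search ellipse, sqrt(1 - b^2/a^2), and mdist is
# its linear eccentricity (center-to-focus distance), a*ec; e.g. alen=150 and
# blen=75 give ec ~= 0.87 and mdist ~= 130 km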
''' ------ (7) Define Average active source profiles ------'''
# Different because alu is variable E/W
if slab == 'alu':
AA_data = pd.read_csv('library/avprofiles/alu_av5.csv')
global_average = False
elif slab == 'him':
AA_data = pd.read_csv('library/avprofiles/him_av.csv')
global_average = False
elif slab == 'kur' or slab == 'izu':
AA_source = 'library/avprofiles/%s_av.txt' % 'jap'
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
AA_data = AA_data[AA_data.dist < 125]
global_average = False
# Use RF data like AA data to constrain flat slab in Mexico
elif slab == 'cam':
AA_source = 'library/avprofiles/%s_av.txt' % slab
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
RF_data = pd.read_csv('library/avprofiles/cam_RF_av.csv')
AA_data = pd.concat([AA_data,RF_data],sort=True)
global_average = False
else:
global_average = False
# See if there is an average active source profile for this slab
try:
AA_source = 'library/avprofiles/%s_av.txt' % slab
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
# If there is no profile for this slab, use the global profile
except:
AA_global = pd.read_csv('library/avprofiles/global_as_av2.csv')
AA_data = AA_global[['dist', 'depth']]
global_average = True
if slab == 'phi' or slab == 'mue':
AA_data = AA_data[AA_data.dist < 10]
if slab == 'cot':
AA_data = AA_data[AA_data.dist < 10]
if slab == 'ita' or slab == 'puy':
AA_data = AA_data[AA_data.dist < 1]
''' ------ (8) Define reference model (Slab1.0 and/or slab guides) ------'''
polyname = slab
if slab == 'kur' or slab == 'izu':
polyname = 'jap'
# Search for slab guides in library/slabguides
slabguide = None
slabguide2 = None
for SGfile in os.listdir('library/slabguides'):
if SGfile[0:3] == polyname:
SGfile1 = SGfile
slabguide = gmt.GMTGrid.load('library/slabguides/%s'%SGfile1)
# Find secondary slab guide for regions where there are two
if polyname == 'sum' or polyname == 'man' or polyname == 'phi' or polyname =='sam' or polyname == 'sco' or polyname == 'mak' or polyname == 'jap':
for f in os.listdir('library/slabguides'):
if f[0:3] == polyname and f != SGfile:
print ('f',f)
SGfile2 = f
slabguide2 = gmt.GMTGrid.load('library/slabguides/%s'%SGfile2)
break
break
# Get Slab1.0 grid where applicable
try:
depgrid = s2f.get_grid(slab, 'depth')
except:
print (' Slab1.0 does not exist in this region, using slab guide')
depgrid = gmt.GMTGrid.load('library/slabguides/%s'%SGfile1)
slabguide = None
# Calculate strike and dip grids
strgrid, dipgrid = s2f.mkSDgrd(depgrid)
slab1data = s2f.mkSlabData(depgrid, strgrid, dipgrid, printtest)
slab1data.to_csv('gradtest.csv',header=True,index=False)
# Add slab guide data to Slab1.0 grids where necessary
if slabguide is not None:
print ('slab guide for this model:',slabguide)
guidestr, guidedip = s2f.mkSDgrd(slabguide)
guidedata = s2f.mkSlabData(slabguide, guidestr, guidedip, printtest)
if SGfile1 == 'phi_SG_north':
guidedata = guidedata[guidedata.lat>14]
elif slab == 'ryu':
guidedata = guidedata[guidedata.lon>137]
slab1data = slab1data[slab1data.lat<=137]
slab1data = pd.concat([slab1data, guidedata],sort=True)
slab1data = slab1data.reset_index(drop=True)
if slabguide2 is not None:
print ('secondary slab guide for this model:',slabguide2)
guidestr, guidedip = s2f.mkSDgrd(slabguide2)
guidedata = s2f.mkSlabData(slabguide2, guidestr, guidedip, printtest)
if SGfile2 == 'phi_SG_north':
guidedata = guidedata[guidedata.lat>14]
slab1data = pd.concat([slab1data, guidedata],sort=True)
slab1data = slab1data.reset_index(drop=True)
#slab1data.to_csv('slab1data.csv',header=True,index=False)
''' ------ (9) Define Trench Locations ------'''
TR_data = pd.read_csv(trenches)
if slab == 'izu' or slab == 'kur':
TR_data = TR_data[TR_data.slab == 'jap']
else:
TR_data = TR_data[TR_data.slab == slab]
TR_data = TR_data.reset_index(drop=True)
TR_data.loc[TR_data.lon < 0, 'lon']+=360
''' ------ (10) Open and modify input dataset ------'''
eventlistALL = pd.read_table('%s' % inFile, sep=',', dtype={
'lon': np.float64, 'lat': np.float64,'depth': np.float64,
'unc': np.float64, 'etype': str, 'ID': np.int, 'mag': np.float64,
'S1': np.float64, 'D1': np.float64, 'R1': np.float64,
'S2': np.float64, 'D2': np.float64, 'R2': np.float64,
'src': str, 'time': str, 'mlon': np.float64, 'mlat': np.float64,
'mdep': np.float64})
ogcolumns = ['lat', 'lon', 'depth', 'unc', 'etype', 'ID', 'mag', 'time', \
'S1', 'D1', 'R1','S2', 'D2', 'R2', 'src']
kagancols = ['lat', 'lon', 'depth', 'unc', 'etype', 'ID', 'mag', 'time', \
'S1', 'D1', 'R1','S2', 'D2', 'R2', 'src', 'mlon', 'mlat', 'mdep']
eventlist = eventlistALL[kagancols]
if printtest:
lat65 = eventlist[eventlist.lat>65]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist = eventlist[eventlist.lat <= 65]', datainfo,'df')
dataGP = eventlist[eventlist.etype == 'GP']
if len(dataGP) > 0:
s2f.addToDataInfo(dataGP, 0, 'eventlist = eventlist[eventlist.etype != GP]', datainfo,'df')
eventlist = eventlist[eventlist.lat <= 65]
eventlist = eventlist[eventlist.etype != 'GP']
maxID = eventlistALL['ID'].max()
# Add/Remove manually identified points that don't follow general rules
remdir = 'library/points_to_remove/current_files'
for badFile in os.listdir(remdir):
if badFile[0:3] == slab or badFile[0:3] == 'ALL' or ((slab == 'izu' or slab == 'kur') and badFile[0:3] == 'jap'):
print (' manually removing points listed in:',badFile)
donotuse = pd.read_csv('%s/%s'%(remdir,badFile))
eventlist = s2f.removePoints(donotuse, eventlist, lonmin,
lonmax, latmin, latmax, printtest, datainfo, True, slab)
doubleuse = pd.read_csv(addFile)
eventlist, maxID = s2f.doublePoints(doubleuse, eventlist, maxID)
if slab == 'kur':
eventlist.loc[eventlist.etype == 'TO', 'unc'] = 100
if slab == 'sul' or slab == 'man':
eventlist = eventlist[eventlist.etype != 'CP']
if slab == 'him':
eventlist = eventlist[eventlist.src != 'schulte']
if slab == 'sumz' or slab == 'kur' or slab == 'jap' or slab == 'izu':
if printtest:
lat65 = eventlist[eventlist.etype=='TO']
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist = eventlist[eventlist.etype != TO]', datainfo,'df')
eventlist = eventlist[eventlist.etype != 'TO']
if slab == 'kurz':
if printtest:
lat65 = eventlist[eventlist.etype=='ER']
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist = eventlist[eventlist.etype != ER]', datainfo,'df')
eventlist = eventlist[eventlist.etype != 'ER']
if slab == 'sol':
if printtest:
lat65 = eventlist[(eventlist.etype == 'BA') & (eventlist.lon <= 149)]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist[(eventlist.etype != BA) | (eventlist.lon > 149)]', datainfo,'df')
eventlist = eventlist[(eventlist.etype != 'BA') | (eventlist.lon > 149)]
TR_data = TR_data[TR_data.lon>149]
if slab == 'man':
if printtest:
lat65 = eventlist[(eventlist.etype == 'BA') & (eventlist.lon >= 120)]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist[(eventlist.etype == BA) & (eventlist.lon >= 120)]', datainfo,'df')
eventlist = eventlist[(eventlist.etype != 'BA') | ((eventlist.lon < 120)|(eventlist.lat > 15))]
if slab == 'sum':
if printtest:
lat65 = eventlist[(eventlist.etype == 'BA') & (eventlist.lat > 21)]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist[(eventlist.etype != BA) | (eventlist.lon > 149)]', datainfo,'df')
eventlist = eventlist[(eventlist.etype != 'BA') | (eventlist.lat <= 21)]
if slab == 'ryu':
ryutodata = eventlist[(eventlist.etype == 'TO')&(eventlist.lon>133)]
if slab == 'hel':
eventlist.loc[eventlist.etype == 'RF', 'etype'] = 'CP'
if slab == 'puyz' or slab == 'mak':
eventlist = eventlist[eventlist.src != 'ddgap']
# Set default uncertainties for events without uncertainties
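# (km): EQ 15, CP 5, BA 1, TO 40; ER is floored at 5 (15 for puy)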
eventlist.loc[eventlist.etype == 'EQ', 'unc'] = 15.0
eventlist.loc[eventlist.etype == 'CP', 'unc'] = 5.0
eventlist.loc[eventlist.etype == 'BA', 'unc'] = 1.0
eventlist.loc[eventlist.etype == 'TO', 'unc'] = 40.0
eventlist.loc[(eventlist.etype == 'ER') & (eventlist.unc <5), 'unc'] = 5.0
if slab == 'puy':
eventlist.loc[(eventlist.etype == 'ER') & (eventlist.unc <15), 'unc'] = 15.0
eventlist.loc[eventlist.mlon < 0, 'mlon'] += 360
# Ensure all data are within longitudes 0-360
eventlist.loc[eventlist.lon < 0, 'lon']+=360
# Define mean depth of bathymetry (for constraining interp outboard trench)
meanBAlist = eventlist[eventlist.etype == 'BA']
meanBA = meanBAlist['depth'].mean()
del eventlistALL
''' ----- (11) Calculate seismogenic zone thickness ------ '''
# define seismogenic thickness parameters. change if needed
maxdep = 65
maxdepdiff = 20
origorcentl = 'c'
origorcentd = 'c'
slaborev = 'e'
lengthlim = -50
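# NOTE (assumption, not from the original docs): origorcentl/origorcentd 'c' is
# taken to mean "use centroid rather than origin location/depth" in
# getReferenceKagan; slaborev and lengthlim are passed straight to getSZthickness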
ogcolumns = eventlist.columns
eventlist = s2f.getReferenceKagan(slab1data, eventlist, origorcentl, origorcentd)
if slab != 'hin':
seismo_thick, taper_start = s2f.getSZthickness(eventlist,folder,slab,maxdep,maxdepdiff,origorcentl,origorcentd,slaborev,savedir,lengthlim)
else:
seismo_thick = 20
taper_start = 20
if slab == 'hel' or slab == 'car' or slab == 'mak':
seismo_thick = 40
if slab == 'sol':
seismo_thick = 40
if slab == 'alu' or slab == 'cot' or slab == 'sul':
seismo_thick = 10
if slab == 'sol':
eventlistE = eventlist[eventlist.lon>148]
eventlistW = eventlist[eventlist.lon<=148]
eventlistE = s2f.cmtfilter(eventlistE,seismo_thick,printtest,datainfo,slab)
eventlist = pd.concat([eventlistE,eventlistW],sort=True)
if slab == 'sum':
eventlistS = eventlist[eventlist.lat<=22]
eventlistN = eventlist[eventlist.lat>22]
eventlistS = s2f.cmtfilter(eventlistS,seismo_thick,printtest,datainfo,slab)
eventlist = pd.concat([eventlistS,eventlistN],sort=True)
if slab != 'hal' and slab != 'him' and slab != 'pam' and slab != 'hin' and slab != 'sol' and slab != 'sum' and slab != 'cas':
eventlist = s2f.cmtfilter(eventlist,seismo_thick,printtest,datainfo,slab)
eventlist = eventlist[ogcolumns]
''' ------ (12) Record variable parameters used for this model ------'''
f = open(these_params, 'w+')
f.write('Parameters used to create file for slab_Date_time: %s_%s_%s \n' \
%(slab, date, time))
f.write('\n')
f.close()
f = open(these_params, 'a')
f.write('inFile: %s \n' % inFile)
f.write('use_box: %s \n' % use_box)
f.write('latmin: %s \n' % str(latmin))
f.write('latmax: %s \n' % str(latmax))
f.write('lonmin: %s \n' % str(lonmin))
f.write('lonmax: %s \n' % str(lonmax))
f.write('slab: %s \n' % slab)
f.write('grid: %s \n' % str(grid))
f.write('radius1: %s \n' % str(radius1))
f.write('radius2: %s \n' % str(radius2))
f.write('alen: %s \n' % str(alen))
f.write('blen: %s \n' % str(blen))
f.write('sdr: %s \n' % str(sdr))
f.write('ddr: %s \n' % str(ddr))
f.write('taper: %s \n' % str(taper))
f.write('T: %s \n' % str(T))
f.write('node: %s \n' % str(node))
f.write('filt: %s \n' % str(filt))
f.write('maxdist: %s \n' % str(maxdist))
f.write('mindip: %s \n' % str(mindip))
f.write('minstk: %s \n' % str(minstk))
f.write('maxthickness: %s \n' % str(maxthickness))
f.write('seismo_thick: %s \n' % str(seismo_thick))
f.write('dipthresh: %s \n' % str(dipthresh))
f.write('fracS: %s \n' % str(fracS))
f.write('knot_no: %s \n' % str(knot_no))
f.write('kdeg: %s \n' % str(kdeg))
f.write('rbfs: %s \n' % str(rbfs))
if slab == 'mue' or slab == 'phi' or slab == 'cot' or slab == 'sul' or slab == 'ryu':
f.write('undergrid: %s \n' % str(undergrid))
f.close()
''' ------ (13) Define search grid ------ '''
print(' Creating search grid...')
#Creates a grid over the slab region
regular_grid = s2f.create_grid_nodes3(grid, lonmin, lonmax, latmin, latmax)
grid_in_polygon = s2f.createGridInPolygon2(regular_grid, slab, polygonFile)
lons = grid_in_polygon[:, 0]
lats = grid_in_polygon[:, 1]
lons = np.round(lons,decimals=1)
lats = np.round(lats,decimals=1)
lons[lons <0] += 360
slab1guide,slab1query = s2f.makeReference(slab1data,lons,lats,grid,printtest,slab)
''' ------ (14) Identify tomography datasets ------ '''
## Identify how many tomography datasets are included
tomo_data = eventlist[eventlist.etype == 'TO']
if len(tomo_data) > 0 and slab != 'sam':
sources = tomo_data.src
TOsrc = set()
for x in sources:
TOsrc.add(x)
tomo_sets = TOsrc
tomo = True
else:
tomo_sets = 0
tomo = False
premulti = pd.DataFrame()
postmulti = pd.DataFrame()
OGmulti = pd.DataFrame()
elistAA = pd.DataFrame()
loncuts,latcuts,elistcuts = s2f.getlatloncutoffs(lons,lats,eventlist,printtest)
''' ------ (15) Initialize arrays for Section 2 ------ '''
# Creates list of events that were used for the model based on ID
used_all = np.zeros((1, 2))
used_TO = np.zeros((1, 2))
warnings.filterwarnings('ignore', 'Mean of empty slice.')
pd.options.mode.chained_assignment = None
'''Section 2: First loop
This Accomplishes:
1) Calculate error for each used tomography model.
This is accomplished by determining the difference between measured
depths for tomography and earthquake data, which will be used
outside of the loop.
2) Identify data to constrain depth/coordinate of center of Benioff Zone.
2a) Identify local strike, dip, and depth of Slab1.0.
If Slab 1.0 does not exist, acquire strike from closest trench
location with a strike oriented perpendicularly to this lon/lat.
If extending beyond Slab1.0 depths perpendicularly, find nearest and
most perpendicular point on Slab1.0, and define depth to
search from based on dip and depth of that point on Slab1.0. The
dip is defined as the dip of the local Slab1.0 point.
If extending along strike from Slab1.0, define depth to search from
based on mean depth of data within defined radius of node. The
dip of the node is defined as 0.
2b) Filter by ellipsoid oriented perpendicularly to Slab1.0.
If the local dip is less than mindip, orient ellipsoid vertically
and along strike found in (2a).
If the local dip is greater than mindip, orient ellipsoid
perpendicular to strike/dip found in (2a).
The long axis of the ellipse is defined as radius1, the short axis
is defined as radius2.
The shallow extent of the ellipsoid is defined as sdr at depths
above seismo_thick, and is tapered to 3*sdr at depths greater
than seismo_thick.
The deep extent of the ellipsoid is defined as sdr at depths above
seismo_thick, and is tapered to ddr at depths greater than
seismo_thick.
2c) Nodes outboard of the trench are only constrained by bathymetry.
Nodes inboard of the trench are constrained by all but bathymetry.
2d) Conditionally add average active source/average receiver functions.
If within the distance of the longest AS profile from the trench
identify the average AS profile depth at that distance from
trench. If there is no active source point within the search
ellipsoid defined in (2b), add an average active source data
point to the set of data to constrain the depth at this node.
Receiver functions in cam and alu are being utilized similarly with
defined distances from trench and distances along strike from
key profiles that need to be utilized in the absence of
seismicity.
2e) If information other than tomography is available above 300 km
depth, all tomography is filtered at that node.
2f) If less than two data points are available to constrain a node, no
depth is resolved at that node.
2g) If |strike of Slab1.0 at node - strike of Slab1.0 at farthest data|
> minstrk, filter data at ends until < minstrk.
If this node is outside of Slab1.0, reduce long axis of search
ellipsoid prior to starting filters.
The output of this loop is two numpy arrays and a list of nodes with data:
used_TO: local difference between tomography and earthquake depths and
a tomography dataset identifier
used_all: indices for the data used and their associated nodes
This one is created to prevent the need for re-filtering
in later loops
'''
print("Start Section 2 of 7: First loop")
lons1 = (np.ones(len(lons))*-9999).astype(np.float64)
lats1 = (np.ones(len(lons))*-9999).astype(np.float64)
deps1 = (np.ones(len(lons))*-9999).astype(np.float64)
strs1 = (np.ones(len(lons))*-9999).astype(np.float64)
dips1 = (np.ones(len(lons))*-9999).astype(np.float64)
nIDs1 = (np.ones(len(lons))*-9999).astype(np.float64)
aleng = (np.ones(len(lons))*-9999).astype(np.float64)
bleng = (np.ones(len(lons))*-9999).astype(np.float64)
cleng = (np.ones(len(lons))*-9999).astype(np.float64)
sleng = (np.ones(len(lons))*-9999).astype(np.float64)
dleng = (np.ones(len(lons))*-9999).astype(np.float64)
elons1 = (np.ones(len(lons))*-9999).astype(np.float64)
elats1 = (np.ones(len(lons))*-9999).astype(np.float64)
edeps1 = (np.ones(len(lons))*-9999).astype(np.float64)
estrs1 = (np.ones(len(lons))*-9999).astype(np.float64)
edips1 = (np.ones(len(lons))*-9999).astype(np.float64)
enIDs1 = (np.ones(len(lons))*-9999).astype(np.float64)
ealeng = (np.ones(len(lons))*-9999).astype(np.float64)
ebleng = (np.ones(len(lons))*-9999).astype(np.float64)
ecleng = (np.ones(len(lons))*-9999).astype(np.float64)
esleng = (np.ones(len(lons))*-9999).astype(np.float64)
edleng = (np.ones(len(lons))*-9999).astype(np.float64)
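# node arrays are pre-filled with the sentinel value -9999; nodes that receive
# data overwrite their slot, and the sentinel entries are stripped out after the
# loops below (the e* arrays hold nodes that had a reference depth but no data)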
if args.nCores is not None:
if args.nCores > 1 and args.nCores < 8:
pooling = True
elif args.nCores == 1:
pooling = False
else:
pooling = False
else:
pooling = False
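# multiprocess.Pool is only used when 2-7 cores are requested; otherwise the
# node loop below runs serially through the same loops.loop1 function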
cutcount = 1
allnewnodes = None
for cut in range(len(loncuts)):
theselats = latcuts[cut]
theselons = loncuts[cut]
theseevents = elistcuts[cut]
indices = range(len(theselats))
if cut == 0:
i2 = 0
cutcount+=1
if pooling:
pool1 = Pool(args.nCores)
partial_loop1 = partial(loops.loop1, theselons, theselats, testarea, slab,
depgrid, strgrid, dipgrid, slab1query, theseevents,
seismo_thick, alen, blen, mdist, sdr, ddr, mindip, maxID,
AA_data, TR_data, maxdist, maxthickness, minstk,
tomo_sets, meanBA,slab1guide,grid,slab1data,dipthresh,datainfo,nodeinfo)
pts = pool1.map(partial_loop1, indices) #$$#
pool1.close()
pool1.join()
for i in range(len(indices)):
thisnode = pts[i]
if thisnode[13]:
lons1[i2] = thisnode[0]
lats1[i2] = thisnode[1]
deps1[i2] = thisnode[2]
strs1[i2] = thisnode[3]
dips1[i2] = thisnode[4]
nIDs1[i2] = thisnode[5]
aleng[i2] = thisnode[6]
bleng[i2] = thisnode[7]
cleng[i2] = thisnode[8]
sleng[i2] = thisnode[14]
dleng[i2] = thisnode[15]
nused_TO = thisnode[9]
if len(nused_TO) > 0:
if used_TO is not None:
used_TO = np.vstack((used_TO, nused_TO))
nused_all = thisnode[10]
if len(nused_all) > 0:
if used_all is not None:
used_all = np.vstack((used_all, nused_all))
AAadd = thisnode[11]
if len(AAadd)>0:
if AAadd['unc'].mean() > 5:
AAadd['etype'] = 'RF'
elistAA = pd.concat([elistAA, AAadd],sort=True)
newnodes = thisnode[12]
if len(newnodes)>0:
if allnewnodes is not None:
allnewnodes = np.vstack((allnewnodes,newnodes))
else:
allnewnodes = newnodes
if not thisnode[13] and np.isfinite(thisnode[2]):
elons1[i2] = thisnode[0]
elats1[i2] = thisnode[1]
edeps1[i2] = thisnode[2]
estrs1[i2] = thisnode[3]
edips1[i2] = thisnode[4]
enIDs1[i2] = thisnode[5]
ealeng[i2] = thisnode[6]
ebleng[i2] = thisnode[7]
ecleng[i2] = thisnode[8]
esleng[i2] = thisnode[14]
edleng[i2] = thisnode[15]
i2 += 1
else:
for nodeno in range(len(theselons)):
alon, alat, alocdep, alocstr, alocdip, anID, aaleng, ableng, acleng, aused_TO, aused_tmp, atrimmedAA, newnodes, anydata, asleng, adleng = loops.loop1(theselons, theselats, testarea, slab, depgrid, strgrid, dipgrid, slab1query, theseevents, seismo_thick, alen, blen, mdist, sdr, ddr, mindip, maxID, AA_data, TR_data, maxdist, maxthickness, minstk, tomo_sets, meanBA, slab1guide, grid, slab1data, dipthresh, datainfo, nodeinfo, nodeno)
if anydata:
lons1[i2] = alon
lats1[i2] = alat
deps1[i2] = alocdep
strs1[i2] = alocstr
dips1[i2] = alocdip
nIDs1[i2] = anID
aleng[i2] = aaleng
bleng[i2] = ableng
cleng[i2] = acleng
sleng[i2] = asleng
dleng[i2] = adleng
nused_TO = aused_TO
if len(nused_TO) > 0:
if used_TO is not None:
used_TO = np.vstack((used_TO, nused_TO))
nused_all = aused_tmp
if len(nused_all) > 0:
if used_all is not None:
used_all = np.vstack((used_all, nused_all))
AAadd = atrimmedAA
if len(AAadd)>0:
if AAadd['unc'].mean() > 5:
AAadd['etype'] = 'RF'
elistAA = pd.concat([elistAA, AAadd],sort=True)
if len(newnodes)>0:
if allnewnodes is not None:
allnewnodes = np.vstack((allnewnodes,newnodes))
else:
allnewnodes = newnodes
if not anydata and np.isfinite(alocdep):
elons1[i2] = alon
elats1[i2] = alat
edeps1[i2] = alocdep
estrs1[i2] = alocstr
edips1[i2] = alocdip
enIDs1[i2] = anID
ealeng[i2] = aaleng
ebleng[i2] = ableng
ecleng[i2] = acleng
esleng[i2] = asleng
edleng[i2] = adleng
i2 += 1
lons1 = lons1[lons1>-999]
lats1 = lats1[lats1>-999]
deps1 = deps1[(deps1>-999)|np.isnan(deps1)]
strs1 = strs1[strs1>-999]
dips1 = dips1[dips1>-999]
nIDs1 = nIDs1[nIDs1>-999]
aleng = aleng[aleng>-999]
bleng = bleng[bleng>-999]
cleng = cleng[cleng>-999]
sleng = sleng[sleng>-999]
dleng = dleng[dleng>-999]
elons1 = elons1[edleng>-999]
elats1 = elats1[edleng>-999]
edeps1 = edeps1[(edeps1>-999)|np.isnan(edeps1)]
estrs1 = estrs1[edleng>-999]
edips1 = edips1[edleng>-999]
enIDs1 = enIDs1[edleng>-999]
ealeng = ealeng[edleng>-999]
ebleng = ebleng[edleng>-999]
ecleng = ecleng[edleng>-999]
esleng = esleng[edleng>-999]
edleng = edleng[edleng>-999]
testdf = pd.DataFrame({'lon':lons1,'lat':lats1,'depth':deps1,'strike':strs1,'dip':dips1,'id':nIDs1,'alen':aleng,'blen':bleng,'clen':cleng,'slen':sleng,'dlen':dleng})
testdf.to_csv('firstloop.csv',header=True,index=False,na_rep=np.nan)
if allnewnodes is not None:
theseIDs = []
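# node IDs encode the rounded lon/lat (x10); negative latitudes get a '0'
# separator, e.g. lon 145.2, lat 12.3 -> 1452123 and lat -12.3 -> 14520123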
for i in range(len(allnewnodes)):
if allnewnodes[i,1]>0:
thisnID = int('%i%i'%(allnewnodes[i,0]*10,allnewnodes[i,1]*10))
else:
thisnID = int('%i0%i'%(allnewnodes[i,0]*10,allnewnodes[i,1]*-10))
theseIDs.append(thisnID)
newlonsdf1 = pd.DataFrame({'lon':allnewnodes[:,0],'lat':allnewnodes[:,1],'nID':theseIDs})
newlonsdf = newlonsdf1.drop_duplicates(['nID'])
theselons = newlonsdf['lon'].values
theselats = newlonsdf['lat'].values
if grid == 0.2:
grid2 = 0.1
elif grid == 0.1:
grid2 = 0.05
else:
grid2 = grid
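# new nodes spawned by the first pass are re-referenced at half the nominal
# grid spacing (0.2 -> 0.1, 0.1 -> 0.05) before being run through loop1 again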
slab1guide,slab1query = s2f.makeReference(slab1data,theselons,theselats,grid2,printtest,slab)
newlats = []
newlons = []
newdeps = []
newstrs = []
newdips = []
newnIDs = []
newalen = []
newblen = []
newclen = []
newslen = []
newdlen = []
enewlats = []
enewlons = []
enewdeps = []
enewstrs = []
enewdips = []
enewnIDs = []
enewalen = []
enewblen = []
enewclen = []
enewslen = []
enewdlen = []
if pooling:
indices = range(len(theselons))
pool1 = Pool(args.nCores)
partial_loop1 = partial(loops.loop1, theselons, theselats, testarea, slab,
depgrid, strgrid, dipgrid, slab1query, eventlist,
seismo_thick, alen, blen, mdist, sdr, ddr, mindip, maxID,
AA_data, TR_data, maxdist, maxthickness, minstk,
tomo_sets, meanBA,slab1guide,grid,slab1data,dipthresh,datainfo,nodeinfo)
pts = pool1.map(partial_loop1, indices)
pool1.close()
pool1.join()
for i in range(len(indices)):
thisnode = pts[i]
if thisnode[13]:
newlons.append(thisnode[0])
newlats.append(thisnode[1])
newdeps.append(thisnode[2])
newstrs.append(thisnode[3])
newdips.append(thisnode[4])
newnIDs.append(thisnode[5])
newalen.append(thisnode[6])
newblen.append(thisnode[7])
newclen.append(thisnode[8])
newslen.append(thisnode[14])
newdlen.append(thisnode[15])
nused_TO = thisnode[9]
if len(nused_TO) > 0:
if used_TO is not None:
used_TO = np.vstack((used_TO, nused_TO))
nused_all = thisnode[10]
if len(nused_all) > 0:
if used_all is not None:
used_all = np.vstack((used_all, nused_all))
AAadd = thisnode[11]
if len(AAadd)>0:
if AAadd['unc'].mean() > 5:
AAadd['etype'] = 'RF'
elistAA = pd.concat([elistAA, AAadd],sort=True)
if not thisnode[13] and np.isfinite(thisnode[2]):
enewlons.append(thisnode[0])
enewlats.append(thisnode[1])
enewdeps.append(thisnode[2])
enewstrs.append(thisnode[3])
enewdips.append(thisnode[4])
enewnIDs.append(thisnode[5])
enewalen.append(thisnode[6])
enewblen.append(thisnode[7])
enewclen.append(thisnode[8])
enewslen.append(thisnode[14])
enewdlen.append(thisnode[15])
else:
for nodeno in range(len(theselons)):
alon, alat, alocdep, alocstr, alocdip, anID, aalen, ablen, aclen, aused_TO, aused_tmp, atrimmedAA, newnodes, anydata, aslen, adlen = loops.loop1(theselons, theselats, testarea, slab, depgrid, strgrid, dipgrid, slab1query, eventlist, seismo_thick, alen, blen, mdist, sdr, ddr, mindip, maxID, AA_data, TR_data, maxdist, maxthickness, minstk, tomo_sets, meanBA, slab1guide, grid, slab1data, dipthresh, datainfo, nodeinfo, nodeno)
if anydata:
newlons.append(alon)
newlats.append(alat)
newdeps.append(alocdep)
newstrs.append(alocstr)
newdips.append(alocdip)
newnIDs.append(anID)
newalen.append(aalen)
newblen.append(ablen)
newclen.append(aclen)
newslen.append(aslen)
newdlen.append(adlen)
nused_TO = aused_TO
if len(nused_TO) > 0:
if used_TO is not None:
used_TO = np.vstack((used_TO, nused_TO))
nused_all = aused_tmp
if len(nused_all) > 0:
if used_all is not None:
used_all = np.vstack((used_all, nused_all))
AAadd = atrimmedAA
if len(AAadd)>0:
if AAadd['unc'].mean() > 5:
AAadd['etype'] = 'RF'
elistAA = pd.concat([elistAA, AAadd],sort=True)
if not anydata and np.isfinite(alocdep):
enewlons.append(alon)
enewlats.append(alat)
enewdeps.append(alocdep)
enewstrs.append(alocstr)
enewdips.append(alocdip)
enewnIDs.append(anID)
enewalen.append(aalen)
enewblen.append(ablen)
enewclen.append(aclen)
enewslen.append(aslen)
enewdlen.append(adlen)
#np.savetxt('%s_diptest.csv'%slab, allnewnodes, header='lon,lat,depth,strike,dip',fmt='%.2f', delimiter=',',comments='')
if printtest:
fig = plt.figure(figsize=(20, 10))
ax1 = fig.add_subplot(131)
con = ax1.scatter(lons1,lats1,c=dips1,s=10,edgecolors='none',cmap='plasma')
ax1.set_ylabel('Latitude')
ax1.axis('equal')
plt.grid()
title = 'Diptest'
ax1.set_title(title)
cbar = fig.colorbar(con)
cbar.set_label('Dip')
ax2 = fig.add_subplot(132)
con = ax2.scatter(allnewnodes[:,0], allnewnodes[:,1],c=allnewnodes[:,1],s=10,edgecolors='none',cmap='plasma')
ax2.set_xlabel('Longitude')
ax2.set_ylabel('Latitude')
ax2.axis('equal')
plt.grid()
cbar = fig.colorbar(con)
cbar.set_label('Dip')
ax3 = fig.add_subplot(133)
con = ax3.scatter(newlons, newlats,c=newdips,s=10,edgecolors='none',cmap='plasma')
ax3.set_xlabel('Longitude')
ax3.set_ylabel('Latitude')
ax3.axis('equal')
plt.grid()
cbar = fig.colorbar(con)
cbar.set_label('Dip')
figtitle = 'diptest.png'
fig.savefig(figtitle)
plt.close()
lons1 = np.append(lons1, [newlons])
lats1 = np.append(lats1, [newlats])
deps1 = np.append(deps1, [newdeps])
strs1 = np.append(strs1, [newstrs])
dips1 = np.append(dips1, [newdips])
nIDs1 = np.append(nIDs1, [newnIDs])
aleng = np.append(aleng, [newalen])
bleng = np.append(bleng, [newblen])
cleng = np.append(cleng, [newclen])
sleng = np.append(sleng, [newslen])
dleng = np.append(dleng, [newdlen])
elons1 = np.append(elons1, [enewlons])
elats1 = np.append(elats1, [enewlats])
edeps1 = np.append(edeps1, [enewdeps])
estrs1 = np.append(estrs1, [enewstrs])
edips1 = np.append(edips1, [enewdips])
enIDs1 = np.append(enIDs1, [enewnIDs])
ealeng = np.append(ealeng, [enewalen])
ebleng = np.append(ebleng, [enewblen])
ecleng = np.append(ecleng, [enewclen])
esleng = np.append(esleng, [enewslen])
edleng = np.append(edleng, [enewdlen])
#print ('lon',len(elons1),'lat',len(elats1),'ogdep',len(edeps1),'ogstr',len(estrs1),'ogdip',len(edips1),'nID',len(enIDs1),'alen',len(ealeng),'blen',len(ebleng),'clen',len(ecleng),'slen',len(esleng),'dlen',len(edleng))
emptynodes = pd.DataFrame({'lon':elons1,'lat':elats1,'ogdep':edeps1,'ogstr':estrs1,'ogdip':edips1,'nID':enIDs1,'alen':ealeng,'blen':ebleng,'clen':ecleng,'slen':esleng,'dlen':edleng})
#emptynodes.to_csv('emptynodes.csv',header=True,index=False)
refdeps = pd.DataFrame({'lon':lons1, 'lat':lats1, 'ogdep':deps1})
if global_average:
''' # need to fix this after adjusting based on BA depth at trench
AA_global['depthtest'] = (AA_global['depth'].values*100).astype(int)
for index, row in elistAA.iterrows():
depthAA = row['depth']
depthtestAA = int(100*row['depth'])
thisdepth = AA_global[AA_global.depthtest == depthtestAA]
uncAA = thisdepth['unc'].values[0]
elistAA.loc[elistAA.depth == depthAA, 'unc'] = uncAA*2
'''
elistAA['unc'] = 10.0
elistcuts.append(elistAA)
eventlist2 = pd.concat(elistcuts,sort=True)
eventlist = eventlist2.reset_index(drop=True)
del eventlist2
eventlist = eventlist.drop_duplicates(['ID'])
eventlist = eventlist.reset_index(drop=True)
# Remove first line of zeros
used_TO = used_TO[~np.all(used_TO ==0, axis=1)]
used_all = used_all[~np.all(used_all ==0, axis=1)]
'''Section 3: Calculate tomography uncertainties
Here we use the output from the first loop to calculate tomography uncertainties.
For each tomography dataset, we calculate the standard deviation of the distribution of "differences".
We apply this standard deviation as the uncertainty value for each tomography datum from that dataset.
'''
print("Start Section 3 of 7: Assigning tomography uncertainties")
if tomo:
for idx, src in enumerate(tomo_sets):
tomog = used_TO[:][used_TO[:, 1] == idx]
tmp_std = np.std(tomog[:, 0])
if tmp_std > 40.:
tmp_std = 40.
elif tmp_std < 15.:
tmp_std = 15.
elif np.isnan(tmp_std):
tmp_std = 40
eventlist.loc[eventlist['src'] == src, 'unc'] = tmp_std
'''Section 4: Second loop
The purpose of this loop is to determine a set of "pre-shifted" slab points that do not utilize receiver function data.
This output dataset will represent a transition from slab surface at shallow depths to slab center at deeper depths.
The only output from this loop is an array of the form [ lat lon dep unc nodeID ]
'''
print("Start Section 4 of 7: Second loop")
bzlons, bzlats, bzdeps, stds2, nIDs2 = [], [], [], [], []
lats2, lons2, str2, dip2, centsurf = [], [], [], [], []
bilats, bilons, binods, bistds = [], [], [], []
biindx, bistrs, bidips, bideps = [], [], [], []
baleng, bbleng, bcleng, onlyto = [], [], [], []
rlist = pd.DataFrame()
if pooling:
pool2 = Pool(args.nCores)
npass = args.nCores
partial_loop2 = partial(loops.loop2, testarea, lons1, lats1, nIDs1, deps1, strs1, dips1, used_all, eventlist, sdr, ddr, seismo_thick, slab, maxthickness, rlist, mindip, aleng, bleng, cleng)
indices = range(len(lats1))
pts2 = pool2.map(partial_loop2, indices)
pool2.close()
pool2.join()
for i in range(len(indices)):
thisnode = pts2[i]
if np.isfinite(thisnode[0]):
bzlons.append(thisnode[0])
bzlats.append(thisnode[1])
bzdeps.append(thisnode[2])
stds2.append(thisnode[3])
nIDs2.append(thisnode[4])
lats2.append(thisnode[5])
lons2.append(thisnode[6])
str2.append(thisnode[7])
dip2.append(thisnode[8])
centsurf.append(thisnode[9])
baleng.append(thisnode[20])
bbleng.append(thisnode[21])
bcleng.append(thisnode[22])
onlyto.append(thisnode[23])
if np.isfinite(thisnode[10]):
bilats.append(thisnode[10])
bilons.append(thisnode[11])
binods.append(thisnode[12])
bistds.append(thisnode[13])
biindx.append(thisnode[14])
bistrs.append(thisnode[15])
bidips.append(thisnode[16])
bideps.append(thisnode[17])
rlist = thisnode[18]
if len(rlist) > 0:
removeIDs = np.array(rlist.ID)
thisID = np.ones(len(removeIDs))*thisnode[4]
removearray = list(zip(thisID, removeIDs))
removeIDID = np.array(removearray)
used_all = used_all[~(np.in1d(used_all[:, 1], removeIDID) & np.in1d(used_all[:, 0], thisID))]
multi = thisnode[19]
if len(multi) > 0:
premulti = pd.concat([premulti, multi],sort=True)
del pts2
else:
npass = 1
for nodeno in range(len(lats1)):
bpeak_lon, bpeak_lat, bpeak_depth, bstd, bnID, blat, blon, bcstr, bcdip, bcentsurf, bbilats, bbilons, bbinods, bbistds, bbiindx, bbistrs, bbidips, bbideps, brlist, bpremulti, alen, blen, clen, onlyt = loops.loop2(testarea, lons1, lats1, nIDs1, deps1, strs1, dips1, used_all, eventlist, sdr, ddr, seismo_thick, slab, maxthickness, rlist, mindip, aleng, bleng, cleng, nodeno)
if np.isfinite(bpeak_lon):
bzlons.append(bpeak_lon)
bzlats.append(bpeak_lat)
bzdeps.append(bpeak_depth)
stds2.append(bstd)
nIDs2.append(bnID)
lats2.append(blat)
lons2.append(blon)
str2.append(bcstr)
dip2.append(bcdip)
centsurf.append(bcentsurf)
baleng.append(alen)
bbleng.append(blen)
bcleng.append(clen)
onlyto.append(onlyt)
if np.isfinite(bbilats):
bilats.append(bbilats)
bilons.append(bbilons)
binods.append(bbinods)
bistds.append(bbistds)
biindx.append(bbiindx)
bistrs.append(bbistrs)
bidips.append(bbidips)
bideps.append(bbideps)
rlist = brlist
if len(rlist) > 0:
removeIDs = np.array(rlist.ID)
thisID = np.ones(len(removeIDs))*bnID
removearray = list(zip(thisID, removeIDs))
removeIDID = np.array(removearray)
used_all = used_all[~(np.in1d(used_all[:, 1], removeIDID) & np.in1d(used_all[:, 0], thisID))]
multi = bpremulti
if len(multi) > 0:
premulti = pd.concat([premulti, multi],sort=True)
tmp_res = pd.DataFrame({'bzlon':bzlons,'bzlat':bzlats,'depth':bzdeps,'stdv':stds2,'nID':nIDs2,'lat':lats2,'lon':lons2,'ogstr':str2,'ogdip':dip2,'centsurf':centsurf,'alen':baleng,'blen':bbleng,'clen':bcleng,'onlyto':onlyto})
for j in range(len(bilats)):
lon = bilons[j]
lat = bilats[j]
nID = binods[j]
stdv = bistds[j]
stk = bistrs[j]
dep = bideps[j]
dip = bidips[j]
if dip <= mindip:
peak_depth = s2f.findMultiDepth(lon, lat, nID, tmp_res, grid, premulti, stk, slab, dep, alen, printtest)
peak_lon = lon
peak_lat = lat
else:
peak_lon, peak_lat, peak_depth = s2f.findMultiDepthP(lon, lat, nID, tmp_res, grid, premulti, stk, slab, dep, dip, alen, printtest)
tmp_res.loc[tmp_res.nID == nID, 'bzlon'] = peak_lon
tmp_res.loc[tmp_res.nID == nID, 'bzlat'] = peak_lat
tmp_res.loc[tmp_res.nID == nID, 'depth'] = peak_depth
tmp_res = s2f.addGuidePoints(tmp_res, slab)
if slab == 'sol':
tmp_res = tmp_res[(tmp_res.bzlon>142) & (tmp_res.bzlon<164)]
if slab == 'sul':
tmp_res = tmp_res[(tmp_res.bzlon<123.186518923) | (tmp_res.depth<100)]
tmp_res = tmp_res[(tmp_res.bzlon<122.186518923) | (tmp_res.depth<200)]
# Save data used to file
used_IDs = used_all[:, 1]
used_data = eventlist[eventlist['ID'].isin(used_IDs)]
used_data = used_data[['lat', 'lon', 'depth', 'unc', 'etype', 'ID', 'mag', 'time', 'S1', 'D1', 'R1', 'S2', 'D2', 'R2', 'src']]
used_data = used_data.drop_duplicates(['ID'])
used_data.loc[used_data.lon < 0, 'lon']+=360
if slab == 'hel':
used_data.loc[used_data.etype == 'CP', 'etype']='RF'
used_data.to_csv(dataFile, header=True, index=False, float_format='%0.2f', na_rep = float('nan'), chunksize=100000)
#tmp_res.to_csv('nodetest.csv', header=True, index=False, float_format='%0.2f', na_rep = float('nan'), chunksize=100000)
'''Section 5: Calculate shifts
Here we use the output of the second loop to calculate shifting locations for non-RF results.
A user-specified lithospheric thickness can be read in or lithosphere thickness will be calculated using the nearest oceanic plate age.
The taper and fracshift are set in the parameter file for each subduction zone. fracshift was determined via testing each individual
subduction zone to match seismicity. Shift direction is determined by the strike and dip of a surface created using the output from the second loop.
A clipping mask is also created in this section using the shifted output data.
'''
print("Start Section 5 of 7: Calculate shifts")
# Calculate shift for each node
print(" Calculating shift...")
surfnode = 0.5
data0 = tmp_res[(tmp_res.stdv > -0.000001)&(tmp_res.stdv < 0.000001)]
tmp_res = tmp_res[(tmp_res.stdv < -0.000001)|(tmp_res.stdv > 0.000001)]
if use_box == 'yes':
if lonmin<0:
lonmin+=360
if lonmax<0:
lonmax+=360
TR_data = TR_data[(TR_data.lon<lonmax)&(TR_data.lon>lonmin)]
TR_data = TR_data[(TR_data.lat<latmax)&(TR_data.lat>latmin)]
TR_data = TR_data.reset_index(drop=True)
# Read in age grid files
ages = gmt.GMTGrid.load(agesFile)
ages_error = gmt.GMTGrid.load(ageerrorsFile)
shift_out, maxthickness = s2f.slabShift_noGMT(tmp_res, node, T, TR_data, seismo_thick, taper, ages, ages_error, filt, slab, maxthickness, grid, 'bzlon', 'bzlat', 'depth', fracS, npass, meanBA, printtest, kdeg, knot_no, rbfs, use_box)
del ages
del ages_error
tmp_res['pslon'] = tmp_res['lon'].values*1.0
tmp_res['pslat'] = tmp_res['lat'].values*1.0
tmp_res['psdepth'] = tmp_res['depth'].values*1.0
tmp_res = tmp_res[['pslon', 'pslat', 'bzlon', 'bzlat', 'psdepth', 'stdv', 'nID', 'ogstr', 'ogdip', 'centsurf', 'alen', 'blen', 'clen']]
shift_out = shift_out.merge(tmp_res)
shift_out.loc[shift_out.pslon < 0, 'pslon']+=360
shift_out['avstr'] = np.nan
shift_out['avdip'] = np.nan
shift_out['avrke'] = np.nan
'''Section 6: Third loop
The purpose of this loop is to produce the final location measurements for the slab.
Here we edit the input data by adding the shift to the depths, then calculate a PDF with receiver functions included.
The only output from this loop is a 10 column array with all results necessary to build the output.
Output is of the format [ lat lon dep unc shift_mag shift_unc avg_str avg_dip avg_rak pre-shift_dep pre-shift_str pre-shift_dip nodeID ]
'''
print("Start Section 6 of 7: Third (final) loop")
bilats, bilons, binods, bistds = [], [], [], []
biindx, bistrs, bidips, bideps = [], [], [], []
if pooling:
pool3 = Pool(args.nCores)
partial_loop3 = partial(loops.loop3, shift_out, testarea, used_all, eventlist, sdr, ddr, seismo_thick, these_params, slab, maxthickness, mindip, taper)
indices = shift_out['nID'].values
pts3 = pool3.map(partial_loop3, indices)
pool3.close()
pool3.join()
for i in range(len(indices)):
thisnode = pts3[i]
if np.isfinite(thisnode[0]):
nID = thisnode[13]
shift_out.loc[shift_out.nID == nID, 'depth'] = thisnode[0]
shift_out.loc[shift_out.nID == nID, 'stdv'] = thisnode[1]
shift_out.loc[shift_out.nID == nID, 'avstr'] = thisnode[2]
shift_out.loc[shift_out.nID == nID, 'avdip'] = thisnode[3]
shift_out.loc[shift_out.nID == nID, 'avrke'] = thisnode[4]
shift_out.loc[shift_out.nID == nID, 'lon'] = thisnode[15]
shift_out.loc[shift_out.nID == nID, 'lat'] = thisnode[16]
if np.isfinite(thisnode[5]):
bilats.append(thisnode[5])
bilons.append(thisnode[6])
binods.append(thisnode[7])
bistds.append(thisnode[8])
biindx.append(thisnode[9])
bistrs.append(thisnode[10])
bidips.append(thisnode[11])
bideps.append(thisnode[12])
multi = thisnode[14]
if len(multi) > 0:
postmulti = pd.concat([postmulti, multi],sort=True)
del pts3
else:
for nodeno in shift_out['nID'].values:
crdepth, crstd, crstrike, crdip, crrake, cbilats, cbilons, cbinods, cbistds, cbiindx, cbistrs, cbidips, cbideps, cnID, cpostmulti, cpeak_lon, cpeak_lat = loops.loop3(shift_out, testarea, used_all, eventlist, sdr, ddr, seismo_thick, these_params, slab, maxthickness, mindip, taper, nodeno)
if np.isfinite(crdepth):
nID = cnID
shift_out.loc[shift_out.nID == nID, 'depth'] = crdepth
shift_out.loc[shift_out.nID == nID, 'stdv'] = crstd
shift_out.loc[shift_out.nID == nID, 'avstr'] = crstrike
shift_out.loc[shift_out.nID == nID, 'avdip'] = crdip
shift_out.loc[shift_out.nID == nID, 'avrke'] = crrake
shift_out.loc[shift_out.nID == nID, 'lon'] = cpeak_lon
shift_out.loc[shift_out.nID == nID, 'lat'] = cpeak_lat
if np.isfinite(cbilats):
bilats.append(cbilats)
bilons.append(cbilons)
binods.append(cbinods)
bistds.append(cbistds)
biindx.append(cbiindx)
bistrs.append(cbistrs)
bidips.append(cbidips)
bideps.append(cbideps)
multi = cpostmulti
if len(multi) > 0:
postmulti = pd.concat([postmulti, multi],sort=True)
shift_out.loc[shift_out.lon < 0, 'lon']+=360
for j in range(len(bilats)):
lon = bilons[j]
lat = bilats[j]
nID = binods[j]
stdv = bistds[j]
stk = bistrs[j]
dep = bideps[j]
dip = bidips[j]
if dip <= mindip:
peak_depth = s2f.findMultiDepth(lon, lat, nID, shift_out, grid, postmulti, stk, slab, dep, alen, printtest)
peak_lon = lon
peak_lat = lat
else:
peak_lon, peak_lat, peak_depth = s2f.findMultiDepthP(lon, lat, nID, shift_out, grid, postmulti, stk, slab, dep, dip, alen, printtest)
shift_out.loc[shift_out.nID == nID, 'lon'] = peak_lon
shift_out.loc[shift_out.nID == nID, 'lat'] = peak_lat
shift_out.loc[shift_out.nID == nID, 'depth'] = peak_depth
# Save nodes to file
shift_out.loc[shift_out.lon < 0, 'lon']+=360
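# split the node depth uncertainty into vertical and horizontal components using
# the local dip: vstdv = stdv*cos(dip), hstdv = stdv*sin(dip) (written below via
# the complement 90-dip); e.g. stdv=10 km at dip=30 gives vstdv~8.7, hstdv~5.0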
dip90s = 90.0-shift_out['ogdip'].values
vertunc = shift_out['stdv'].values * (np.sin(np.radians(dip90s)))
horzunc = shift_out['stdv'].values * (np.cos(np.radians(dip90s)))
shift_out['vstdv'] = vertunc
shift_out['hstdv'] = horzunc
if slab == 'sum' or slab == 'kur':
shift_out, rempts = s2f.removeSZnodes(shift_out, fracS, 0.4, seismo_thick)
elif slab == 'camz' or slab == 'sulz':
shift_out, rempts = s2f.removeSZnodes(shift_out, fracS, 0.8, seismo_thick)
elif slab != 'sol' and slab != 'phi' and slab != 'sul' and slab != 'alu' and slab != 'sum':
shift_out, rempts = s2f.removeSZnodes(shift_out, fracS, 0.1, seismo_thick)
else:
rempts = pd.DataFrame()
if len(rempts) > 0:
rempts = rempts[['lon', 'lat', 'depth', 'stdv', 'smag', 'shiftstd', 'avstr', 'avdip', 'avrke', 'psdepth', 'sstr', 'sdip', 'nID', 'pslon', 'pslat', 'bzlon', 'bzlat', 'centsurf','thickness', 'alen', 'blen', 'clen', 'ogstr', 'ogdip','hstdv','vstdv']]
rempts.to_csv(rempFile, header=True, index=False, na_rep=np.nan, float_format='%.2f')
shift_out = shift_out[['lon', 'lat', 'depth', 'stdv', 'smag', 'shiftstd', 'avstr', 'avdip', 'avrke', 'psdepth', 'sstr', 'sdip', 'nID', 'pslon', 'pslat', 'bzlon', 'bzlat', 'centsurf','thickness', 'alen', 'blen', 'clen', 'ogstr', 'ogdip','hstdv','vstdv']]
shift_out.to_csv(nodeFile, header=True, index=False, na_rep=np.nan, float_format='%.2f')
if slab == 'manz' or slab == 'solz' or slab == 'phiz':
lowernodes, shift_out = s2f.nodesift(shift_out, grid)
if slab == 'izuz':
midshiftout = shift_out[(shift_out.lat > 15)&(shift_out.lat < 28)]
outshiftout = shift_out[(shift_out.lat <= 15)|(shift_out.lat >= 28)]
midshiftout = midshiftout[midshiftout.depth<300]
shift_out = pd.concat([midshiftout,outshiftout],sort=True)
if slab == 'solz' or slab == 'sumz':
nodesOG, projnodes = s2f.extendEdges(shift_out,grid,slab)
shift_out = pd.concat([projnodes, shift_out],sort=True)
'''Section 7: Create output
Here we put together all of the output data into the correct form for saving to output files.
First we create a surface with fine spacing of the final data, then we filter it and apply the clipping mask.
Second we populate the output array, and finally we save it.
The output file is of the format [lon lat dep_raw str_raw dip_raw shift_mag dep_shift dep_shift_smooth str_shift_smooth dip_shift_smooth dz1 dz2 dz3 avg_str avg_dip avg_rak]
This file has a regular spacing of fine nodes corresponding to the final surface
The columns for shift_mag, avg_str, avg_dip, and avg_rak are only populated where there was a pre-shift datum.
'''
print("Start Section 7 of 7: Create output")
# Create final surfaces for output
print(" Creating surfaces...")
shift_out = shift_out[(shift_out.nID != 2642178)& (shift_out.nID != 2646182)& (shift_out.nID != 2646184)& (shift_out.nID != 2646186)& (shift_out.nID != 1454068)& (shift_out.nID != 1122062)& (shift_out.nID != 1123062)&(shift_out.nID !=1454068)& (shift_out.nID != 16790448) & (shift_out.nID != 16790449)]
if slab == 'man':
shift_out = shift_out[(shift_out.bzlat > 13.5)|(shift_out.bzlon < 121)]
surfdata = np.zeros((len(shift_out), 4))
surfdata[:, 0], surfdata[:, 1], surfdata[:, 2], surfdata[:, 3] = shift_out['lon'].values, shift_out['lat'].values, shift_out['depth'].values, shift_out['stdv'].values
errordata = np.zeros((len(shift_out), 4))
errordata[:, 0], errordata[:, 1], errordata[:, 2], errordata[:, 3] = shift_out['lon'].values, shift_out['lat'].values, shift_out['stdv'].values, np.ones(len(shift_out))
errordataB = np.zeros((len(shift_out), 4))
errordataB[:, 0], errordataB[:, 1], errordataB[:, 2], errordataB[:, 3] = shift_out['lon'].values, shift_out['lat'].values, shift_out['shiftstd'].values, np.ones(len(shift_out))
thickdata = np.zeros((len(shift_out),4))
thickdata[:, 0], thickdata[:, 1], thickdata[:, 2], thickdata[:, 3] = shift_out['lon'].values, shift_out['lat'].values, shift_out['thickness'].values, np.ones(len(shift_out))
if slab == 'sum':
Surfgrid, xi, dl = s2f.chunksurface(surfdata, node, T, slab, grid, 'depth', time, 'test.txt', filt, pd.DataFrame(), npass, TR_data, meanBA, kdeg, knot_no, rbfs, shift_out,'fin','og','lon',100,110,105)
flipornot = 'flip'
elif slab == 'jap':
Surfgrid, xi, dl = s2f.chunksurface(surfdata, node, T, slab, grid, 'depth', time, 'test.txt', filt, pd.DataFrame(), npass, TR_data, meanBA, kdeg, knot_no, rbfs, shift_out,'fin','og','lat',30,40,35)
flipornot = 'flip'
else:
Surfgrid, xi, dl = s2f.pySurface3(surfdata, node, T, slab, grid, 'depth', time, 'test.txt', filt, pd.DataFrame(), npass, TR_data, meanBA, kdeg, knot_no, rbfs, shift_out,'fin','og')
flipornot = 'dontflip'
sigma = (filt/2.0) / node
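# convert the filter width (same units as the node spacing) into a Gaussian
# sigma in grid-cell units for the ndimage smoothing below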
Errorgrid = s2f.makeErrorgrid(Surfgrid, xi, errordata)
Errorgrid2 = s2f.makeErrorgrid(Surfgrid, xi, errordataB)
thickgrid = s2f.makeErrorgrid(Surfgrid, xi, thickdata)
if slab == 'puy':
filt2 = 0.6
Filtgrid = s2f.specialpuyfilt(Surfgrid,xi,filt,filt2,node)
Errorgrid = s2f.specialpuyfilt(Errorgrid,xi,filt,filt2,node)
Errorgrid2 = s2f.specialpuyfilt(Errorgrid2,xi,filt,filt2,node)
thickgrid = s2f.specialpuyfilt(thickgrid,xi,filt,filt2,node)
elif slab == 'kur':
filt2 = 1.5
Filtgrid = s2f.specialkurfilt(Surfgrid,xi,filt,filt2,node)
Errorgrid = s2f.specialkurfilt(Errorgrid,xi,filt,filt2,node)
Errorgrid2 = s2f.specialkurfilt(Errorgrid2,xi,filt,filt2,node)
thickgrid = s2f.specialkurfilt(thickgrid,xi,filt,filt2,node)
elif slab == 'izu':
filt2 = 1.5
Filtgrid = s2f.specializufilt(Surfgrid,xi,filt,filt2,node)
Errorgrid = s2f.specializufilt(Errorgrid,xi,filt,filt2,node)
Errorgrid2 = s2f.specializufilt(Errorgrid2,xi,filt,filt2,node)
thickgrid = s2f.specializufilt(thickgrid,xi,filt,filt2,node)
else:
Filtgrid = ndimage.filters.gaussian_filter(Surfgrid, sigma, mode='reflect')
Errorgrid = ndimage.filters.gaussian_filter(Errorgrid, sigma, mode='reflect')
Errorgrid2 = ndimage.filters.gaussian_filter(Errorgrid2, sigma, mode='reflect')
thickgrid = ndimage.filters.gaussian_filter(thickgrid, sigma, mode='reflect')
strgrid3, dipgrid3 = s2f.mkSDgrddata(xi, Filtgrid, flipornot)
resdata = np.zeros((len(xi),5))
resdata[:,0] = xi[:,0]
resdata[:,1] = xi[:,1]
resdata[:,2] = Filtgrid.flatten()
resdata[:,3] = strgrid3.flatten()
resdata[:,4] = dipgrid3.flatten()
print(" Identifying contour extents for clipping mask...")
newres = s2f.mkContourClip(shift_out, TR_data, node, resdata, False,slab)
print(" Assigning and sorting clipping mask polygon...")
if len(TR_data)>0:
clip = s2f.clippingmask(newres,TR_data,node,False,slab,'first')
else:
clip = s2f.noTrenchPolygon(newres, node, False, slab)
mask = s2f.maskdatag(clip, xi)
mask.shape = Surfgrid.shape
Filtgrid = (Filtgrid*mask)
Surfgrid = (Surfgrid*mask)
Errorgrid = (Errorgrid*mask)
Errorgrid2 = (Errorgrid2*mask)
thickgrid = (thickgrid*mask)
dipgrid3 = (dipgrid3*mask)
strgrid3 = (strgrid3*mask)
smooth_dif = Surfgrid.flatten()-Filtgrid.flatten()
# Create output array
print(" Populating output array...")
output = (np.zeros([len(xi), 10]) * np.nan)
output[:, 0] = xi[:, 0] # lon Longitude at node (not shifted)
output[:, 1] = xi[:, 1] # lat Latitude at node
output[:, 2] = Surfgrid.flatten() # dep_shift Post-shift surface depth before smoothing
output[:, 3] = Filtgrid.flatten() # dep_shift_smooth Post-shift surface depth after smoothing
output[:, 4] = strgrid3.flatten() # str_shift_smooth Post-shift surface strike after smoothing (strike was not smoothed - only depth was smoothed)
output[:, 5] = dipgrid3.flatten() # dip_shift_smooth Post-shift surface dip after smoothing
output[:, 6] = Errorgrid.flatten() # dz1 Interpolated, but unsmoothed uncertainty from raw data
output[:, 7] = Errorgrid2.flatten() #dz2 Interpolated, unsmoothed uncertainty from shift
output[:, 8] = smooth_dif.flatten() # dz3 error induced by smoothing (taken as the standard deviation of smoothed-unsmoothed)
output[:, 9] = thickgrid.flatten() # thickness Interpolated, unsmoothed thickness
output[:, 0][output[:, 0]<0]+=360
clip.loc[clip.lon < 0, 'lon']+=360
# mask out nodes deeper than the deepest input datum; compute the mask once, before
# column 3 is overwritten with NaN, so every column is masked consistently
toodeep = output[:, 3] > shift_out['depth'].max()
output[toodeep, 2:] = np.nan
if slab in ('phi', 'sul', 'cot'):
halfolder = 'hal_slab2_12.22.17'
print ('clipping grid by underriding model: %s ... '%undergrid)
output = s2f.underclip(output,undergrid)
finoutput = output[np.isfinite(output[:,3])]
newres = pd.DataFrame({'lon':finoutput[:,0], 'lat':finoutput[:,1], 'depth':finoutput[:,3], 'strike':finoutput[:,4], 'dip':finoutput[:,5]})
clip = s2f.clippingmask(newres,TR_data,node,False,slab,'first')
if slab == 'ryu':
kurfolder = 'kur_slab2_12.22.17'
print ('clipping grid by underriding model: %s ... '%undergrid)
output = s2f.underclip(output,undergrid)
finoutput = output[np.isfinite(output[:,3])]
newres = pd.DataFrame({'lon':finoutput[:,0], 'lat':finoutput[:,1], 'depth':finoutput[:,3], 'strike':finoutput[:,4], 'dip':finoutput[:,5]})
clip = s2f.clippingmask(newres,TR_data,node,False,slab,'first')
if slab == 'mue':
carfolder = 'car_slab2_12.22.17'
print ('clipping grid by underriding model: %s ... '%undergrid)
output = s2f.underclip(output,undergrid)
finoutput = output[np.isfinite(output[:,3])]
newres = pd.DataFrame({'lon':finoutput[:,0], 'lat':finoutput[:,1], 'depth':finoutput[:,3], 'strike':finoutput[:,4], 'dip':finoutput[:,5]})
clip = s2f.clippingmask(newres,TR_data,node,False,slab,'first')
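# The next block (for 'kur') and its twin for 'izu' further below trim the grids at 35N,
# which appears to be where the two regional models meet: the two clipping-polygon vertices
# nearest to that latitude are snapped onto exactly 35.0 so the shared edge lines up, and the
# polygon is re-closed if its first and last points no longer coincide.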
if slab == 'kur':
output = output[output[:,1] >= 35]
clip = clip[clip.lat >= 35]
clip['dist1'] = np.abs(35-clip['lat'].values)
closest = clip[clip.dist1 == clip['dist1'].min()]
lonc, latc = closest['lon'].values[0], closest['lat'].values[0]
clip['dist2'] = np.abs(lonc-clip['lon'].values)
clip['dist3'] = clip['dist1'].values/clip['dist2'].values/clip['dist2'].values
closest2 = clip[clip.dist3 == clip['dist3'].min()]
lonc2, latc2 = closest2['lon'].values[0], closest2['lat'].values[0]
clip.loc[(clip.lon == lonc)&(clip.lat == latc), 'lat'] = 35.0
clip.loc[(clip.lon == lonc2)&(clip.lat == latc2), 'lat'] = 35.0
if clip['lon'].values[0] != clip['lon'].values[-1] or clip['lat'].values[0] != clip['lat'].values[-1]:
pointbeg = clip.iloc[[0]]
clip = pd.concat([clip, pointbeg],sort=True)
clip = clip[['lon','lat']]
# Save results to file
print(" Saving results and data to file...")
np.savetxt(outFile, output, header='lon,lat,raw_dep,dep_shift_smooth,str_shift_smooth,dip_shift_smooth,dz1,dz2,dz3,thickness',fmt='%.2f', delimiter=',',comments='')
# Save clipping mask to file
clip = clip[['lon', 'lat']]
clip.to_csv(clipFile, float_format='%.2f', sep=' ', header=False, index=False)
if slab in ('izu', 'jap', 'sol', 'man', 'ker', 'hinz', 'pamz'):
print(" PSYCH! Solving for vertical component of this slab region ...")
clip, output, supplement, nodes, deepnodes = s2f.splitsurface(nodeFile,outFile,clipFile,trenches,node,filt,grid,slab, knot_no, kdeg, rbfs, folder)
supplement = supplement[['lon','lat','depth','strike','dip','dz1','dz2','dz3','thickness']]
nodes.to_csv(nodeuFile, header=True, index=False, na_rep=np.nan, float_format='%.2f')
deepnodes.to_csv(nodexFile, header=True, index=False, na_rep=np.nan, float_format='%.2f')
supplement.to_csv(suppFile, header=True, index=False, na_rep=np.nan, float_format='%.4f')
if slab == 'izu':
output = output[output[:,1] <= 35]
clip = clip[clip.lat <= 35]
clip['dist1'] = np.abs(35-clip['lat'].values)
closest = clip[clip.dist1 == clip['dist1'].min()]
lonc, latc = closest['lon'].values[0], closest['lat'].values[0]
clip['dist2'] = np.abs(lonc-clip['lon'].values)
clip['dist3'] = clip['dist1'].values/clip['dist2'].values/clip['dist2'].values
closest2 = clip[clip.dist3 == clip['dist3'].min()]
lonc2, latc2 = closest2['lon'].values[0], closest2['lat'].values[0]
clip.loc[(clip.lon == lonc)&(clip.lat == latc), 'lat'] = 35.0
clip.loc[(clip.lon == lonc2)&(clip.lat == latc2), 'lat'] = 35.0
if clip['lon'].values[0] != clip['lon'].values[-1] or clip['lat'].values[0] != clip['lat'].values[-1]:
pointbeg = clip.iloc[[0]]
clip = pd.concat([clip, pointbeg],sort=True)
clip = clip[['lon','lat']]
print(" Saving results and data to file...")
clip = clip[['lon', 'lat']]
clip.to_csv(clipFile, float_format='%.2f', sep=' ', header=False, index=False)
np.savetxt(outFile, output, header='lon,lat,raw_dep,dep_shift_smooth,str_shift_smooth,dip_shift_smooth,dz1,dz2,dz3,thickness',fmt='%.2f', delimiter=',',comments='')
xmin = np.min(output[:,0])
xmax = np.max(output[:,0])
ymin = np.min(output[:,1])
ymax = np.max(output[:,1])
deps = pd.DataFrame({'lon':output[:,0], 'lat': output[:,1], 'depth':output[:,3]*-1.0})
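# deps collects the final smoothed surface with depth negated, presumably to follow the
# convention of storing slab depths as negative values (km below sea level) in the output grids.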
# -*- coding: utf-8 -*-
# @Time : 2021/4/20 12:54
# @File : danjuan_fund_data_analysis.py
# @Author : Rocky <EMAIL>
# Danjuan fund data analysis
import datetime
import sys
from collections import defaultdict
sys.path.append('..')
from configure.settings import DBSelector
from common.BaseService import BaseService
import pandas as pd
WEEK_DAY = -7 # number of days back to last week's prices
class DanjuanAnalyser(BaseService):
def __init__(self):
super(DanjuanAnalyser, self).__init__('../log/Danjuan_analysis.log')
def select_collection(self,current_date):
'''
Select the MongoDB collection for the given date.
'''
self.db = DBSelector().mongo(location_type='qq')
doc = self.db['db_danjuan'][f'danjuan_fund_{current_date}']
return doc
def get_top_plan(self,collection,top=10):
fund_dict = {}
for item in collection.find({},{'holding':1}):
plan_holding = item.get('holding',[]) # list
for hold in plan_holding:
name = hold['fd_name']
if hold['percent']>0:
fund_dict.setdefault(name,0)
fund_dict[name]+=1
fund_dict=list(sorted(fund_dict.items(),key=lambda x:x[1],reverse=True))[:top]
return fund_dict
def get_top_plan_percent(self,collection,top=10):
fund_dict = {}
for item in collection.find({},{'holding':1}):
plan_holding = item.get('holding',[]) # list
for hold in plan_holding:
name = hold['fd_name']
percent =hold['percent']
fund_dict.setdefault(name,0)
fund_dict[name]+=percent
fund_dict=list(sorted(fund_dict.items(),key=lambda x:x[1],reverse=True))[:top]
return fund_dict
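# Both helpers above return a list of (fund_name, value) tuples sorted in descending order:
# get_top_plan ranks funds by how many plans hold them, get_top_plan_percent by the summed
# holding percentage across all plans.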
def start(self):
today=datetime.datetime.now()
last_week = today + datetime.timedelta(days=WEEK_DAY)
last_week_str = last_week.strftime('%Y-%m-%d')
# last week's data was not collected, so use historical data as a stand-in
last_week_str = '2021-04-20' # must be a date whose collection has already been saved
today_doc = self.select_collection(self.today)
last_week_doc = self.select_collection(last_week_str)
# ranked by number of plans holding each fund
fund_dict = self.get_top_plan(today_doc,20)
self.pretty(fund_dict,self.today,'count')
old_fund_dict = self.get_top_plan(last_week_doc,20)
self.pretty(old_fund_dict,last_week_str,'count')
diff_set = self.new_fund(fund_dict,old_fund_dict)
print('Newly added funds entering the top list')
print(diff_set)
# ranked by holding percentage
new_fund_percent = self.get_top_plan_percent(today_doc,20)
old_fund_percent = self.get_top_plan_percent(last_week_doc,20)
self.pretty(new_fund_percent,self.today,'percent')
self.pretty(old_fund_percent,last_week_str,'percent')
# funds that have been fully sold off (cleared positions)
clean_fund = self.clear_warehouse_fund(today_doc,200)
self.simple_display(clean_fund,self.today)
def simple_display(self,data,date):
for i in data:
print(i)
df = pd.DataFrame(data,columns=['fund','clear_num'])
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import re
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn import preprocessing, model_selection, metrics
import lightgbm as lgb
import gc
train_df = pd.read_csv('../input/train.csv', parse_dates=["activation_date"])
test_df = pd.read_csv('../input/test.csv', parse_dates=["activation_date"])
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import random
import nltk
nltk.data.path.append("/media/sayantan/Personal/nltk_data")
from nltk.stem.snowball import RussianStemmer
from fuzzywuzzy import fuzz
from nltk.corpus import stopwords
from tqdm import tqdm
from scipy.stats import skew, kurtosis
from scipy.spatial.distance import cosine, cityblock, jaccard, canberra, euclidean, minkowski, braycurtis
from nltk import word_tokenize
stopwords = stopwords.words('russian')
def genFeatures(x):
x["activation_weekday"] = x["activation_date"].dt.weekday
x["monthday"] = x["activation_date"].dt.day
x["weekinmonday"] = x["monthday"] // 7
##################Added in set 1 - 0.01 Improvement
x['price_new'] = np.log1p(x.price) # log transform improves correlation with deal_probability
x['count_null_in_row'] = x.isnull().sum(axis=1)# works
x['has_description'] = x.description.isnull().astype(int)
x['has_image'] = x.image.isnull().astype(int)
x['has_image_top'] = x.image_top_1.isnull().astype(int)
x['has_param1'] = x.param_1.isnull().astype(int)
x['has_param2'] = x.param_2.isnull().astype(int)
x['has_param3'] = x.param_3.isnull().astype(int)
x['has_price'] = x.price.isnull().astype(int)
#################Added in set 2 - 0.00x Improvement
x["description"].fillna("NA", inplace=True)
x["desc_nwords"] = x["description"].apply(lambda x: len(x.split()))
x['len_description'] = x['description'].apply(lambda x: len(x))
x["title_nwords"] = x["title"].apply(lambda x: len(x.split()))
x['len_title'] = x['title'].apply(lambda x: len(x))
x['params'] = x['param_1'].fillna('') + ' ' + x['param_2'].fillna('') + ' ' + x['param_3'].fillna('')
x['params'] = x['params'].str.strip()
x['len_params'] = x['params'].apply(lambda x: len(x))
x['words_params'] = x['params'].apply(lambda x: len(x.split()))
x['symbol1_count'] = x['description'].str.count('↓')
x['symbol2_count'] = x['description'].str.count('\*')
x['symbol3_count'] = x['description'].str.count('✔')
x['symbol4_count'] = x['description'].str.count('❀')
x['symbol5_count'] = x['description'].str.count('➚')
x['symbol6_count'] = x['description'].str.count('ஜ')
x['symbol7_count'] = x['description'].str.count('.')
x['symbol8_count'] = x['description'].str.count('!')
x['symbol9_count'] = x['description'].str.count('\?')
x['symbol10_count'] = x['description'].str.count(' ')
x['symbol11_count'] = x['description'].str.count('-')
x['symbol12_count'] = x['description'].str.count(',')
####################
return x
train_df = genFeatures(train_df)
test_df = genFeatures(test_df)
test_df['deal_probability']=10.0
############################
english_stemmer = nltk.stem.SnowballStemmer('russian')
def clean_text(text):
#text = re.sub(r'(\d+),(\d+)', r'\1.\2', text)
text = text.replace(u'²', '2')
text = text.lower()
text = re.sub(u'[^a-zа-я0-9]', ' ', text)
text = re.sub('\s+', ' ', text)
return text.strip()
def stem_tokens(tokens, stemmer):
stemmed = []
for token in tokens:
#stemmed.append(stemmer.lemmatize(token))
stemmed.append(stemmer.stem(token))
return stemmed
def preprocess_data(line,
exclude_stopword=True,
encode_digit=False):
## tokenize
line = clean_text(line)
tokens = [x.lower() for x in nltk.word_tokenize(line)]
## stem
tokens_stemmed = stem_tokens(tokens, english_stemmer)#english_stemmer
if exclude_stopword:
tokens_stemmed = [x for x in tokens_stemmed if x not in stopwords]
return ' '.join(tokens_stemmed)
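# The two helpers above normalise free text before vectorisation: clean_text lowercases,
# maps the superscript '²' to '2' and strips everything except Latin/Cyrillic letters and digits;
# preprocess_data then tokenises with NLTK, stems each token with the Russian Snowball stemmer
# and drops Russian stopwords, returning a space-joined string ready for TF-IDF.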
train_test = pd.concat((train_df, test_df), axis = 'rows')
## After cleaning => then find intersection
train_test["title_clean"]= list(train_test[["title"]].apply(lambda x: preprocess_data(x["title"]), axis=1))
train_test["desc_clean"]= list(train_test[["description"]].apply(lambda x: preprocess_data(x["description"]), axis=1))
train_test["params_clean"]= list(train_test[["params"]].apply(lambda x: preprocess_data(x["params"]), axis=1))
train_test['count_common_words_title_desc'] = train_test.apply(lambda x: len(set(str(x['title_clean']).lower().split()).intersection(set(str(x['desc_clean']).lower().split()))), axis=1)
train_test['count_common_words_title_params'] = train_test.apply(lambda x: len(set(str(x['title_clean']).lower().split()).intersection(set(str(x['params_clean']).lower().split()))), axis=1)
train_test['count_common_words_params_desc'] = train_test.apply(lambda x: len(set(str(x['params_clean']).lower().split()).intersection(set(str(x['desc_clean']).lower().split()))), axis=1)
print("Cleaned texts..")
###################
# Count Nouns
import pymorphy2
morph = pymorphy2.MorphAnalyzer(result_type=None)
from fastcache import clru_cache as lru_cache
@lru_cache(maxsize=1000000)
def lemmatize_pos(word):
_, tag, norm_form, _, _ = morph.parse(word)[0]
return norm_form, tag.POS
def getPOS(x, pos1 = 'NOUN'):
lemmatized = []
x = clean_text(x)
#x = re.sub(u'[.]', ' ', x)
for s in x.split():
s, pos = lemmatize_pos(s)
if pos != None:
if pos1 in pos:
lemmatized.append(s)
return ' '.join(lemmatized)
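# getPOS keeps only the tokens whose pymorphy2 tag contains the requested part-of-speech
# substring (NOUN/ADJ/VERB in the calls below) and returns their normal forms; lemmatize_pos
# is wrapped in an LRU cache because the same word forms repeat across many ads, so caching
# avoids re-running the comparatively slow morphological analysis for every occurrence.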
train_test['get_nouns_title'] = list(train_test.apply(lambda x: getPOS(x['title'], 'NOUN'), axis=1))
train_test['get_nouns_desc'] = list(train_test.apply(lambda x: getPOS(x['description'], 'NOUN'), axis=1))
train_test['get_adj_title'] = list(train_test.apply(lambda x: getPOS(x['title'], 'ADJ'), axis=1))
train_test['get_adj_desc'] = list(train_test.apply(lambda x: getPOS(x['description'], 'ADJ'), axis=1))
train_test['get_verb_title'] = list(train_test.apply(lambda x: getPOS(x['title'], 'VERB'), axis=1))
train_test['get_verb_desc'] = list(train_test.apply(lambda x: getPOS(x['description'], 'VERB'), axis=1))
# Count digits
def count_digit(x):
x = clean_text(x)
return len(re.findall(r'\b\d+\b', x))
train_test['count_of_digit_in_title'] = list(train_test.apply(lambda x: count_digit(x['title']), axis=1))
train_test['count_of_digit_in_desc'] = list(train_test.apply(lambda x: count_digit(x['description']), axis=1))
train_test['count_of_digit_in_params'] = list(train_test.apply(lambda x: count_digit(x['params']), axis=1))
## get unicode features
count_unicode = lambda x: len([c for c in x if ord(c) > 1105])
count_distunicode = lambda x: len({c for c in x if ord(c) > 1105})
train_test['count_of_unicode_in_title'] = list(train_test.apply(lambda x: count_unicode(x['title']), axis=1))
train_test['count_of_unicode_in_desc'] = list(train_test.apply(lambda x: count_distunicode(x['description']), axis=1))
train_test['count_of_distuni_in_title'] = list(train_test.apply(lambda x: count_unicode(x['title']), axis=1))
train_test['count_of_distuni_in_desc'] = list(train_test.apply(lambda x: count_distunicode(x['description']), axis=1))
###
count_caps = lambda x: len([c for c in x if c.isupper()])
train_test['count_caps_in_title'] = list(train_test.apply(lambda x: count_caps(x['title']), axis=1))
train_test['count_caps_in_desc'] = list(train_test.apply(lambda x: count_caps(x['description']), axis=1))
import string
count_punct = lambda x: len([c for c in x if c in string.punctuation])
train_test['count_punct_in_title'] = list(train_test.apply(lambda x: count_punct(x['title']), axis=1))
train_test['count_punct_in_desc'] = list(train_test.apply(lambda x: count_punct(x['description']), axis=1))
print("Computed POS Features and others..")
train_test['count_common_nouns'] = train_test.apply(lambda x: len(set(str(x['get_nouns_title']).lower().split()).intersection(set(str(x['get_nouns_desc']).lower().split()))), axis=1)
train_test['count_common_adj'] = train_test.apply(lambda x: len(set(str(x['get_adj_title']).lower().split()).intersection(set(str(x['get_adj_desc']).lower().split()))), axis=1)
train_test['ratio_of_unicode_in_title'] = train_test['count_of_unicode_in_title'] / train_test['len_title']
train_test['ratio_of_unicode_in_desc'] = train_test['count_of_unicode_in_desc'] / train_test['len_description']
train_test['ratio_of_punct_in_title'] = train_test['count_punct_in_title'] / train_test['len_title']
train_test['ratio_of_punct_in_desc'] = train_test['count_punct_in_desc'] / train_test['len_description']
train_test['ratio_of_cap_in_title'] = train_test['count_caps_in_title'] / train_test['len_title']
train_test['ratio_of_cap_in_desc'] = train_test['count_caps_in_desc'] / train_test['len_description']
train_test['count_nouns_in_title'] = train_test["get_nouns_title"].apply(lambda x: len(x.split()))
train_test['count_nouns_in_desc'] = train_test['get_nouns_desc'].apply(lambda x: len(x.split()))
train_test['count_adj_in_title'] = train_test["get_adj_title"].apply(lambda x: len(x.split()))
train_test['count_adj_in_desc'] = train_test['get_adj_desc'].apply(lambda x: len(x.split()))
train_test['count_verb_title'] = train_test['get_verb_title'].apply(lambda x: len(x.split()))
train_test['count_verb_desc'] = train_test['get_verb_desc'].apply(lambda x: len(x.split()))
train_test['ratio_nouns_in_title'] = train_test["count_nouns_in_title"] / train_test["title_nwords"]
train_test['ratio_nouns_in_desc'] = train_test["count_nouns_in_desc"] / train_test["desc_nwords"]
train_test['ratio_adj_in_title'] = train_test["count_adj_in_title"] / train_test["title_nwords"]
train_test['ratio_adj_in_desc'] = train_test["count_adj_in_desc"] / train_test["desc_nwords"]
train_test['ratio_vrb_in_title'] = train_test["count_verb_title"] / train_test["title_nwords"]
train_test['ratio_vrb_in_desc'] = train_test["count_verb_desc"] / train_test["desc_nwords"]
train_test["title"]= list(train_test[["title"]].apply(lambda x: clean_text(x["title"]), axis=1))
train_test["description"]= list(train_test[["description"]].apply(lambda x: clean_text(x["description"]), axis=1))
train_test["params"]= list(train_test[["params"]].apply(lambda x: clean_text(x["params"]), axis=1))
#######################
### Save
#######################
train_df = train_test.loc[train_test.deal_probability != 10].reset_index(drop = True)
test_df = train_test.loc[train_test.deal_probability == 10].reset_index(drop = True)
for c in train_df.columns:
if train_df[c].dtype == 'float64':
train_df[c] = train_df[c].astype('float32')
test_df[c] = test_df[c].astype('float32')
train_df.to_feather('../train_basic_features.pkl')
test_df.to_feather('../test__basic_features.pkl')
#######################
### Label Enc
#######################
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, MinMaxScaler
cat_vars = ["user_id", "region", "city", "parent_category_name", "category_name", "user_type", "param_1", "param_2", "param_3"]
for col in cat_vars:
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train_df[col].values.astype('str')) + list(test_df[col].values.astype('str')))
train_df[col] = lbl.transform(list(train_df[col].values.astype('str')))
test_df[col] = lbl.transform(list(test_df[col].values.astype('str')))
train_df.to_feather('../train_basic_features_lblencCats.pkl')
test_df.to_feather('../test__basic_features_lblencCats.pkl')
#######################
### One hots
#######################
train_df=pd.read_feather('../train_basic_features_lblencCats.pkl')
test_df=pd.read_feather('../test__basic_features_lblencCats.pkl')
from sklearn.externals import joblib
le = OneHotEncoder()
X = le.fit_transform(np.array(train_df.user_id.values.tolist() + test_df.user_id.values.tolist()).reshape(-1,1))
joblib.dump(X, "../user_id_onehot.pkl")
X = le.fit_transform(np.array(train_df.region.values.tolist() + test_df.region.values.tolist()).reshape(-1,1))
joblib.dump(X, "../region_onehot.pkl")
X = le.fit_transform(np.array(train_df.city.values.tolist() + test_df.city.values.tolist()).reshape(-1,1))
joblib.dump(X, "../city_onehot.pkl")
X = le.fit_transform(np.array(train_df.parent_category_name.values.tolist() + test_df.parent_category_name.values.tolist()).reshape(-1,1))
joblib.dump(X, "../parent_category_name_onehot.pkl")
X = le.fit_transform(np.array(train_df.category_name.values.tolist() + test_df.category_name.values.tolist()).reshape(-1,1))
joblib.dump(X, "../category_name_onehot.pkl")
X = le.fit_transform(np.array(train_df.user_type.values.tolist() + test_df.user_type.values.tolist()).reshape(-1,1))
joblib.dump(X, "../user_type_onehot.pkl")
X = le.fit_transform(np.array(train_df.param_1.values.tolist() + test_df.param_1.values.tolist()).reshape(-1,1))
joblib.dump(X, "../param_1_onehot.pkl")
X = le.fit_transform(np.array(train_df.param_2.values.tolist() + test_df.param_2.values.tolist()).reshape(-1,1))
joblib.dump(X, "../param_2_onehot.pkl")
X = le.fit_transform(np.array(train_df.param_3.values.tolist() + test_df.param_3.values.tolist()).reshape(-1,1))
joblib.dump(X, "../param_3_onehot.pkl")
train_df.drop(cat_vars, inplace = True, axis = 'columns')
test_df.drop(cat_vars, inplace = True, axis = 'columns')
train_df.to_feather('../train_basic_features_woCats.pkl')
test_df.to_feather('../test__basic_features_woCats.pkl')
#######################
### Tfidf
#######################
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
from sklearn.externals import joblib
### TFIDF Vectorizer ###
train_df['params'] = train_df['params'].fillna('NA')
test_df['params'] = test_df['params'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000,#min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
#TfidfVectorizer(ngram_range=(1,2))
full_tfidf = tfidf_vec.fit_transform(train_df['params'].values.tolist() + test_df['params'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['params'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['params'].values.tolist())
del full_tfidf
print("TDIDF Params UNCLEAN..")
joblib.dump([train_tfidf, test_tfidf], "../params_tfidf.pkl")
### TFIDF Vectorizer ###
train_df['title_clean'] = train_df['title_clean'].fillna('NA')
test_df['title_clean'] = test_df['title_clean'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2),max_features = 20000,#,min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_df['title_clean'].values.tolist() + test_df['title_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../title_tfidf.pkl")
del full_tfidf
print("TDIDF TITLE CLEAN..")
### TFIDF Vectorizer ###
train_df['desc_clean'] = train_df['desc_clean'].fillna(' ')
test_df['desc_clean'] = test_df['desc_clean'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2), max_features = 20000, #,min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_df['desc_clean'].values.tolist() + test_df['desc_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['desc_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['desc_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../desc_tfidf.pkl")
del full_tfidf
print("TDIDF DESC CLEAN..")
### TFIDF Vectorizer ###
train_df['get_nouns_title'] = train_df['get_nouns_title'].fillna(' ')
test_df['get_nouns_title'] = test_df['get_nouns_title'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_nouns_title'].values.tolist() + test_df['get_nouns_title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_nouns_title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_nouns_title'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../nouns_title_tfidf.pkl")
del full_tfidf
print("TDIDF Title Noun..")
### TFIDF Vectorizer ###
train_df['get_nouns_desc'] = train_df['get_nouns_desc'].fillna(' ')
test_df['get_nouns_desc'] = test_df['get_nouns_desc'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_nouns_desc'].values.tolist() + test_df['get_nouns_desc'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_nouns_desc'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_nouns_desc'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../nouns_desc_tfidf.pkl")
del full_tfidf
print("TDIDF Desc Noun..")
### TFIDF Vectorizer ###
train_df['get_adj_title'] = train_df['get_adj_title'].fillna(' ')
test_df['get_adj_title'] = test_df['get_adj_title'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_adj_title'].values.tolist() + test_df['get_adj_title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_adj_title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_adj_title'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../adj_title_tfidf.pkl")
del full_tfidf
print("TDIDF TITLE Adj..")
### TFIDF Vectorizer ###
train_df['get_adj_desc'] = train_df['get_adj_desc'].fillna(' ')
test_df['get_adj_desc'] = test_df['get_adj_desc'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_adj_desc'].values.tolist() + test_df['get_adj_desc'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_adj_desc'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_adj_desc'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../adj_desc_tfidf.pkl")
del full_tfidf
print("TDIDF Desc Adj..")
### TFIDF Vectorizer ###
train_df['get_verb_title'] = train_df['get_verb_title'].fillna(' ')
test_df['get_verb_title'] = test_df['get_verb_title'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_verb_title'].values.tolist() + test_df['get_verb_title'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_verb_title'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_verb_title'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../verb_title_tfidf.pkl")
del full_tfidf
print("TDIDF TITLE Verb..")
### TFIDF Vectorizer ###
train_df['get_verb_desc'] = train_df['get_verb_desc'].fillna(' ')
test_df['get_verb_desc'] = test_df['get_verb_desc'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 10000)
full_tfidf = tfidf_vec.fit_transform(train_df['get_verb_desc'].values.tolist() + test_df['get_verb_desc'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['get_verb_desc'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['get_verb_desc'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../verb_desc_tfidf.pkl")
del full_tfidf
print("TDIDF Desc Verb..")
###############################
# Sentence to seq
###############################
print('Generate Word Sequences')
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
MAX_NUM_OF_WORDS = 100000
TIT_MAX_SEQUENCE_LENGTH = 100
df = pd.concat((train_df, test_df), axis = 'rows')
tokenizer = Tokenizer(num_words=MAX_NUM_OF_WORDS)
tokenizer.fit_on_texts(df['title'].tolist())
sequences = tokenizer.texts_to_sequences(df['title'].tolist())
titleSequences = pad_sequences(sequences, maxlen=TIT_MAX_SEQUENCE_LENGTH)
joblib.dump(titleSequences, "../titleSequences.pkl")
MAX_NUM_OF_WORDS = 10000
TIT_MAX_SEQUENCE_LENGTH = 20
tokenizer = Tokenizer(num_words=MAX_NUM_OF_WORDS)
tokenizer.fit_on_texts(df['params'].tolist())
sequences = tokenizer.texts_to_sequences(df['params'].tolist())
titleSequences = pad_sequences(sequences, maxlen=TIT_MAX_SEQUENCE_LENGTH)
joblib.dump(titleSequences, "../paramSequences.pkl")
MAX_NUM_OF_WORDS = 100000
TIT_MAX_SEQUENCE_LENGTH = 100
tokenizer = Tokenizer(num_words=MAX_NUM_OF_WORDS)
tokenizer.fit_on_texts(df['description'].tolist())
sequences = tokenizer.texts_to_sequences(df['description'].tolist())
titleSequences = pad_sequences(sequences, maxlen=TIT_MAX_SEQUENCE_LENGTH)
joblib.dump(titleSequences, "../descSequences.pkl")
#######OHC WeekDay
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, MinMaxScaler
le = OneHotEncoder()
X = le.fit_transform(np.array(train_df.activation_weekday.values.tolist() + test_df.activation_weekday.values.tolist()).reshape(-1,1))
################################################
# Cat encoding
################################################
train_df=pd.read_feather('../train_basic_features.pkl')
test_df=pd.read_feather('../test__basic_features.pkl')
def catEncode(train_char, test_char, y, colLst = [], nbag = 10, nfold = 20, minCount = 3, postfix = ''):
train_df = train_char.copy()
test_df = test_char.copy()
if not colLst:
print("Empty ColLst")
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],4))
enc_mat_test = np.zeros((test_char.shape[0],4))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby([c]).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
# datax = datax.loc[datax.y_len > minCount]
ind = c + postfix
datax.rename(columns = {'y_mean': ('y_mean_' + ind), 'y_std': ('y_std_' + ind),
'y_len': ('y_len_' + ind), 'y_median': ('y_median_' + ind),}, inplace = True)
# datax[c+'_medshftenc'] = datax['y_median']-med_y
# datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(np.mean(y))
datatst = datatst.join(datax,on=[c], how='left').fillna(np.mean(y))
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold * nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[ind + str(x) for x in list(set(datax.columns)-set([c]))]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=enc_mat.columns
train_df = pd.concat((enc_mat.reset_index(drop = True),train_df.reset_index(drop = True)), axis=1)
test_df = pd.concat([enc_mat_test.reset_index(drop = True),test_df.reset_index(drop = True)],axis=1)
else:
print("Not Empty ColLst")
data = train_char[colLst].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],4))
enc_mat_test = np.zeros((test_char.shape[0],4))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(colLst).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
# datax = datax.loc[datax.y_len > minCount]
ind = '_'.join(colLst) + postfix
datax.rename(columns = {'y_mean': ('y_mean_' + ind), 'y_std': ('y_std_' + ind),
'y_len': ('y_len_' + ind), 'y_median': ('y_median_' + ind),}, inplace = True)
datatst = test_char[colLst].copy()
val_X = val_X.join(datax,on=colLst, how='left').fillna(np.mean(y))
datatst = datatst.join(datax,on=colLst, how='left').fillna(np.mean(y))
print(val_X[list(set(datax.columns)-set(colLst))].columns)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set(colLst))]
enc_mat_test += datatst[list(set(datax.columns)-set(colLst))]
enc_mat_test /= (nfold * nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[ind + str(x) for x in list(set(datax.columns)-set(colLst))]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=enc_mat.columns
train_df = pd.concat((enc_mat.reset_index(drop = True),train_df.reset_index(drop = True)), axis=1)
test_df = pd.concat([enc_mat_test.reset_index(drop = True),test_df.reset_index(drop = True)],axis=1)
print(train_df.columns)
print(test_df.columns)
for c in train_df.columns:
if train_df[c].dtype == 'float64':
train_df[c] = train_df[c].astype('float32')
test_df[c] = test_df[c].astype('float32')
return train_df, test_df
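# catEncode above is a bagged, out-of-fold target-encoding helper: for every categorical column
# (or the column combination in colLst) it computes len/mean/std/median of the target on the
# other folds and assigns them to the held-out fold, repeats this over `nbag` random fold splits
# and averages, which limits target leakage into the training rows; test rows receive the average
# over all folds and bags, and unseen categories fall back to the global target mean via
# fillna(np.mean(y)).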
catCols = ['user_id', 'region', 'city', 'parent_category_name',
'category_name', 'user_type']
train_df, test_df = catEncode(train_df[catCols].copy(), test_df[catCols].copy(), train_df.deal_probability.values, nbag = 10, nfold = 10, minCount = 0)
train_df.to_feather('../train_cat_targetenc.pkl')
test_df.to_feather('../test_cat_targetenc.pkl')
################################################################
# Tfidf - part 2
################################################################
import os; os.environ['OMP_NUM_THREADS'] = '1'
from sklearn.decomposition import TruncatedSVD
import nltk
nltk.data.path.append("/media/sayantan/Personal/nltk_data")
from nltk.stem.snowball import RussianStemmer
from nltk.corpus import stopwords
import time
from typing import List, Dict
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer as Tfidf
from sklearn.model_selection import KFold
from sklearn.externals import joblib
from scipy.sparse import hstack, csr_matrix
import pandas as pd
import numpy as np
import gc
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, MinMaxScaler
from sklearn import model_selection
english_stemmer = nltk.stem.SnowballStemmer('russian')
def clean_text(text):
#text = re.sub(r'(\d+),(\d+)', r'\1.\2', text)
text = text.replace(u'²', '2')
text = text.lower()
text = re.sub(u'[^a-zа-я0-9]', ' ', text)
text = re.sub('\s+', ' ', text)
return text.strip()
def stem_tokens(tokens, stemmer):
stemmed = []
for token in tokens:
#stemmed.append(stemmer.lemmatize(token))
stemmed.append(stemmer.stem(token))
return stemmed
def preprocess_data(line,
exclude_stopword=True,
encode_digit=False):
## tokenize
line = clean_text(line)
tokens = [x.lower() for x in nltk.word_tokenize(line)]
## stem
tokens_stemmed = stem_tokens(tokens, english_stemmer)#english_stemmer
if exclude_stopword:
tokens_stemmed = [x for x in tokens_stemmed if x not in stopwords]
return ' '.join(tokens_stemmed)
stopwords = stopwords.words('russian')
train_per=pd.read_csv('../input/train_active.csv', usecols = ['param_1', 'param_2', 'param_3'])#,'title','description'])
test_per=pd.read_csv('../input/test_active.csv', usecols = ['param_1', 'param_2', 'param_3'])#,'title','description'])
train_test = pd.concat((train_per, test_per), axis = 'rows')
del train_per, test_per; gc.collect()
train_test['params'] = train_test['param_1'].fillna('') + ' ' + train_test['param_2'].fillna('') + ' ' + train_test['param_3'].fillna('')
import re
train_test.drop(['param_1', 'param_2', 'param_3'], axis = 'columns', inplace=True)
train_test["params"]= list(train_test[["params"]].apply(lambda x: clean_text(x["params"]), axis=1))
import re
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
from sklearn.externals import joblib
### TFIDF Vectorizer ###
train_df['params'] = train_df['params'].fillna('NA')
test_df['params'] = test_df['params'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000,#min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
#TfidfVectorizer(ngram_range=(1,2))
full_tfidf = tfidf_vec.fit_transform(train_test['params'].values.tolist() + train_df['params'].values.tolist() + test_df['params'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['params'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['params'].values.tolist())
del full_tfidf
print("TDIDF Params UNCLEAN..")
joblib.dump([train_tfidf, test_tfidf], "../params_tfidf2.pkl")
tfidf_vec = TfidfVectorizer(ngram_range=(1,1),max_features = 10000,max_df=.4,#min_df=3,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
#TfidfVectorizer(ngram_range=(1,2))
full_tfidf = tfidf_vec.fit_transform(train_test['params'].values.tolist() + train_df['params'].values.tolist() + test_df['params'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['params'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['params'].values.tolist())
del full_tfidf
print("TDIDF Params UNCLEAN..")
joblib.dump([train_tfidf, test_tfidf], "../params_tfidf3.pkl")
del(train_test); gc.collect()
train_per=pd.read_csv('../input/train_active.csv', usecols = ['title'])#,'title','description'])
test_per=pd.read_csv('../input/test_active.csv', usecols = ['title'])#,'title','description'])
train_test = pd.concat((train_per, test_per), axis = 'rows')
del train_per, test_per; gc.collect()
train_test.fillna('NA', inplace=True)
train_test["title_clean"]= list(train_test[["title"]].apply(lambda x: preprocess_data(x["title"]), axis=1))
train_df['title_clean'] = train_df['title_clean'].fillna('NA')
test_df['title_clean'] = test_df['title_clean'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2),max_features = 20000,#,min_df=3, max_df=.85,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_test['title_clean'].values.tolist()+train_df['title_clean'].values.tolist() + test_df['title_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../title_tfidf2.pkl")
del full_tfidf
print("TDIDF TITLE CLEAN..")
train_df['title_clean'] = train_df['title_clean'].fillna('NA')
test_df['title_clean'] = test_df['title_clean'].fillna('NA')
tfidf_vec = TfidfVectorizer(ngram_range=(1,1),max_features = 20000, max_df=.4,#,min_df=3,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_test['title_clean'].values.tolist()+train_df['title_clean'].values.tolist() + test_df['title_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../title_tfidf3.pkl")
del full_tfidf
print("TDIDF TITLE CLEAN..")
del(train_test); gc.collect()
###Too slow###
'''
train_per=pd.read_csv('../input/train_active.csv', usecols = ['description'])#,'title','description'])
test_per=pd.read_csv('../input/test_active.csv', usecols = ['description'])#,'title','description'])
train_per.fillna(' ', inplace=True)
test_per.fillna(' ', inplace=True)
train_test["desc_clean"]= list(train_test[["description"]].apply(lambda x: preprocess_data(x["description"]), axis=1))
### TFIDF Vectorizer ###
train_df['desc_clean'] = train_df['desc_clean'].fillna(' ')
test_df['desc_clean'] = test_df['desc_clean'].fillna(' ')
tfidf_vec = TfidfVectorizer(ngram_range=(1,2), max_features = 20000, stop_words = stopwords#,min_df=3,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_test['desc_clean'].values.tolist()+train_df['desc_clean'].values.tolist() + test_df['desc_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['desc_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['desc_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../desc_tfidf2.pkl")
del full_tfidf
print("TDIDF DESC CLEAN..")
tfidf_vec = TfidfVectorizer(ngram_range=(1,1), max_features = 20000, max_df=.4,#,min_df=3,
analyzer='word', token_pattern= r'\w{1,}',
use_idf=1, smooth_idf=0, sublinear_tf=1,)
full_tfidf = tfidf_vec.fit_transform(train_test['desc_clean'].values.tolist()+train_df['desc_clean'].values.tolist() + test_df['desc_clean'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['desc_clean'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['desc_clean'].values.tolist())
joblib.dump([train_tfidf, test_tfidf], "../desc_tfidf3.pkl")
del full_tfidf
print("TDIDF DESC CLEAN..")
'''
##########################################
# 13. Chargram -- too slow
##########################################
from collections import Counter
train_df=pd.read_feather('../train_basic_features_woCats.pkl')
test_df=pd.read_feather('../test__basic_features_woCats.pkl')
def char_ngrams(s):
s = s.lower()
s = s.replace(u' ', '')
result = Counter()
len_s = len(s)
for n in [3, 4, 5]:
result.update(s[i:i+n] for i in range(len_s - n + 1))
return ' '.join(list(result))
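# Worked example for char_ngrams (duplicates are collapsed by the Counter; ordering follows
# dict insertion order on Python 3.7+):
#   char_ngrams('sofa')  ->  'sof ofa sofa'
# i.e. all distinct character 3-, 4- and 5-grams of the lower-cased, space-stripped string.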
data = pd.concat((train_df, test_df), axis = 'rows')
data['param_chargram'] = list(data[['params']].apply(lambda x: char_ngrams(x['params']), axis=1))
data['title_chargram'] = list(data[['title']].apply(lambda x: char_ngrams(x['title']), axis=1))
#data['desc_chargram'] = list(data[['description']].apply(lambda x: char_ngrams(x['description']), axis=1))
#data['count_common_chargram'] = data.apply(lambda x: len(set(str(x['title_chargram']).lower().split()).intersection(set(str(x['desc_chargram']).lower().split()))), axis=1)
train_df = data.loc[data.deal_probability != 10].reset_index(drop = True)
test_df = data.loc[data.deal_probability == 10].reset_index(drop = True)
del(data); gc.collect()
#####Chargram -TFIDF
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000, min_df=3, max_df=.75)
full_tfidf = tfidf_vec.fit_transform(train_df['title_chargram'].values.tolist() + test_df['title_chargram'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['title_chargram'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['title_chargram'].values.tolist())
from sklearn.externals import joblib
joblib.dump([train_tfidf, test_tfidf], '../title_chargram_tfidf.pkl')
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 10000, min_df=3, max_df=.75)
full_tfidf = tfidf_vec.fit_transform(train_df['param_chargram'].values.tolist() + test_df['param_chargram'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['param_chargram'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['param_chargram'].values.tolist())
from sklearn.externals import joblib
joblib.dump([train_tfidf, test_tfidf], '../param_chargram_tfidf.pkl')
#######Chargram of Cat and Parent cat
def clean_text(text):
#text = re.sub(r'(\d+),(\d+)', r'\1.\2', text)
text = text.replace(u'²', '2')
text = text.lower()
text = re.sub(u'[^a-zа-я0-9]', ' ', text)
text = re.sub('\s+', ' ', text)
return text.strip()
train_df = pd.read_feather('../train_basic_features.pkl')
test_df = pd.read_feather('../test__basic_features.pkl')
data = pd.concat([train_df, test_df], axis= 'rows')
data['categories'] = data["parent_category_name"].fillna(' ') + data["category_name"].fillna(' ')
data['cat_chargram'] = list(data[['categories']].apply(lambda x: char_ngrams(x['categories']), axis=1))
train_df = data.loc[data.deal_probability != 10].reset_index(drop = True)
test_df = data.loc[data.deal_probability == 10].reset_index(drop = True)
del(data); gc.collect()
tfidf_vec = TfidfVectorizer(ngram_range=(1,3),max_features = 1000, min_df=3, max_df=.75)
full_tfidf = tfidf_vec.fit_transform(train_df['cat_chargram'].values.tolist() + test_df['cat_chargram'].values.tolist())
train_tfidf = tfidf_vec.transform(train_df['cat_chargram'].values.tolist())
test_tfidf = tfidf_vec.transform(test_df['cat_chargram'].values.tolist())
from sklearn.externals import joblib
joblib.dump([train_tfidf, test_tfidf], '../cat_chargram_tfidf.pkl')
##############################
## New Kaggle Ftr
##############################
import pandas as pd
import gc
used_cols = ['item_id', 'user_id']
train = pd.read_csv('../input/train.csv', usecols=used_cols)
train_active = pd.read_csv('../input/train_active.csv', usecols=used_cols)
test = pd.read_csv('../input/test.csv', usecols=used_cols)
test_active = pd.read_csv('../input/test_active.csv', usecols=used_cols)
train_periods = pd.read_csv('../input/periods_train.csv', parse_dates=['date_from', 'date_to'])
test_periods = pd.read_csv('../input/periods_test.csv', parse_dates=['date_from', 'date_to'])
import os
import time
import torch
import torch.nn.modules.distance
import torch.utils.data as td
import pandas as pd
import numpy as np
import datetime
from csl_common.utils import log
from csl_common.utils.nn import Batch
import csl_common.utils.ds_utils as ds_utils
from datasets import multi, affectnet, vggface2, wflw, w300
from constants import TRAIN, VAL
from networks import aae
from aae_training import AAETraining
import aae_training
import config as cfg
eps = 1e-8
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
WITH_LOSS_ZREG = False
class AAEUnsupervisedTraining(AAETraining):
def __init__(self, datasets, args, session_name='debug', **kwargs):
super().__init__(datasets, args, session_name, **kwargs)
def _get_network(self, pretrained):
return aae.AAE(self.args.input_size, pretrained_encoder=pretrained)
def _print_iter_stats(self, stats):
means = pd.DataFrame(stats)
import numpy as np
import partitioning
import pickle
import h5py
import pandas as pd
import sys
VGGM = -5.24
VGGS = 8.17
def map_labels(labels):
return labels - 1
def soften_ordinal_labels(labels, m=0.05):
# this function softens the ordinal labels for better training.
labels_ = labels.copy()
labels_[labels==1] = 1.0 - m
for l in range(labels.shape[0]):
# assuming first dimension is batch and second dimension is classes
maxindex = np.argmax(labels[l])
if maxindex == 0:
labels_[l,1] = m
elif maxindex == labels.shape[1]-1:
labels_[l,-2] = m
else:
labels_[l,maxindex-1] = m / 2.0
labels_[l,maxindex+1] = m / 2.0
return labels_.astype(np.float32)
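# Worked example for soften_ordinal_labels with the default m=0.05 (this assumes `labels` is a
# float one-hot matrix of shape batch x classes; with an integer array the 0.95 assignment would
# be truncated to 0):
#   [1, 0, 0, 0] -> [0.95, 0.05, 0.00, 0.00]    (edge class: all softened mass goes to the one neighbour)
#   [0, 1, 0, 0] -> [0.025, 0.95, 0.025, 0.00]  (interior class: mass split between both neighbours)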
def normalize_features(x):
return (x - VGGM) / VGGS
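# VGGM/VGGS above look like a precomputed dataset-wide mean and standard deviation of the stored
# VGG feature codes (an assumption based on the names -- the values are hard-coded), so
# normalize_features standardises each feature vector before it is used for training.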
class Dataset:
def __init__(self, image_hdf5_file, label_csv_file, label_name, clabel_name=None):
self.imf = h5py.File(image_hdf5_file, 'r')
self.codes = self.imf['features']
self.raw_label_data = pd.read_csv(label_csv_file)
self.labels = map_labels(self.raw_label_data[label_name].to_numpy())
self.vmap = self.raw_label_data[['img_id', 'pcd', 'oa11', 'lsoa11']].to_numpy()
self.label_name=label_name
# clabel_name is the name of the label that will be used to constrain partitioning.
# e.g. Images coming from the same lsoa will not be separated during partitioning.
# all of them will remain in the same partition.
# if it is None, then no constrain is given.
if clabel_name is not None:
self.clabels = self.raw_label_data[clabel_name].to_numpy()
else:
self.clabels = None
# population label distribution
self.label_types, self.label_counts = np.unique(self.labels, return_counts=True)
self.label_dist = self.label_counts / np.float(self.label_counts.sum())
self.num_labels = self.label_types.size
self.batch_inds = np.zeros(self.num_labels, dtype=np.int)
print('Label types: {}'.format(self.label_types))
print('Label counts: {}'.format(self.label_counts))
print('Label distributions: {}'.format(self.label_dist))
# = indices to keep track of which samples are used so far within
# = training. important to count epochs, rather than iterations.
self.batch_ind = 0
self.batch_ind_test = 0
self.batch_ind_valid = 0
# = place holders
self.train_part = []
self.test_part = []
self.validation_part = []
# = this function gets features and labels of samples with ids in the
# = list rows.
def get_data_part(self, rows, noise_std=1):
srows = sorted(rows)
if np.isscalar(srows):
srows = [srows]
d1 = normalize_features(self.codes[srows, 0, :])
d2 = normalize_features(self.codes[srows, 1, :])
d3 = normalize_features(self.codes[srows, 2, :])
d4 = normalize_features(self.codes[srows, 3, :])
if noise_std is not None:
d1 += np.random.normal(loc=0, scale=0.05, size=d1.shape)
d2 += np.random.normal(loc=0, scale=0.05, size=d2.shape)
d3 += np.random.normal(loc=0, scale=0.05, size=d3.shape)
d4 += np.random.normal(loc=0, scale=0.05, size=d4.shape)
l = np.zeros((d1.shape[0], self.num_labels), dtype=np.float)
lpart = self.labels[srows]
for lab_, k in zip(self.label_types, range(self.num_labels)):
try:
l[lpart == lab_, k] = 1
except:
print(srows, lab_, k, lpart, l)
sys.exit(1)
return d1, d2, d3, d4, l
def get_train_batch(self, batch_size):
rows = self.train_part[self.batch_ind: self.batch_ind + batch_size]
d1, d2, d3, d4, l = self.get_data_part(rows)
self.batch_ind += batch_size
if self.batch_ind >= len(self.train_part):
self.batch_ind = 0
return d1, d2, d3, d4, l
def get_balanced_train_batch(self, batch_size):
rows = []
lsize = self.label_types.size
lbatch_size = np.int(batch_size / np.float(lsize))
for l in self.label_types:
lrows = np.random.permutation(np.where(self.labels[self.train_part] == l)[0])
lrows = lrows[:lbatch_size].astype(np.int)
rows += list(np.asarray(self.train_part)[lrows])
d1, d2, d3, d4, l = self.get_data_part(rows)
return d1, d2, d3, d4, l
def get_train_data(self):
rows = self.train_part
d1, d2, d3, d4, l = self.get_data_part(rows)
return d1, d2, d3, d4, l
def get_validation_batch(self, batch_size):
rows = self.validation_part[self.batch_ind_valid: self.batch_ind_valid + batch_size]
d1, d2, d3, d4, l = self.get_data_part(rows)
self.batch_ind_valid += batch_size
if self.batch_ind_valid >= len(self.validation_part):
self.batch_ind_valid = 0
return d1, d2, d3, d4, l
def get_balanced_validation_batch(self, batch_size):
rows = []
lsize = self.label_types.size
lbatch_size = np.int(batch_size / np.float(lsize))
for l in self.label_types:
lrows = np.random.permutation(np.where(self.labels[self.validation_part] == l)[0])
lrows = lrows[:lbatch_size].astype(np.int)
rows += list(np.asarray(self.validation_part)[lrows])
d1, d2, d3, d4, l = self.get_data_part(rows)
return d1, d2, d3, d4, l
def get_validation_data(self):
rows = self.validation_part
d1, d2, d3, d4, l = self.get_data_part(rows)
return d1, d2, d3, d4, l
def get_test_batch(self, batch_size):
rows = self.test_part[self.batch_ind_test: self.batch_ind_test + batch_size]
d1, d2, d3, d4, l = self.get_data_part(rows)
self.batch_ind_test += batch_size
if self.batch_ind_test >= len(self.test_part):
self.batch_ind_test = 0
return d1, d2, d3, d4, l
def get_test_data(self):
rows = self.test_part
d1, d2, d3, d4, l = self.get_data_part(rows)
return d1, d2, d3, d4, l
def test_iterator(self, batch_num=1):
num_iter = np.int(np.ceil(len(self.test_part) / batch_num))
for n in range(num_iter):
rows = self.test_part[n*batch_num : (n+1)*batch_num]
yield self.get_data_part(rows), self.vmap[sorted(rows),:]
def validation_iterator(self, batch_num=1):
num_iter = np.int(np.ceil(len(self.validation_part) / batch_num))
for n in range(num_iter):
rows = self.validation_part[n*batch_num : (n+1)*batch_num]
yield self.get_data_part(rows)
def write_preds(self, preds, fname):
srows = sorted(self.test_part)
data_matrix=np.append(self.vmap[srows,:],self.labels[srows,np.newaxis],axis=1)
data_matrix=np.append(data_matrix,preds[:,np.newaxis],axis=1)
pred_matrix=pd.DataFrame(data=data_matrix,columns=['img_id', 'pcd', 'oa11', 'lsoa11',self.label_name,'predicted'])
pred_matrix.to_csv(fname,index=False)
class Dataset_CrossValidation(Dataset):
def __init__(self, image_hdf5_file, label_csv_file, label_name, clabel_name=None):
Dataset.__init__(self,
image_hdf5_file,
label_csv_file,
label_name,
clabel_name=clabel_name)
def pick_label(self, part_gen, part_file, part_kn=5, part_kp=0, vsize=0.1, seed=None):
'''
This runs every time an instance is created.
label_type: 'cat' (categorical), 'cont' (continuous)
'''
if part_gen == 1:
# = this part creates partitions from the data and saves them in a specified file.
# = the partitioning is k-folds and stratified.
# = it also allows constraints, see above comment, as clabels.
print('==================================== generating partitions =====================================')
self.kpartitions=partitioning.partition_stratified_kfold(part_kn,
self.labels,
seed=seed,
clabels=self.clabels)
pickle.dump(self.kpartitions, open(part_file, 'wb'))
else:
# = reads a partitioning that was written before.
# = e.g. if 5 fold cross-validation is used, then this file simply
# = will have 5 partitions written in it. self.kpartitions is a
# = list with 5 members and each member has a list of data sample
# = ids.
self.kpartitions=pickle.load(open(part_file, 'rb'))
# = creates training and test part from the self.kpartitions.
# = e.g. in 5 fold cross validation, it uses the fold part_kp (1...5)
# = as test and combines the remaining 4 as training data.
_train_part, self.test_part = partitioning.get_partition_stratified_kfold(part_kp, self.kpartitions)
# = vsize indicates the portion of the training set that will be used as validation.
# = the default value is set to 0.1, meaning 10% of all training examples.
if vsize > 0.0:
self.train_part, self.validation_part = partitioning.decimate_partition_stratified(_train_part, self.labels, psize=1.0 - vsize)
else:
self.train_part = _train_part
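# Hypothetical usage sketch (file names and the label below are placeholders, not taken from
# this module):
#   ds = Dataset_CrossValidation('features_vgg.h5', 'labels.csv', label_name='outcome', clabel_name='lsoa11')
#   ds.pick_label(part_gen=1, part_file='folds.pkl', part_kn=5, part_kp=0, vsize=0.1, seed=0)
#   d1, d2, d3, d4, y = ds.get_balanced_train_batch(64)
# part_gen=1 writes the stratified k-fold split to part_file; later runs can reload exactly the
# same folds with part_gen=0, so every experiment evaluates on identical partitions.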
# = this class simply splits the dataset into three parts (TVT):
# == Train (T)
# == Validation (V)
# == Test (T)
class Dataset_TVT(Dataset):
def __init__(self, image_hdf5_file, label_csv_file, label_name, clabel_name=None):
Dataset.__init__(self,
image_hdf5_file,
label_csv_file,
label_name,
clabel_name=clabel_name)
def pick_label(self, part_gen, part_file, train_size, valid_size, psize=1.0, seed=None):
'''
This runs every time an instance is created.
label_type: 'cat' (categorical), 'cont' (continuous)
'''
if part_gen == 1:
# = this part creates partitions from the data and saves them in a specified file.
# = the partitioning is stratified and only 3 parts: train, test and validation.
# = it also allows constraints, via clabels (see the comment above).
print('==================================== generating partitions =====================================')
_train_part, self.validation_part, self.test_part = partitioning.partition_stratified_validation(self.labels,
train_size,
valid_size,
seed=seed,
clabels=self.clabels)
pickle.dump(_train_part, open(part_file + '_train', 'wb'))
pickle.dump(self.validation_part, open(part_file + '_validation', 'wb'))
pickle.dump(self.test_part, open(part_file + '_test', 'wb'))
else:
# = reads a partitioning that was written before.
# = there are three files: validation, test and train
# = this part reads all of them.
_train_part=pickle.load(open(part_file + '_train', 'rb'))
self.validation_part=pickle.load(open(part_file + '_validation', 'rb'))
self.test_part=pickle.load(open(part_file + '_test', 'rb'))
# = psize indicates the percentage of training data to be used during
# = training. If it is 1.0, then we use all the training data. So,
# = self.train_part = _train_part
# = if it is less then 1.0 then we take a subset of the training data
# = with the same proportions of classes, i.e. stratified.
# = Note that inside decimate_partition_stratified code, we randomly
# = permute over the samples. So, every time you run this code,
# = training will happen with another subset of size psize.
if psize < 1.0:
self.train_part = partitioning.decimate_partition_stratified(_train_part, self.labels, psize=psize)
else:
self.train_part = _train_part
def write_preds_validation(self, preds, fname):
srows = sorted(self.validation_part)
data_matrix=np.append(self.vmap[srows,:],self.labels[srows,np.newaxis],axis=1)
data_matrix=np.append(data_matrix,preds[:,np.newaxis],axis=1)
pred_matrix = pd.DataFrame(data=data_matrix, columns=['img_id', 'pcd', 'oa11', 'lsoa11', self.label_name, 'predicted'])
import os
from deepblast.dataset.utils import state_f, revstate_f
import pandas as pd
import numpy as np
from collections import Counter
def read_mali(root, tool='manual', report_ids=False):
""" Reads in all alignments.
Parameters
----------
root : path
Path to root directory
tool : str
Specifies which tool's alignments should be extracted.
Returns
-------
pd.DataFrame
Three columns, one for each sequence and the resulting alignment.
If `report_ids` is specified, then the pdb id and the query/hit
ids are also reported as additional columns.
"""
res = []
pdbs = []
dirs = []
for path, directories, files in os.walk(root):
for f in files:
if '.ali' in f and tool in f and ('manual2' not in f):
fname = os.path.join(path, f)
lines = open(fname).readlines()
X = lines[0].rstrip().upper()
Y = lines[1].rstrip().upper()
S = ''.join(
list(map(revstate_f, map(state_f, list(zip(X, Y))))))
res.append((X.replace('-', ''), Y.replace('-', ''), S))
pdbs.append(os.path.basename(f).split(f'.{tool}.ali')[0])
dirs.append(os.path.basename(path))
res = pd.DataFrame(res)
import pytest
from pandas import Interval, DataFrame
from pandas.testing import assert_frame_equal
from datar.base.funs import *
from datar.base import table, pi, paste0
from datar.stats import rnorm
from .conftest import assert_iterable_equal
def test_cut():
z = rnorm(10000)
tab = table(cut(z, breaks=range(-6, 7)))
assert tab.shape == (1, 12)
assert tab.columns.tolist() == [
Interval(-6, -5, closed='right'),
Interval(-5, -4, closed='right'),
Interval(-4, -3, closed='right'),
Interval(-3, -2, closed='right'),
Interval(-2, -1, closed='right'),
Interval(-1, 0, closed='right'),
Interval(0, 1, closed='right'),
Interval(1, 2, closed='right'),
Interval(2, 3, closed='right'),
from datetime import timedelta
import pandas as pd
from estimate_start_times.concurrency_oracle import HeuristicsConcurrencyOracle
from estimate_start_times.config import Configuration as StartTimeConfiguration
from estimate_start_times.config import EventLogIDs as StartTimeEventLogIDs
from .config import Configuration
from .discovery import discover_batches_martins21
from .utils import get_batch_instance_enabled_time, get_batch_instance_start_time, get_naive_batch_case_processing_waiting_times
class BatchProcessingAnalysis:
"""
Discover the batches in an event log and calculate its waiting times (total, created, ready...).
"""
def __init__(self, event_log: pd.DataFrame, config: Configuration):
# Set event log
self.event_log = event_log
# Event log with batching information
self.batch_event_log = event_log.copy()
# Set configuration
self.config = config
# Set log IDs to ease access within class
self.log_ids = config.log_ids
# Set concurrency oracle
start_time_config = StartTimeConfiguration(
log_ids=StartTimeEventLogIDs(
case=self.log_ids.case,
activity=self.log_ids.activity,
enabled_time=self.log_ids.enabled_time,
start_time=self.log_ids.start_time,
end_time=self.log_ids.end_time,
resource=self.log_ids.resource,
),
consider_start_times=True
)
self.concurrency_oracle = HeuristicsConcurrencyOracle(self.event_log, start_time_config)
def analyze_batches(self) -> pd.DataFrame:
# Discover activity instance enabled times
self.concurrency_oracle.add_enabled_times(self.batch_event_log)
# Discover batches
self.batch_event_log = discover_batches_martins21(self.batch_event_log, self.config)
# Calculate batching waiting times
self._calculate_waiting_times()
# Return event log with batches and waiting times
return self.batch_event_log
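# Hypothetical driver sketch (event_log_df and the Configuration values are
# placeholders; the config must be adjusted to your log's column ids):
# analysis = BatchProcessingAnalysis(event_log_df, Configuration())
# enriched_log = analysis.analyze_batches()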
def _calculate_waiting_times(self):
# Create empty batch time columns
self.batch_event_log[self.log_ids.batch_pt] = timedelta(0)
self.batch_event_log[self.log_ids.batch_wt] = timedelta(0)
self.batch_event_log[self.log_ids.batch_total_wt] = timedelta(0)
self.batch_event_log[self.log_ids.batch_creation_wt] = timedelta(0)
self.batch_event_log[self.log_ids.batch_ready_wt] = timedelta(0)
self.batch_event_log[self.log_ids.batch_other_wt] = timedelta(0)
# If report checkpoints is true, create columns for report
if self.config.report_batch_checkpoints:
self.batch_event_log[self.log_ids.batch_case_enabled] = pd.NaT
self.batch_event_log[self.log_ids.batch_instance_enabled] = pd.NaT
self.batch_event_log[self.log_ids.batch_start] = pd.NaT
# Calculate waiting times
batch_events = self.batch_event_log[~pd.isna(self.batch_event_log[self.log_ids.batch_id])]
import boto3
import xmltodict
import requests
import pandas as pd
from custom_tokenizer import find_start_end
from tqdm import tqdm_notebook
class AMT:
def __init__(self, production=False):
environments = {
"production": {
"endpoint": "https://mturk-requester.us-east-1.amazonaws.com",
"preview": "https://www.mturk.com/mturk/preview"
},
"sandbox": {
"endpoint":
"https://mturk-requester-sandbox.us-east-1.amazonaws.com",
"preview": "https://workersandbox.mturk.com/mturk/preview"
}
}
self.mturk_environment = environments["production"] if production else environments["sandbox"]
session = boto3.Session(profile_name='default')
self.client = session.client(
service_name='mturk',
region_name='us-east-1',
endpoint_url=self.mturk_environment['endpoint'],
)
def balance(self):
return self.client.get_account_balance()['AvailableBalance']
def create_hits(self, question_xml, task_attributes, df):
count = 0
total = len(df)
print(df.index)
# do one pair per HIT
for pair in zip(df[::2].itertuples(), df[1::2].itertuples()):
row_one, row_two = pair
print(row_one.Index, row_two.Index)
xml = question_xml
xml = xml.replace('${question_1}',row_one.question)
xml = xml.replace('${response_1}',row_one.response_filtered)
xml = xml.replace('${question_2}',row_two.question)
xml = xml.replace('${response_2}',row_two.response_filtered)
# Add a URL to a base directory for images.
img_url = "TODO"
if requests.head(img_url+str(row_one.Index)+".jpg").status_code != requests.codes.ok:
print("Image Not found:", row_one.Index)
continue
if requests.head(img_url+str(row_two.Index)+".jpg").status_code != requests.codes.ok:
print("Image Not found:", row_two.Index)
continue
xml = xml.replace('${image_id_1}',str(row_one.Index))
xml = xml.replace('${image_id_2}',str(row_two.Index))
response = self.client.create_hit(
**task_attributes,
Question=xml
)
hit_type_id = response['HIT']['HITTypeId']
df.loc[row_one.Index, 'hit_id'] = response['HIT']['HITId']
df.loc[row_two.Index, 'hit_id'] = response['HIT']['HITId']
df.loc[row_one.Index, 'hit_idx'] = '1'
df.loc[row_two.Index, 'hit_idx'] = '2'
count += 2
print("Just created HIT {}, {}/{}".format(response['HIT']['HITId'], count, total))
print("You can view the HITs here:")
print(self.mturk_environment['preview']+"?groupId={}".format(hit_type_id))
def generate_qualifying_task(self, df, example_indices=None):
# https://docs.aws.amazon.com/AWSMechTurk/latest/AWSMturkAPI/ApiReference_QuestionFormDataStructureArticle.html
def add_image(img_id):
xml = "<EmbeddedBinary><EmbeddedMimeType><Type>image</Type><SubType>jpg</SubType></EmbeddedMimeType>"
xml += "<DataURL>TODO"+str(img_id)+".jpg</DataURL>"
xml += "<AltText>Image not found. Please contact the requester</AltText>"
xml += "<Width>100</Width><Height>100</Height></EmbeddedBinary>"
return xml
def add_data(img_id, question, response):
xml = "<Overview>"
xml += add_image(img_id)
xml += "<Text>Question: "+question+"</Text>"
xml += "<Text>Response: "+response+"</Text></Overview>"
return xml
def add_q(identifier, display_name, question_text, is_example=False, true_answer=None, explanation=None):
xml = "<Question><QuestionIdentifier>"+identifier+"</QuestionIdentifier>"
xml += "<DisplayName>"+display_name+"</DisplayName>"
xml += "<IsRequired>true</IsRequired>"
xml += "<QuestionContent><Text>"+question_text+"</Text></QuestionContent>"
# add possible answers
xml += "<AnswerSpecification><SelectionAnswer><StyleSuggestion>radiobutton</StyleSuggestion>"
xml += "<Selections>"
for a in ['yes', 'no']:
xml += "<Selection><SelectionIdentifier>"+a+"</SelectionIdentifier>"
xml += "<Text>"+a
xml += " [CORRECT ANSWER:{}]".format(explanation) if is_example and a == true_answer else ""
xml += "</Text></Selection>"
xml += "</Selections></SelectionAnswer></AnswerSpecification></Question>"
return xml
def add_answer(identifier, true_answer):
xml = "<Question><QuestionIdentifier>"+identifier+"</QuestionIdentifier>"
for a in ['yes', 'no']:
xml += "<AnswerOption><SelectionIdentifier>"+a+"</SelectionIdentifier>"
xml += "<AnswerScore>"+('1' if a == true_answer else '0')+"</AnswerScore></AnswerOption>"
xml += "</Question>"
return xml
# link to magic AWS XML template things
questions_xml = "<QuestionForm xmlns='http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2005-10-01/QuestionForm.xsd'>"
answers_xml = "<AnswerKey xmlns='http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2005-10-01/AnswerKey.xsd'>"
# add help text
questions_xml += "<Overview><Text> The images were found on Instagram and the questions were asked of the posters. The questions were generated by a bot and the responses are free-text from social media users, so either one could be wrong. Answer the two questions for each image. The first few are simply examples. Make sure you understand each one, then click the option that says [CORRECT ANSWER].</Text></Overview>"
# add questions and answers
for idx, row in df.iterrows():
is_example = example_indices is not None and idx in example_indices
is_q_relevant = 'yes' if row.q_relevant else 'no'
is_r_relevant = 'yes' if row.r_relevant else 'no'
questions_xml += add_data(idx, row.question, row.response_filtered)
questions_xml += add_q("q_relevant_{}".format(idx), "{}.{}".format(idx,1), "Is the question valid with respect to the image?", is_example=is_example, true_answer=is_q_relevant, explanation=row.get('q_relevant_explanation'))
answers_xml += add_answer("q_relevant_{}".format(idx), is_q_relevant)
questions_xml += add_q("r_relevant_{}".format(idx), "{}.{}".format(idx,2), "Is the response valid with respect to the image?", is_example=is_example, true_answer=is_r_relevant, explanation=row.get('r_relevant_explanation'))
answers_xml += add_answer("r_relevant_{}".format(idx), is_r_relevant)
# add method for calculating score
answers_xml += "<QualificationValueMapping><PercentageMapping>"
answers_xml += "<MaximumSummedScore>"+str(len(df)*2)+"</MaximumSummedScore>"
answers_xml += "</PercentageMapping></QualificationValueMapping>"
# wrap up xml
answers_xml += "</AnswerKey>"
questions_xml += "</QuestionForm>"
qualification = self.client.create_qualification_type(
Name='Question/Response Classification Task Understanding',
Keywords='test, qualification',
Description='This is a brief test to ensure workers understand the task set-up (half the "questions" are just examples)',
QualificationTypeStatus='Active',
RetryDelayInSeconds=60,
Test=questions_xml,
AnswerKey=answers_xml,
TestDurationInSeconds=300)
return qualification['QualificationType']['QualificationTypeId']
def get_reviewable_HITs(self):
HITIds = []
response = self.client.list_reviewable_hits()
token = response.get('NextToken')
HITIds.extend([HIT['HITId'] for HIT in response['HITs']])
while(token is not None):
response = self.client.list_reviewable_hits(NextToken=token)
token = response.get('NextToken')
HITIds.extend([HIT['HITId'] for HIT in response['HITs']])
return HITIds
def populate_results(self, df, ids=None):
assert('hit_id' in df.columns)
if ids is None:
# skip rows that are already filled out
if 'q_relevant' in df.columns and 'r_relevant' in df.columns:
ids = list(df[pd.isnull(df.q_relevant) | pd.isnull(df.r_relevant)
import numpy as np
import pandas as pd
import scipy.integrate
import tqdm
def single_nutrient(params, time, gamma_max, nu_max, precursor_mass_ref, Km,
omega, phi_R, phi_P, num_muts=1, volume=1E-3):
"""
Defines the system of ordinary differential equations (ODEs) which describe
accumulation of biomass on a single nutrient source.
Parameters
----------
params: list, [M, Mr, Mp, precursors, nutrients]
A list of the parameters whose dynamics are described by the ODEs.
M : positive float
Total protein biomass of the system
Mr : positive float, must be < M
Ribosomal protein biomass of the system
Mp : positive float, must be < M
Metabolic protein biomass of the system
precursors : positive float
Mass of precursors in the cell. This is normalized to
total protein biomass when calculating the translational
capacity.
nutrients : positive float
Mass of nutrients in the system.
time : float
Evaluated time step of the system.
gamma_max: positive float
The maximum translational capacity in units of inverse time.
nu_max : positive float
The maximum nutritional capacity in units of inverse time.
precursor_mass_ref : positive float
The dissociation constant of charged tRNA to the elongating ribosome.
Km : positive float
The Monod constant for growth on the specific nutrient source.
This is in units of molar.
omega: positive float
The yield coefficient of the nutrient source in mass of amino acid
produced per mass of nutrient.
phi_R : float, [0, 1]
The fraction of the proteome occupied by ribosomal protein mass
phi_P : float, [0, 1]
The fraction of the proteome occupied by metabolic protein mass
num_muts: int
The number of mutants whose dynamics need to be tracked.
volume: float, default 1 mL
The volume of the system for calculation of concentrations.
Returns
-------
out: list, [dM_dt, dMr_dt, dMp_dt, dprecursors_dt, dnutrients_dt]
A list of the evaluated ODEs at the specified time step.
dM_dt : The dynamics of the total protein biomass.
dMr_dt : The dynamics of the ribosomal protein biomass.
dMp_dt : the dynamics of the metabolic protein biomass.
dprecursors_dt : The dynamics of the precursor/charged-tRNA pool.
dnutrients_dt : The dynamics of the nutrients in the growth medium
"""
# Define constants
AVO = 6.022E23
OD_CONV = 6E17
#TODO: Put in data validation
# Unpack the parameters
if num_muts > 1:
nutrients = params[-1]
M, Mr, Mp, precursors = np.reshape(params[:-1], (4, num_muts))
else:
M, Mr, Mp, precursors, nutrients = params
# Compute the precursor mass fraction and nutrient concentration
precursor_mass_frac = precursors / M
nutrient_conc = nutrients / (AVO * volume)
# Compute the two capacities
gamma = gamma_max * precursor_mass_frac / (precursor_mass_frac + precursor_mass_ref)
nu = nu_max * nutrient_conc / (nutrient_conc + Km)
# ODEs for biomass accumulation
dM_dt = gamma * Mr
dMr_dt = phi_R * dM_dt
dMp_dt = phi_P * dM_dt
# ODE for precursors and nutrients
dprecursors_dt = nu * Mp - dM_dt
dnutrients_dt = -nu * Mp/ omega
_out = [dM_dt, dMr_dt, dMp_dt, dprecursors_dt]
if num_muts > 1:
dnutrients_dt = np.sum(dnutrients_dt)
out = [value for deriv in _out for value in deriv]
out.append(dnutrients_dt)
return out
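# A minimal usage sketch: integrate the single-nutrient system for one growth
# cycle with scipy.integrate.odeint. All numerical values below are
# illustrative placeholders, not parameters from any particular experiment.
def _example_single_nutrient_integration():
    time = np.linspace(0, 10, 500)
    params = [0.01, 0.002, 0.005, 1e-4, 1e10]  # [M, Mr, Mp, precursors, nutrients]
    # gamma_max, nu_max, precursor_mass_ref, Km, omega, phi_R, phi_P
    args = (1.0, 2.0, 0.1, 5e-6, 0.3, 0.2, 0.5)
    return scipy.integrate.odeint(single_nutrient, params, time, args=args)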
def dilution_cycle(time, fun, fun_params, fun_args, nutrient_dict,
target_mass=1, num_dilutions=10,
colnames=None, num_muts=1, **int_kwargs):
"""
Integrates a desired function with periodic dilutions and returns a
dataframe of the complete integration.
Parameters
-----------
time: numpy-array
The time interval to integrate for a single growth cycle. This
time interval will be repeated for each dilution.
fun: function
The function you wish to integrate
fun_params : list
List of parameters to feed into the function
fun_args : dict
Arguments to feed the integration function. Must be a dict as
some arguments are accessed (in the case of mutants)
nutrient_dict : dict
A dictionary of the indices and values to reset the nutrient conditions
for each dilution event. The keys correspond to the indices those of the
`fun_params` which define the nutrient conditions. The value corresponds
to the desired reset value.
num_dilutions : int
The number of dilution cycles that should be performed
dilution_factor : float or int
The factor by which the parameters should be decreased for each dilution
event. Note that this does not apply to the nutrient parameters which
are reset by `nutrient_dict.`
colnames : list of str, optional
The desired column names of the output. If `None`, columns will be
left arbitrarily named.
**int_kwargs: dict
kwargs to be fed to the ODE solver.
"""
# TODO: Put in type checks.
# Perform the initial integration
out = scipy.integrate.odeint(fun, fun_params, time, args=fun_args,
**int_kwargs)
# Instantiate the dataframes
if colnames is not None:
initial_df = pd.DataFrame(out, columns=colnames)
else:
initial_df = pd.DataFrame(out)
# Specify that this is the inaugural integration
initial_df['dilution_cycle'] = 0
# Keep track of the time
initial_df['time'] = time
stop_time = time[-1]
# Add to the storage list
dfs = [initial_df]
# Iterate through each dilution cycle.
for n in tqdm.tqdm(range(num_dilutions)):
# Compute the dilution factor if not specified
dilution_factor = out[-1, :num_muts].sum() / target_mass
# Reset the parameters
fun_params = list(out[-1, :] / dilution_factor)
for k, v in nutrient_dict.items():
fun_params[k] = v
# Rerun the integration
out = scipy.integrate.odeint(fun, fun_params, time, args=fun_args,
**int_kwargs)
# Set up the dataframe
if colnames is not None:
_df = pd.DataFrame(out, columns=colnames)
else:
_df = pd.DataFrame(out)
# Update time and keep track of the dilution cycle
_df['time'] = stop_time + time
_df['dilution_cycle'] = n + 1
# Add the dataframe to the storage list
dfs.append(_df)
# Update the stoptime
stop_time += time[-1]
# Concatenate the dataframes and return
dil_df = pd.concat(dfs)
import csv
import GetOldTweets3 as got
import numpy as np
import pandas as pd
import re
import time
from datetime import datetime, timezone, date, timedelta
from urllib.error import HTTPError, URLError
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
counter = 0
since = pd.to_datetime('2019-07-22')
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import pandas as pd
import numpy as np
import seaborn as sns
from lightfm.evaluation import precision_at_k, recall_at_k
def model_perf_plots(df):
"""Function to plot model performance metrics.
Args:
df (pandas.DataFrame): Dataframe in tidy format, with ['epoch', 'value', 'stage', 'metric'] columns (as produced by track_model_metrics)
Returns:
object: matplotlib axes
"""
g = sns.FacetGrid(df, col="metric", hue="stage", col_wrap=2, sharey=False)
g = g.map(sns.scatterplot, "epoch", "value").add_legend()
def compare_metric(df_list, metric="prec", stage="test"):
"""Function to combine and prepare list of dataframes into tidy format.
Args:
df_list (list): List of dataframes
metric (str): name of metric to be extracted, optional
stage (str): name of model fitting stage to be extracted, optional
Returns:
pandas.DataFrame: Metrics
"""
colnames = ["model" + str(x) for x in list(range(1, len(df_list) + 1))]
models = [
df[(df["stage"] == stage) & (df["metric"] == metric)]["value"]
.reset_index(drop=True)
.values
for df in df_list
]
output = pd.DataFrame(zip(*models), columns=colnames).stack().reset_index()
output.columns = ["epoch", "data", "value"]
return output
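# Illustrative use (df1, df2 would be outputs of track_model_metrics below):
# compare_metric([df1, df2], metric="prec", stage="test") returns a tidy frame
# with columns ['epoch', 'data', 'value'], one row per epoch and model.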
def track_model_metrics(
model,
train_interactions,
test_interactions,
k=10,
no_epochs=100,
no_threads=8,
show_plot=True,
**kwargs
):
"""Function to record model's performance at each epoch, formats the performance into tidy format,
plots the performance and outputs the performance data.
Args:
model (LightFM instance): fitted LightFM model
train_interactions (scipy sparse COO matrix): train interactions set
test_interactions (scipy sparse COO matrix): test interaction set
k (int): number of recommendations, optional
no_epochs (int): Number of epochs to run, optional
no_threads (int): Number of parallel threads to use, optional
**kwargs: other keyword arguments to be passed down
Returns:
pandas.DataFrame, LightFM model, matplotlib axes:
- Performance traces of the fitted model
- Fitted model
- Side effect of the method
"""
# initialising temp data storage
model_prec_train = [0] * no_epochs
model_prec_test = [0] * no_epochs
model_rec_train = [0] * no_epochs
model_rec_test = [0] * no_epochs
# fit model and store train/test metrics at each epoch
for epoch in range(no_epochs):
model.fit_partial(
interactions=train_interactions, epochs=1, num_threads=no_threads, **kwargs
)
model_prec_train[epoch] = precision_at_k(
model, train_interactions, k=k, **kwargs
).mean()
model_prec_test[epoch] = precision_at_k(
model, test_interactions, k=k, **kwargs
).mean()
model_rec_train[epoch] = recall_at_k(
model, train_interactions, k=k, **kwargs
).mean()
model_rec_test[epoch] = recall_at_k(
model, test_interactions, k=k, **kwargs
).mean()
# collect the performance metrics into a dataframe
fitting_metrics = pd.DataFrame(
zip(model_prec_train, model_prec_test, model_rec_train, model_rec_test),
columns=[
"model_prec_train",
"model_prec_test",
"model_rec_train",
"model_rec_test",
],
)
# convert into tidy format
fitting_metrics = fitting_metrics.stack().reset_index()
fitting_metrics.columns = ["epoch", "level", "value"]
# extract the labels for each observation
fitting_metrics["stage"] = fitting_metrics.level.str.split("_").str[-1]
fitting_metrics["metric"] = fitting_metrics.level.str.split("_").str[1]
fitting_metrics.drop(["level"], axis=1, inplace=True)
# replace the metric keys to improve visualisation
metric_keys = {"prec": "Precision", "rec": "Recall"}
fitting_metrics.metric.replace(metric_keys, inplace=True)
# plots the performance data
if show_plot:
model_perf_plots(fitting_metrics)
return fitting_metrics, model
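# A self-contained toy sketch (user/item ids are made up) showing the calling
# convention of track_model_metrics with a tiny LightFM dataset; it is not a
# meaningful evaluation.
def _example_track_model_metrics():
    from lightfm import LightFM
    from lightfm.data import Dataset
    ds = Dataset()
    ds.fit(users=["u1", "u2", "u3"], items=["i1", "i2", "i3"])
    train, _ = ds.build_interactions([("u1", "i1"), ("u2", "i2"), ("u3", "i3")])
    test, _ = ds.build_interactions([("u1", "i2"), ("u2", "i3")])
    model = LightFM(loss="warp", no_components=8)
    traces, model = track_model_metrics(model, train, test, k=2, no_epochs=5, show_plot=False)
    return traces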
def similar_users(user_id, user_features, model, N=10):
"""Function to return top N similar users based on https://github.com/lyst/lightfm/issues/244#issuecomment-355305681
Args:
user_id (int): id of user to be used as reference
user_features (scipy sparse CSR matrix): user feature matrix
model (LightFM instance): fitted LightFM model
N (int): Number of top similar users to return
Returns:
pandas.DataFrame: top N most similar users with score
"""
_, user_representations = model.get_user_representations(features=user_features)
# Cosine similarity
scores = user_representations.dot(user_representations[user_id, :])
user_norms = np.linalg.norm(user_representations, axis=1)
user_norms[user_norms == 0] = 1e-10
scores /= user_norms
best = np.argpartition(scores, -(N + 1))[-(N + 1) :]
return pd.DataFrame(
sorted(zip(best, scores[best] / user_norms[user_id]), key=lambda x: -x[1])[1:],
columns=["userID", "score"],
)
def similar_items(item_id, item_features, model, N=10):
"""Function to return top N similar items
based on https://github.com/lyst/lightfm/issues/244#issuecomment-355305681
Args:
item_id (int): id of item to be used as reference
item_features (scipy sparse CSR matrix): item feature matrix
model (LightFM instance): fitted LightFM model
N (int): Number of top similar items to return
Returns:
pandas.DataFrame: top N most similar items with score
"""
_, item_representations = model.get_item_representations(features=item_features)
# Cosine similarity
scores = item_representations.dot(item_representations[item_id, :])
item_norms = np.linalg.norm(item_representations, axis=1)
item_norms[item_norms == 0] = 1e-10
scores /= item_norms
best = np.argpartition(scores, -(N + 1))[-(N + 1) :]
return pd.DataFrame(
sorted(zip(best, scores[best] / item_norms[item_id]), key=lambda x: -x[1])[1:],
columns=["itemID", "score"],
)
def prepare_test_df(test_idx, uids, iids, uid_map, iid_map, weights):
"""Function to prepare test df for evaluation
Args:
test_idx (slice): slice of test indices
uids (numpy.ndarray): Array of internal user indices
iids (numpy.ndarray): Array of internal item indices
uid_map (dict): Keys to map internal user indices to external ids.
iid_map (dict): Keys to map internal item indices to external ids.
weights (numpy.float32 coo_matrix): user-item interaction
Returns:
pandas.DataFrame: user-item selected for testing
"""
test_df = pd.DataFrame(
zip(
uids[test_idx],
iids[test_idx],
[list(uid_map.keys())[x] for x in uids[test_idx]],
[list(iid_map.keys())[x] for x in iids[test_idx]],
),
columns=["uid", "iid", "userID", "itemID"],
)
dok_weights = weights.todok()
test_df["rating"] = test_df.apply(lambda x: dok_weights[x.uid, x.iid], axis=1)
return test_df[["userID", "itemID", "rating"]]
def prepare_all_predictions(
data,
uid_map,
iid_map,
interactions,
model,
num_threads,
user_features=None,
item_features=None,
):
"""Function to prepare all predictions for evaluation.
Args:
data (pandas df): dataframe of all users, items and ratings as loaded
uid_map (dict): Keys to map internal user indices to external ids.
iid_map (dict): Keys to map internal item indices to external ids.
interactions (np.float32 coo_matrix): user-item interaction
model (LightFM instance): fitted LightFM model
num_threads (int): number of parallel computation threads
user_features (np.float32 csr_matrix): User weights over features
item_features (np.float32 csr_matrix): Item weights over features
Returns:
pandas.DataFrame: all predictions
"""
users, items, preds = [], [], [] # noqa: F841
item = list(data.itemID.unique())
for user in data.userID.unique():
user = [user] * len(item)
users.extend(user)
items.extend(item)
all_predictions = pd.DataFrame(data={"userID": users, "itemID": items})
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
begge_kjonn_5 = pd.read_csv("begge_kjonn_5.csv")
gutter_5 = pd.read_csv("gutter_5.csv")
jenter_5 = pd.read_csv("jenter_5.csv")
jenter_gutter_5 = pd.concat([gutter_5, jenter_5]).reset_index(drop=True)
begge_kjonn_8 = pd.read_csv("begge_kjonn_8.csv")
gutter_8 = pd.read_csv("gutter_8.csv")
jenter_8 = pd.read_csv("jenter_8.csv")
jenter_gutter_8 = pd.concat([gutter_8, jenter_8]).reset_index(drop=True)
# Save tables to excel file, for the heck of it
begge_kjonn_9 = pd.read_csv("begge_kjonn_9.csv")
gutter_9 = pd.read_csv("gutter_9.csv")
jenter_9 = pd.read_csv("jenter_9.csv")
jenter_gutter_9 = | pd.concat([gutter_9, jenter_9]) | pandas.concat |
from faker import Faker
import pandas as pd
import datetime
import numpy as np
import matplotlib.pylab as plt
'''
class which generates fake and sample data
'''
class fake_data:
@staticmethod
def one_sentence():
"""
Returns text(string)
Parameters
-----------
"""
temp = Faker()
return temp.text()
@staticmethod
def many_sentences(how_many):
"""
Returns Pandas Dataframe with text
Parameters
-----------
how_many
number of rows of text in dataframe
"""
temp = Faker()
data = []
for i in range(how_many):
data.append(temp.text())
return pd.DataFrame(data, columns=["text"])
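# Hypothetical quick check of the helpers above:
# fake_data.one_sentence() -> a single random sentence (str)
# fake_data.many_sentences(5) -> DataFrame with 5 rows in a single "text" column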
"""Parsers to convert uncontrolled cell grids into representations of StarTable blocks.
parse_blocks() emits a stream of blocks objects.
This in principle allows early abort of reads as well as generic postprocessing (
as discussed in store-module docstring).
parse_blocks() switches between different parsers depending on the StarTable block type:
- Metadata
- Directive
- Table
- Template (not yet implemented)
For each of these:
- The intended input is a two-dimensional "cell grid" i.e. a sequence of rows, with each row
being a sequence of values (where a "sequence" is usually a list, but can also be e.g. a tuple).
Rows need not be of equal length; only the relevant portion of a cell grid will be parsed
depending on the relevant block type.
- The output is a representation of the StarTable block, either as:
- A pdtable-style block object e.g. Table
- A JSON-like data structure ready for serialization via e.g. json.dump() (only implemented for
table blocks at this stage); or
- The original, raw cell grid, in case the user wants to do some low-level processing.
"""
from abc import abstractmethod
import itertools
import re
from typing import Sequence, Optional, Tuple, Any, Iterable, List, Union, Dict
from collections import defaultdict
import pandas as pd
import warnings
from pdtable import BlockType, BlockIterator
from pdtable import Table
from pdtable.io._json import to_json_serializable, JsonData, JsonDataPrecursor
from pdtable.table_origin import (
LocationSheet,
NullLocationFile,
TableOrigin,
InputIssue,
InputIssueTracker,
NullInputIssueTracker,
)
from .columns import parse_column
from .fixer import ParseFixer
from ... import frame
from ...auxiliary import MetadataBlock, Directive
from ...table_metadata import TableMetadata
# Typing alias: 2D grid of cells with rows and cols. Intended indexing: cell_grid[row][col]
CellGrid = Sequence[Sequence]
def make_metadata_block(cells: CellGrid, origin: Optional[str] = None, **_) -> MetadataBlock:
mb = MetadataBlock(origin)
for row in cells:
if len(row) > 1 and row[0] is not None:
key_field = row[0].strip()
if len(key_field) > 0 and key_field[-1] == ":":
mb[key_field[:-1]] = row[1].strip()
return mb
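# Illustrative input (a made-up cell grid, not from the test suite): rows whose
# first cell ends with ':' become key/value pairs, e.g.
# make_metadata_block([["author:", "A. Person"], ["date:", "2020-01-01"]])
# yields a MetadataBlock mapping {"author": "A. Person", "date": "2020-01-01"}.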
def make_directive(cells: CellGrid, origin: Optional[str] = None, **_) -> Directive:
name = cells[0][0][3:]
directive_lines = [row[0] for row in cells[1:]]
return Directive(name, directive_lines, origin)
def make_fixer(origin, fixer=None, **kwargs):
""" Determine if user has supplied custom fixer
Else return default ParseFixer() instance.
"""
if fixer is not None:
if type(fixer) is type:
# It's a class, not an instance. Make an instance here.
fixer = fixer()
else:
fixer = ParseFixer()
assert fixer is not None
fixer.origin = origin
# fixer.reset_fixes()
return fixer
def parse_column_names(column_names_raw: Sequence[Union[str, None]]) -> List[str]:
"""Parses column names from the sequence read from file
Rejects everything after first blank cell, since there can be comments there.
Strips column names.
"""
return [
c.strip() for c in itertools.takewhile(lambda x: not _is_cell_blank(x), column_names_raw)
]
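# For example (illustrative cells): everything from the first blank cell on is
# treated as a trailing comment and dropped, and names are stripped:
# parse_column_names(["mass ", "speed", None, "a comment"]) -> ["mass", "speed"]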
def make_table_json_precursor(cells: CellGrid, origin, fixer:ParseFixer) -> Tuple[JsonDataPrecursor, bool]:
"""Parses cell grid into a JSON-like data structure but with some non-JSON-native values
Parses cell grid to a JSON-like data structure of nested "objects" (dict), "arrays" (list),
and values, including values with types that map 1:1 to JSON-native types, as well as some
value types that don't directly map to JSON types.
This JSON data "precursor" can then be sent for further processing:
- Parsing to pdtable-style Table block object
- Conversion to a "pure" JSON data object in which all values are of JSON-native types.
Also returns a bool "transposed" flag.
"""
table_name: str = cells[0][0][2:]
transposed = table_name.endswith("*")
if transposed:
# Chop off the transpose decorator from the name
table_name = table_name[:-1]
fixer.table_name = table_name
# internally hold destinations as json-compatible dict
destinations = {dest: None for dest in cells[1][0].strip().split(" ")}
if transposed:
# Column names are in lines' first cell
column_names = parse_column_names([line[0] for line in cells[2:]])
else:
# Column names are on line 2 (zero-based)
column_names = parse_column_names(cells[2])
column_names = _fix_duplicate_column_names(column_names, fixer)
n_col = len(column_names)
if transposed:
units = [line[1] for line in cells[2 : 2 + n_col]]
else:
units = cells[3][:n_col]
units = [unit.strip() for unit in units]
if transposed:
data_lines = [line[2:] for line in cells[2 : 2 + n_col]]
len_longest_line = max(len(line) for line in data_lines)
# Find last non-blank data row
n_row = 0
for i_row in range(len_longest_line):
# Does this row have non-blank cells?
for line in data_lines:
if len(line) >= i_row + 1 and not _is_cell_blank(line[i_row]):
# Found a non-blank cell. This row is legit. Go check next row.
n_row = i_row + 1
break
else:
# No non-blank cells found on this row. This row is blank. Go no further.
break
# Collate data rows
data_rows = zip(
*(
line[:n_row] # trim empty cells off of long lines
if len(line) >= n_row
else line + [None] * (n_row - len(line)) # pad short lines with empty cells
for line in data_lines
)
)
else:
data_rows = [line[:n_col] for line in cells[4:]]
data_rows = [list(row) for row in data_rows]
# ensure all data columns are populated
for i_row, row in enumerate(data_rows):
if len(row) < n_col:
fix_row = fixer.fix_missing_rows_in_column_data(
row=i_row, row_data=row, num_columns=n_col
)
data_rows[i_row] = fix_row
# build dictionary of columns iteratively to allow meaningful error messages
columns = dict(zip(column_names, [[]] * len(column_names)))
for name, unit, values in zip(column_names, units, zip(*data_rows)):
try:
fixer.column_name = name
columns[name] = parse_column(unit, values, fixer)
except ValueError as e:
raise ValueError(
f"Unable to parse value in column '{name}' of table '{table_name}' as '{unit}'"
) from e
fixer.report()
return (
{
"name": table_name,
"columns": columns,
"units": units,
"destinations": destinations,
"origin": origin,
},
transposed,
)
def _make_table(cells: CellGrid, origin, fixer) -> Table:
"""Parses cell grid into a pdtable-style Table block object."""
json_precursor, transposed = make_table_json_precursor(
cells, origin=str(origin.input_location), fixer=fixer,
)
return Table(
frame.make_table_dataframe(
pd.DataFrame(json_precursor["columns"])
from pathlib import Path
import numba as nb
import numpy as np
import pandas as pd
from astropy.time import Time
from scipy.optimize import curve_fit
import ysvisutilpy2005ud as yvu
PI = np.pi
D2R = PI / 180
DATAPATH = Path('data')
SAVEPATH = Path('figs')
SAVEPATH.mkdir(exist_ok=True)
# ********************************************************************************************************** #
# * COMBINE MSI AND DEVOGELE DATA * #
# ********************************************************************************************************** #
dats = pd.read_csv(DATAPATH/"pol_ud_data.csv", sep=',')
dats.insert(loc=7, column="dPr", value=dats["dP"])
dat2 = pd.read_csv(DATAPATH/"2020PSJ.....1...15D.csv")
import math
__author__ = 'r_milk01'
import os
import pandas as pd
from configparser import ConfigParser
import matplotlib.pyplot as plt
import matplotlib
import itertools
import logging
import difflib
import colors as color_util
TIMINGS = ['usr', 'sys', 'wall']
MEASURES = ['max', 'avg']
SPECIALS = ['run', 'threads', 'ranks', 'cores']
'''markers = {0: u'tickleft', 1: u'tickright', 2: u'tickup', 3: u'tickdown', 4: u'caretleft', u'D': u'diamond',
6: u'caretup', 7: u'caretdown', u's': u'square', u'|': u'vline', u'': u'nothing', u'None': u'nothing',
None: u'nothing', u'x': u'x', 5: u'caretright', u'_': u'hline', u'^': u'triangle_up', u' ': u'nothing',
u'd': u'thin_diamond', u'h': u'hexagon1', u'+': u'plus', u'*': u'star', u',': u'pixel', u'o': u'circle',
u'.': u'point', u'1': u'tri_down', u'p': u'pentagon', u'3': u'tri_left', u'2': u'tri_up', u'4': u'tri_right',
u'H': u'hexagon2', u'v': u'triangle_down', u'8': u'octagon', u'<': u'triangle_left', u'>': u'triangle_right'}
'''
MARKERS = ['s', 'o', 4, 5, 7, '|', '*', 1, 2, 3, 4, 6, 7]
FIGURE_OUTPUTS = ['png', 'pdf', 'pgf']
# pd.options.display.mpl_style = 'default'
# matplotlib.rc('font', family='sans-serif')
# matplotlib.rc('xtick', labelsize=20)
# matplotlib.rc('ytick', labelsize=20)
SMALL_SIZE = 11
MEDIUM_SIZE = 13
BIGGER_SIZE = 16
matplotlib.rc('font', size=MEDIUM_SIZE, family='sans-serif') # controls default text sizes
matplotlib.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title
matplotlib.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels
matplotlib.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
matplotlib.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
matplotlib.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
matplotlib.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
# http://nerdjusttyped.blogspot.de/2010/07/type-1-fonts-and-matplotlib-figures.html
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['pgf.texsystem'] = 'pdflatex'
def common_substring(strings, glue='_'):
first, last = strings[0], strings[-1]
seq = difflib.SequenceMatcher(None, first, last, autojunk=False)
mb = seq.get_matching_blocks()
return glue.join([first[m.a : m.a + m.size] for m in mb]).replace(os.path.sep, '')
def make_val(val, round_digits=3):
try:
return round(float(val), round_digits)
except ValueError:
return str(val)
def m_strip(s, timings=None, measures=None):
timings = timings or TIMINGS
measures = measures or MEASURES
for t, m in itertools.product(timings, measures):
s = s.replace('_{}_{}'.format(m, t), '')
return s
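# e.g. with the default TIMINGS/MEASURES lists, m_strip('assembly_max_wall')
# returns 'assembly'.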
def read_files(dirnames, specials=None):
current = None
specials = specials or SPECIALS
header = {'memory': [], 'profiler': [], 'params': [], 'errors': []}
for fn in dirnames:
assert os.path.isdir(fn)
prof = os.path.join(fn, 'profiler.csv')
try:
new = pd.read_csv(prof)
except pd.parser.CParserError as e:
logging.error('Failed parsing {}'.format(prof))
raise e
header['profiler'] = list(new.columns.values)
params = ConfigParser()
param_fn = ['dsc_parameter.log', 'dxtc_parameter.log']
subdirs = ['', 'logs', 'logdata']
params.read([os.path.join(fn, sd, pfn) for sd, pfn in itertools.product(subdirs, param_fn)])
p = {}
for section in params.sections():
p.update({'{}.{}'.format(section, n): make_val(v) for n, v in params.items(section)})
p['grids.total_macro_cells'] = math.pow(p['grids.macro_cells_per_dim'], p['grids.dim'])
p['grids.total_fine_cells'] = p['grids.total_macro_cells'] * math.pow(
p['grids.micro_cells_per_macrocell_dim'], p['grids.dim']
)
param = pd.DataFrame(p, index=[0])
# mem
mem = os.path.join(fn, 'memory.csv')
mem = pd.read_csv(mem)
new = pd.concat((new, param, mem), axis=1)
header['memory'] = mem.columns.values
header['params'] = param.columns.values
err = os.path.join(fn, 'errors.csv')
if os.path.isfile(err):
err = pd.read_csv(err)
header['errors'] = err.columns.values
new = pd.concat((new, err), axis=1)
current = current.append(new, ignore_index=True) if current is not None else new
# ideal speedup account for non-uniform thread/rank ratio across columns
count = len(current['ranks'])
cmp_value = lambda j: current['grids.total_macro_cells'][j] / (current['ranks'][j] * current['threads'][j])
values = [cmp_value(i) / cmp_value(0) for i in range(0, count)]
current.insert(len(specials), 'ideal_scaleup', pd.Series(values))
cmp_value = lambda j: current['ranks'][j] * current['threads'][j]
values = [cmp_value(i) / cmp_value(0) for i in range(0, count)]
current.insert(len(specials), 'ideal_speedup', pd.Series(values))
cores = [cmp_value(i) for i in range(0, count)]
current.insert(len(specials), 'cores', pd.Series(cores))
cmp_value = lambda j: current['grids.total_macro_cells'][j] / (current['ranks'][j] * current['threads'][j])
values = [cmp_value(i) / cmp_value(0) for i in range(0, count)]
current.insert(len(specials), 'ideal_time', pd.Series(values))
return header, current
def sorted_f(frame, ascending=True, sort_cols=None):
sort_cols = sort_cols or ['ranks', 'threads']
return frame.sort_values(by=sort_cols, na_position='last', ascending=ascending)
def speedup(headerlist, current, baseline_name, specials=None, round_digits=3, timings=None, measures=None):
timings = timings or TIMINGS
measures = measures or MEASURES
specials = specials or SPECIALS
t_sections = set([m_strip(h) for h in headerlist]) - set(specials)
for sec in t_sections:
for t, m in itertools.product(timings, measures):
source_col = '{}_{}_{}'.format(sec, m, t)
source = current[source_col]
speedup_col = source_col + '_speedup'
ref_value = source[0]
values = [round(ref_value / source[i], round_digits) for i in range(len(source))]
current[speedup_col] = pd.Series(values)
# relative part of overall absolut timing category
abspart_col = source_col + '_abspart'
ref_value = lambda j: float(current['{}_{}_{}'.format(baseline_name, m, t)][j])
values = [round(source[i] / ref_value(i), round_digits) for i in range(len(source))]
current[abspart_col] = pd.Series(values)
# relative part of overall total walltime
wallpart_col = source_col + '_wallpart'
ref_value = lambda j: float(current['{}_{}_{}'.format(baseline_name, m, 'wall')][j])
values = [round(source[i] / ref_value(i), round_digits) for i in range(len(source))]
current[wallpart_col] = pd.Series(values)
for m in measures:
# thread efficiency
source_col = '{}_{}_{}'.format(sec, m, 'usr')
threadeff_col = source_col + '_threadeff'
wall = current['{}_{}_{}'.format(sec, m, 'wall')]
source = current[source_col]
value = lambda j: float(source[j] / (current['threads'][j] * wall[j]))
values = [round(value(i), round_digits) for i in range(len(source))]
current[threadeff_col] = pd.Series(values)
current = sorted_f(current, True)
return current
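# Illustrative effect (section name 'assembly' is assumed): a profiler column
# 'assembly_avg_wall' gains 'assembly_avg_wall_speedup' (first run's time over
# this run's), '..._abspart' (share of the baseline section's time for the same
# timing), '..._wallpart' (share of the baseline wall time), and per-measure
# '..._threadeff' columns (usr time / (threads * wall)).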
def scaleup(headerlist, current, baseline_name, specials=None, round_digits=3, timings=None, measures=None):
timings = timings or TIMINGS
measures = measures or MEASURES
specials = specials or SPECIALS
t_sections = set([m_strip(h) for h in headerlist]) - set(specials)
for sec in t_sections:
for t, m in itertools.product(timings, measures):
source_col = '{}_{}_{}'.format(sec, m, t)
source = current[source_col]
speedup_col = '{}_{}'.format(source_col, 'scaleup')
ref_value = source[0]
values = [round(ref_value / source[i], round_digits) for i in range(len(source))]
current[speedup_col] = pd.Series(values)
# relative part of overall absolut timing category
abspart_col = source_col + '_abspart'
ref_value = lambda j: float(current['{}_{}_{}'.format(baseline_name, m, t)][j])
values = [round(source[i] / ref_value(i), round_digits) for i in range(len(source))]
current[abspart_col] = pd.Series(values)
# relative part of overall total walltime
wallpart_col = source_col + '_wallpart'
ref_value = lambda j: float(current['{}_{}_{}'.format(baseline_name, m, 'wall')][j])
values = [round(source[i] / ref_value(i), round_digits) for i in range(len(source))]
current[wallpart_col] = pd.Series(values)
import os
from nose.tools import *
import unittest
import pandas as pd
from py_entitymatching.utils.generic_helper import get_install_path
import py_entitymatching.catalog.catalog_manager as cm
import py_entitymatching.utils.catalog_helper as ch
from py_entitymatching.io.parsers import read_csv_metadata
datasets_path = os.sep.join([get_install_path(), 'tests', 'test_datasets'])
catalog_datasets_path = os.sep.join([get_install_path(), 'tests',
'test_datasets', 'catalog'])
path_a = os.sep.join([datasets_path, 'A.csv'])
path_b = os.sep.join([datasets_path, 'B.csv'])
path_c = os.sep.join([datasets_path, 'C.csv'])
class CatalogManagerTestCases(unittest.TestCase):
def setUp(self):
cm.del_catalog()
def tearDown(self):
cm.del_catalog()
def test_get_property_valid_df_name_1(self):
# cm.del_catalog()
df = read_csv_metadata(path_a)
self.assertEqual(cm.get_property(df, 'key'), 'ID')
# cm.del_catalog()
def test_get_property_valid_df_name_2(self):
# cm.del_catalog()
self.assertEqual(cm.get_catalog_len(), 0)
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
self.assertEqual(cm.get_property(C, 'key'), '_id')
self.assertEqual(cm.get_property(C, 'fk_ltable'), 'ltable_ID')
self.assertEqual(cm.get_property(C, 'fk_rtable'), 'rtable_ID')
self.assertEqual(cm.get_property(C, 'ltable').equals(A), True)
self.assertEqual(cm.get_property(C, 'rtable').equals(B), True)
# cm.del_catalog()
@raises(AssertionError)
def test_get_property_invalid_df_1(self):
cm.get_property(10, 'key')
@raises(AssertionError)
def test_get_property_invalid_path_1(self):
# cm.del_catalog()
A = read_csv_metadata(path_a)
cm.get_property(A, None)
# cm.del_catalog()
@raises(KeyError)
def test_get_property_df_notin_catalog(self):
# cm.del_catalog()
A = pd.read_csv(path_a)
cm.get_property(A, 'key')
# cm.del_catalog()
def test_set_property_valid_df_name_value(self):
# cm.del_catalog()
df = pd.read_csv(path_a)
cm.set_property(df, 'key', 'ID')
self.assertEqual(cm.get_property(df, 'key'), 'ID')
# cm.del_catalog()
@raises(AssertionError)
def test_set_property_invalid_df(self):
# cm.del_catalog()
cm.set_property(None, 'key', 'ID')
# cm.del_catalog()
@raises(AssertionError)
def test_set_property_valid_df_invalid_prop(self):
# cm.del_catalog()
A = pd.read_csv(path_a)
cm.set_property(A, None, 'ID')
# cm.del_catalog()
def test_init_properties_valid(self):
# cm.del_catalog()
A = pd.read_csv(path_a)
cm.init_properties(A)
self.assertEqual(cm.is_dfinfo_present(A), True)
# cm.del_catalog()
@raises(AssertionError)
def test_init_properties_invalid_df(self):
cm.init_properties(None)
def test_get_all_properties_valid_1(self):
# cm.del_catalog()
A = read_csv_metadata(path_a)
m = cm.get_all_properties(A)
self.assertEqual(len(m), 1)
self.assertEqual(m['key'], 'ID')
# cm.del_catalog()
def test_get_all_properties_valid_2(self):
# cm.del_catalog()
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
m = cm.get_all_properties(C)
self.assertEqual(len(m), 5)
self.assertEqual(m['key'], '_id')
self.assertEqual(m['fk_ltable'], 'ltable_ID')
self.assertEqual(m['fk_rtable'], 'rtable_ID')
self.assertEqual(m['ltable'].equals(A), True)
self.assertEqual(m['rtable'].equals(B), True)
# cm.del_catalog()
@raises(AssertionError)
def test_get_all_properties_invalid_df_1(self):
# cm.del_catalog()
C = cm.get_all_properties(None)
@raises(KeyError)
def test_get_all_properties_invalid_df_2(self):
# cm.del_catalog()
A = pd.read_csv(path_a)
C = cm.get_all_properties(A)
def test_del_property_valid_df_name(self):
A = read_csv_metadata(path_a)
cm.del_property(A, 'key')
self.assertEqual(len(cm.get_all_properties(A)), 0)
@raises(AssertionError)
def test_del_property_invalid_df(self):
cm.del_property(None, 'key')
@raises(AssertionError)
def test_del_property_invalid_property(self):
A = read_csv_metadata(path_a)
cm.del_property(A, None)
@raises(KeyError)
def test_del_property_df_notin_catalog(self):
A = pd.read_csv(path_a)
cm.del_property(A, 'key')
@raises(KeyError)
def test_del_property_prop_notin_catalog(self):
A = read_csv_metadata(path_a)
cm.del_property(A, 'key1')
def test_del_all_properties_valid_1(self):
A = read_csv_metadata(path_a)
cm.del_all_properties(A)
self.assertEqual(cm.is_dfinfo_present(A), False)
def test_del_all_properties_valid_2(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
cm.del_all_properties(C)
self.assertEqual(cm.is_dfinfo_present(C), False)
@raises(AssertionError)
def test_del_all_properties_invalid_df(self):
cm.del_all_properties(None)
@raises(KeyError)
def test_del_all_properties_df_notin_catalog(self):
A = pd.read_csv(path_a)
cm.del_all_properties(A)
def test_get_catalog_valid(self):
A = read_csv_metadata(path_a)
cg = cm.get_catalog()
self.assertEqual(len(cg), 1)
def test_del_catalog_valid(self):
A = read_csv_metadata(path_a)
cm.del_catalog()
cg = cm.get_catalog()
self.assertEqual(len(cg), 0)
def test_is_catalog_empty(self):
A = read_csv_metadata(path_a)
cm.del_catalog()
self.assertEqual(cm.is_catalog_empty(), True)
def test_is_dfinfo_present_valid_1(self):
A = read_csv_metadata(path_a)
status = cm.is_dfinfo_present(A)
self.assertEqual(status, True)
def test_is_dfinfo_present_valid_2(self):
A = pd.read_csv(path_a)
status = cm.is_dfinfo_present(A)
self.assertEqual(status, False)
@raises(AssertionError)
def test_is_dfinfo_present_invalid(self):
cm.is_dfinfo_present(None)
def test_is_property_present_for_df_valid_1(self):
A = read_csv_metadata(path_a)
status = cm.is_property_present_for_df(A, 'key')
self.assertEqual(status, True)
def test_is_property_present_for_df_valid_2(self):
A = read_csv_metadata(path_a)
status = cm.is_property_present_for_df(A, 'key1')
self.assertEqual(status, False)
@raises(AssertionError)
def test_is_property_present_for_df_invalid_df(self):
cm.is_property_present_for_df(None, 'key')
@raises(KeyError)
def test_is_property_present_for_df_notin_catalog(self):
A = pd.read_csv(path_a)
cm.is_property_present_for_df(A, 'key')
def test_catalog_len(self):
A = read_csv_metadata(path_a)
self.assertEqual(cm.get_catalog_len(), 1)
def test_set_properties_valid_1(self):
A = read_csv_metadata(path_a)
p = cm.get_all_properties(A)
B = pd.read_csv(path_b)
cm.init_properties(B)
cm.set_properties(B,p)
self.assertEqual(cm.get_all_properties(B)==p, True)
def test_set_properties_valid_2(self):
A = read_csv_metadata(path_a)
p = cm.get_all_properties(A)
B = pd.read_csv(path_b)
cm.set_properties(B,p)
self.assertEqual(cm.get_all_properties(B)==p, True)
@raises(AssertionError)
def test_set_properties_invalid_df_1(self):
cm.set_properties(None, {})
@raises(AssertionError)
def test_set_properties_invalid_dict_1(self):
A = read_csv_metadata(path_a)
cm.set_properties(A, None)
def test_set_properties_df_notin_catalog_replace_false(self):
A = read_csv_metadata(path_a)
cm.set_properties(A, {}, replace=False)
self.assertEqual(cm.get_key(A), 'ID')
# def test_has_property_valid_1(self):
# A = read_csv_metadata(path_a)
# self.assertEqual(cm.has_property(A, 'key'), True)
#
# def test_has_property_valid_2(self):
# A = read_csv_metadata(path_a)
# self.assertEqual(cm.has_property(A, 'key1'), False)
#
# @raises(AssertionError)
# def test_has_property_invalid_df(self):
# cm.has_property(None, 'key')
#
# @raises(AssertionError)
# def test_has_property_invalid_prop_name(self):
# A = read_csv_metadata(path_a)
# cm.has_property(A, None)
#
# @raises(KeyError)
# def test_has_property_df_notin_catalog(self):
# A = pd.read_csv(path_a)
# cm.has_property(A, 'key')
def test_copy_properties_valid_1(self):
A = read_csv_metadata(path_a)
A1 = pd.read_csv(path_a)
cm.copy_properties(A, A1)
self.assertEqual(cm.is_dfinfo_present(A1), True)
p = cm.get_all_properties(A)
p1 = cm.get_all_properties(A1)
self.assertEqual(p, p1)
self.assertEqual(cm.get_key(A1), cm.get_key(A))
def test_copy_properties_valid_2(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
C1 = pd.read_csv(path_c)
cm.copy_properties(C, C1)
self.assertEqual(cm.is_dfinfo_present(C1), True)
p = cm.get_all_properties(C1)
p1 = cm.get_all_properties(C1)
self.assertEqual(p, p1)
self.assertEqual(cm.get_key(C1), cm.get_key(C))
self.assertEqual(cm.get_ltable(C1).equals(A), True)
self.assertEqual(cm.get_rtable(C1).equals(B), True)
self.assertEqual(cm.get_fk_ltable(C1), cm.get_fk_ltable(C))
self.assertEqual(cm.get_fk_rtable(C1), cm.get_fk_rtable(C))
@raises(AssertionError)
def test_copy_properties_invalid_tar_df(self):
A = read_csv_metadata(path_a)
cm.copy_properties(A, None)
@raises(AssertionError)
def test_copy_properties_invalid_src_df(self):
A = read_csv_metadata(path_a)
cm.copy_properties(None, A)
def test_copy_properties_update_false_1(self):
A = read_csv_metadata(path_a)
A1 = read_csv_metadata(path_a)
status=cm.copy_properties(A, A1, replace=False)
self.assertEqual(status, False)
def test_copy_properties_update_false_2(self):
A = read_csv_metadata(path_a)
A1 = pd.read_csv(path_a)
cm.copy_properties(A, A1, replace=False)
p = cm.get_all_properties(A)
p1 = cm.get_all_properties(A1)
self.assertEqual(p, p1)
self.assertEqual(cm.get_key(A1), cm.get_key(A))
@raises(KeyError)
def test_copy_properties_src_df_notin_catalog(self):
A = pd.read_csv(path_a)
A1 = pd.read_csv(path_a)
cm.copy_properties(A, A1)
def test_get_key_valid(self):
A = pd.read_csv(path_a)
cm.set_key(A, 'ID')
self.assertEqual(cm.get_key(A), 'ID')
@raises(AssertionError)
def test_get_key_invalid_df(self):
cm.get_key(None)
@raises(KeyError)
def test_get_key_df_notin_catalog(self):
A = pd.read_csv(path_a)
cm.get_key(A)
def test_set_key_valid(self):
A = pd.read_csv(path_a)
cm.set_key(A, 'ID')
self.assertEqual(cm.get_key(A), 'ID')
@raises(AssertionError)
def test_set_key_invalid_df(self):
cm.set_key(None, 'ID')
@raises(KeyError)
def test_set_key_notin_df(self):
A = pd.read_csv(path_a)
cm.set_key(A, 'ID1')
def test_set_key_with_dupids(self):
p = os.sep.join([catalog_datasets_path, 'A_dupid.csv'])
A = pd.read_csv(p)
status = cm.set_key(A, 'ID')
self.assertEqual(status, False)
def test_set_key_with_mvals(self):
p = os.sep.join([catalog_datasets_path, 'A_mvals.csv'])
A = pd.read_csv(p)
status = cm.set_key(A, 'ID')
self.assertEqual(status, False)
def test_get_fk_ltable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
self.assertEqual(cm.get_fk_ltable(C), cm.get_property(C, 'fk_ltable'))
self.assertEqual(cm.get_fk_ltable(C), 'ltable_ID')
@raises(AssertionError)
def test_get_fk_ltable_invalid_df(self):
cm.get_fk_ltable(None)
def test_get_fk_rtable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
self.assertEqual(cm.get_fk_rtable(C), cm.get_property(C, 'fk_rtable'))
self.assertEqual(cm.get_fk_rtable(C), 'rtable_ID')
@raises(AssertionError)
def test_get_fk_rtable_invalid_df(self):
cm.get_fk_rtable(None)
def test_set_fk_ltable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = pd.read_csv(path_c)
cm.set_fk_ltable(C, 'ltable_ID')
self.assertEqual(cm.get_fk_ltable(C), 'ltable_ID')
@raises(AssertionError)
def test_set_fk_ltable_invalid_df(self):
cm.set_fk_ltable(None, 'ltable_ID')
@raises(KeyError)
def test_set_fk_ltable_invalid_col(self):
C = pd.read_csv(path_c)
cm.set_fk_ltable(C, 'ltable_ID1')
def test_set_fk_rtable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = pd.read_csv(path_c)
cm.set_fk_rtable(C, 'rtable_ID')
self.assertEqual(cm.get_fk_rtable(C), 'rtable_ID')
@raises(AssertionError)
def test_set_fk_rtable_invalid_df(self):
cm.set_fk_rtable(None, 'rtable_ID')
@raises(KeyError)
def test_set_fk_rtable_invalid_col(self):
C = pd.read_csv(path_c)
cm.set_fk_rtable(C, 'rtable_ID1')
def test_validate_and_set_fk_ltable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = pd.read_csv(path_c)
cm.validate_and_set_fk_ltable(C, 'ltable_ID', A, 'ID')
self.assertEqual(cm.get_fk_ltable(C), 'ltable_ID')
def test_validate_and_set_fk_ltable_err_case_1(self):
C = pd.read_csv(path_c)
p = os.sep.join([catalog_datasets_path, 'A_dupid.csv'])
A = pd.read_csv(p)
status = cm.validate_and_set_fk_ltable(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
self.assertEqual(cm.is_dfinfo_present(C), False)
def test_validate_and_set_fk_ltable_err_case_2(self):
C = pd.read_csv(path_c)
p = os.sep.join([catalog_datasets_path, 'A_inv_fk.csv'])
A = pd.read_csv(p)
status = cm.validate_and_set_fk_ltable(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
self.assertEqual(cm.is_dfinfo_present(C), False)
def test_validate_and_set_fk_rtable_valid(self):
A = read_csv_metadata(path_a)
C = pd.read_csv(path_c)
cm.validate_and_set_fk_rtable(C, 'ltable_ID', A, 'ID')
self.assertEqual(cm.get_fk_rtable(C), 'ltable_ID')
def test_validate_and_set_fk_rtable_err_case_1(self):
C = pd.read_csv(path_c)
p = os.sep.join([catalog_datasets_path, 'A_dupid.csv'])
A = pd.read_csv(p)
status = cm.validate_and_set_fk_rtable(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
self.assertEqual(cm.is_dfinfo_present(C), False)
def test_validate_and_set_fk_rtable_err_case_2(self):
C = pd.read_csv(path_c)
p = os.sep.join([catalog_datasets_path, 'A_inv_fk.csv'])
A = pd.read_csv(p)
status = cm.validate_and_set_fk_rtable(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
self.assertEqual(cm.is_dfinfo_present(C), False)
# def test_get_reqd_metadata_from_catalog_valid_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_reqd_metadata_from_catalog(A, 'key')
# self.assertEqual(d['key'], cm.get_key(A))
#
# def test_get_reqd_metadata_from_catalog_valid_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_reqd_metadata_from_catalog(A, ['key'])
# self.assertEqual(d['key'], cm.get_key(A))
#
# def test_get_reqd_metadata_from_catalog_valid_3(self):
# A = read_csv_metadata(path_a)
# B = read_csv_metadata(path_b, key='ID')
# C = read_csv_metadata(path_c, ltable=A, rtable=B)
# d = cm.get_reqd_metadata_from_catalog(C, ['key', 'fk_ltable', 'fk_rtable', 'ltable', 'rtable'])
# self.assertEqual(d['key'], cm.get_key(C))
# self.assertEqual(d['fk_ltable'], cm.get_fk_ltable(C))
# self.assertEqual(d['fk_rtable'], cm.get_fk_rtable(C))
# self.assertEqual(cm.get_ltable(C).equals(A), True)
# self.assertEqual(cm.get_rtable(C).equals(B), True)
#
# @raises(AssertionError)
# def test_get_reqd_metadata_from_catalog_err_1(self):
# cm.get_reqd_metadata_from_catalog(None, ['key'])
#
# @raises(AssertionError)
# def test_get_reqd_metadata_from_catalog_err_2(self):
# A = read_csv_metadata(path_a)
# B = read_csv_metadata(path_b, key='ID')
# C = read_csv_metadata(path_c, ltable=A, rtable=B)
# d = cm.get_reqd_metadata_from_catalog(C, ['key', 'fk_ltable1', 'fk_rtable', 'ltable', 'rtable'])
#
#
# def test_update_reqd_metadata_with_kwargs_valid_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# metadata = {}
# cm._update_reqd_metadata_with_kwargs(metadata, d, ['key'])
# self.assertEqual(metadata['key'], d['key'])
#
# def test_update_reqd_metadata_with_kwargs_valid_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# metadata = {}
# cm._update_reqd_metadata_with_kwargs(metadata, d, 'key')
# self.assertEqual(metadata['key'], d['key'])
#
# @raises(AssertionError)
# def test_update_reqf_metadata_with_kwargs_invalid_dict_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# cm._update_reqd_metadata_with_kwargs(None, d, 'key')
#
# @raises(AssertionError)
# def test_update_reqf_metadata_with_kwargs_invalid_dict_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# cm._update_reqd_metadata_with_kwargs(d, None, 'key')
#
# @raises(AssertionError)
# def test_update_reqd_metadata_with_kwargs_invalid_elts(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# metadata = {}
# cm._update_reqd_metadata_with_kwargs(metadata, d, ['key1'])
# def test_get_diff_with_reqd_metadata_valid_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# d1 = cm._get_diff_with_required_metadata(d, 'key1')
# self.assertEqual(len(d1), 1)
#
# def test_get_diff_with_reqd_metadata_valid_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# d1 = cm._get_diff_with_required_metadata(d, ['key1'])
# self.assertEqual(len(d1), 1)
#
# @raises(AssertionError)
# def test_get_diff_with_reqd_metadata_invalid_dict(self):
# d1 = cm._get_diff_with_required_metadata(None, ['key1'])
# def test_is_all_reqd_metadata_present_valid_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# self.assertEqual(cm.is_all_reqd_metadata_present(d, 'key'),True)
#
# def test_is_all_reqd_metadata_present_valid_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# self.assertEqual(cm.is_all_reqd_metadata_present(d, ['key']),True)
#
# def test_is_all_reqd_metadata_present_valid_3(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# self.assertEqual(cm.is_all_reqd_metadata_present(d, ['key1']), False)
#
# @raises(AssertionError)
# def test_is_all_reqd_metadata_present_invalid_dict(self):
# cm.is_all_reqd_metadata_present(None, 'key')
def test_show_properties_for_df_valid_1(self):
A = read_csv_metadata(path_a)
cm.show_properties(A)
def test_show_properties_for_df_valid_2(self):
A = pd.read_csv(path_a)
cm.show_properties(A)
def test_show_properties_for_df_valid_3(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
cm.show_properties(C)
def test_show_properties_for_objid_valid_1(self):
A = read_csv_metadata(path_a)
cm.show_properties_for_id(id(A))
@raises(KeyError)
def test_show_properties_for_objid_err_1(self):
A = pd.read_csv(path_a)
cm.show_properties_for_id(id(A))
def test_show_properties_for_objid_valid_3(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
cm.show_properties_for_id(id(C))
def test_validate_metadata_for_table_valid_1(self):
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, 'ID', 'table', None, False)
self.assertEqual(status, True)
def test_validate_metadata_for_table_valid_2(self):
import logging
logger = logging.getLogger(__name__)
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, 'ID', 'table', logger, True)
self.assertEqual(status, True)
@raises(AssertionError)
def test_validate_metadata_for_table_invalid_df(self):
status = cm._validate_metadata_for_table(None, 'ID', 'table', None, False)
@raises(KeyError)
def test_validate_metadata_for_table_key_notin_catalog(self):
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, 'ID1', 'table', None, False)
@raises(KeyError)
def test_validate_metadata_for_table_key_notstring(self):
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, None, 'table', None, False)
@raises(AssertionError)
def test_validate_metadata_for_table_key_notvalid(self):
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, 'zipcode', 'table', None, False)
def test_validate_metadata_for_candset_valid_1(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtable_ID', A, B, 'ID', 'ID', None, False)
self.assertEqual(status, True)
@raises(AssertionError)
def test_validate_metadata_for_candset_invalid_df(self):
status = cm._validate_metadata_for_candset(None, '_id', 'ltable_ID', 'rtable_ID', None, None,
'ID', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_id_notin(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
status = cm._validate_metadata_for_candset(C, 'id', 'ltable_ID', 'rtable_ID', A, B, 'ID', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_fk_ltable_notin(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
status = cm._validate_metadata_for_candset(C, '_id', 'ltableID', 'rtable_ID', A, B, 'ID', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_fk_rtable_notin(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtableID', A, B, 'ID', 'ID', None, False)
@raises(AssertionError)
def test_validate_metadata_for_candset_invalid_ltable(self):
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtable_ID', None, B, 'ID', 'ID', None, False)
@raises(AssertionError)
def test_validate_metadata_for_candset_invalid_rtable(self):
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtable_ID', B, None, 'ID', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_lkey_notin_ltable(self):
A = pd.read_csv(path_a)
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtable_ID', A, B, 'ID1', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_rkey_notin_rtable(self):
A = pd.read_csv(path_a)
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtable_ID', A, B, 'ID', 'ID1', None, False)
def test_get_keys_for_ltable_rtable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
l_key, r_key = cm.get_keys_for_ltable_rtable(A, B, None, False)
self.assertEqual(l_key, 'ID')
self.assertEqual(r_key, 'ID')
@raises(AssertionError)
def test_get_keys_for_ltable_rtable_invalid_ltable(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
l_key, r_key = cm.get_keys_for_ltable_rtable(None, B, None, False)
@raises(AssertionError)
def test_get_keys_for_ltable_rtable_invalid_rtable(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
l_key, r_key = cm.get_keys_for_ltable_rtable(A, None, None, False)
def test_get_metadata_for_candset_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key = cm.get_metadata_for_candset(C, None, False)
self.assertEqual(key, '_id')
self.assertEqual(fk_ltable, 'ltable_ID')
self.assertEqual(fk_rtable, 'rtable_ID')
self.assertEqual(l_key, 'ID')
self.assertEqual(r_key, 'ID')
self.assertEqual(ltable.equals(A), True)
self.assertEqual(rtable.equals(B), True)
@raises(AssertionError)
def test_get_metadata_for_candset_invalid_df(self):
cm.get_metadata_for_candset(None, None, False)
#--- catalog ---
def test_catalog_singleton_isinstance(self):
from py_entitymatching.catalog.catalog import Singleton
x = Singleton(object)
x.__instancecheck__(object)
@raises(TypeError)
def test_catalog_singleton_call(self):
from py_entitymatching.catalog.catalog import Singleton
x = Singleton(object)
x.__call__()
# -- catalog helper --
def test_check_attrs_present_valid_1(self):
A = pd.read_csv(path_a)
status = ch.check_attrs_present(A, 'ID')
self.assertEqual(status, True)
def test_check_attrs_present_valid_2(self):
A = pd.read_csv(path_a)
status = ch.check_attrs_present(A, ['ID'])
self.assertEqual(status, True)
def test_check_attrs_present_valid_3(self):
A = pd.read_csv(path_a)
status = ch.check_attrs_present(A, ['_ID'])
self.assertEqual(status, False)
@raises(AssertionError)
def test_check_attrs_present_invalid_df(self):
ch.check_attrs_present(None, 'ID')
def test_check_attrs_invalid_None(self):
A = pd.read_csv(path_a)
status = ch.check_attrs_present(A, None)
self.assertEqual(status, False)
@raises(AssertionError)
def test_are_all_attrs_present_invalid_df(self):
ch.are_all_attrs_in_df(None, 'id')
def test_are_all_attrs_present_invalid_None(self):
A = pd.read_csv(path_a)
status = ch.are_all_attrs_in_df(A, None)
self.assertEqual(status, False)
def test_is_attr_unique_valid_1(self):
A = pd.read_csv(path_a)
status = ch.is_attr_unique(A, 'ID')
self.assertEqual(status, True)
def test_is_attr_unique_valid_2(self):
A = pd.read_csv(path_a)
status = ch.is_attr_unique(A, 'zipcode')
self.assertEqual(status, False)
@raises(AssertionError)
def test_is_attr_unique_invalid_df(self):
ch.is_attr_unique(None, 'zipcode')
@raises(AssertionError)
def test_is_attr_unique_invalid_attr(self):
A = pd.read_csv(path_a)
ch.is_attr_unique(A, None)
def test_does_contain_missing_values_valid_1(self):
A = pd.read_csv(path_a)
status = ch.does_contain_missing_vals(A, 'ID')
self.assertEqual(status, False)
def test_does_contain_missing_values_valid_2(self):
p = os.sep.join([catalog_datasets_path, 'A_mvals.csv'])
A = pd.read_csv(p)
status = ch.does_contain_missing_vals(A, 'ID')
self.assertEqual(status, True)
@raises(AssertionError)
def test_does_contain_missing_values_invalid_df(self):
ch.does_contain_missing_vals(None, 'zipcode')
@raises(AssertionError)
def test_does_invalid_attr(self):
A = pd.read_csv(path_a)
ch.does_contain_missing_vals(A, None)
def test_is_key_attribute_valid_1(self):
A = pd.read_csv(path_a)
status = ch.is_key_attribute(A, 'ID', True)
self.assertEqual(status, True)
def test_is_key_attribute_valid_2(self):
A = pd.read_csv(path_a)
status = ch.is_key_attribute(A, 'zipcode', True)
self.assertEqual(status, False)
def test_is_key_attribute_valid_3(self):
p = os.sep.join([catalog_datasets_path, 'A_mvals.csv'])
A = pd.read_csv(p)
status = ch.is_key_attribute(A, 'ID', True)
self.assertEqual(status, False)
def test_is_key_attribute_valid_4(self):
A = pd.DataFrame(columns=['id', 'name'])
status = ch.is_key_attribute(A, 'id')
self.assertEqual(status, True)
@raises(AssertionError)
def test_is_key_attribute_invalid_df(self):
ch.is_key_attribute(None, 'id')
@raises(AssertionError)
def test_is_key_attribute_invalid_attr(self):
A = pd.read_csv(path_a)
ch.is_key_attribute(A, None)
def test_check_fk_constraint_valid_1(self):
A = pd.read_csv(path_a)
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
status = ch.check_fk_constraint(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, True)
status = ch.check_fk_constraint(C, 'rtable_ID', B, 'ID')
self.assertEqual(status, True)
@raises(AssertionError)
def test_check_fk_constraint_invalid_foreign_df(self):
ch.check_fk_constraint(None, 'rtable_ID', pd.DataFrame(), 'ID')
@raises(AssertionError)
def test_check_fk_constraint_invalid_base_df(self):
ch.check_fk_constraint(pd.DataFrame(), 'rtable_ID', None, 'ID')
@raises(AssertionError)
def test_check_fk_constraint_invalid_base_attr(self):
ch.check_fk_constraint(pd.DataFrame(), 'rtable_ID', pd.DataFrame(), None)
@raises(AssertionError)
def test_check_fk_constraint_invalid_foreign_attr(self):
ch.check_fk_constraint(pd.DataFrame(), None, pd.DataFrame(), 'ID')
def test_check_fk_constraint_invalid_attr_notin(self):
A = pd.read_csv(path_a)
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
status = ch.check_fk_constraint(C, 'ltable_ID', A, 'ID1')
self.assertEqual(status, False)
def test_check_fk_constraint_invalid_attr_mval(self):
A = pd.read_csv(path_a)
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
C.loc[0, 'ltable_ID'] = float('nan')
status = ch.check_fk_constraint(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
def test_does_contain_rows_valid_1(self):
A = pd.read_csv(path_a)
status = ch.does_contain_rows(A)
self.assertEqual(status, True)
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import numba
import numpy as np
import pandas as pd
import platform
import pyarrow.parquet as pq
import random
import string
import unittest
from pandas.api.types import CategoricalDtype
import sdc
from sdc.str_arr_ext import StringArray
from sdc.tests.test_base import TestCase
from sdc.tests.test_utils import (count_array_OneDs,
count_array_REPs,
count_parfor_OneDs,
count_parfor_REPs,
dist_IR_contains,
get_start_end,
skip_numba_jit)
class TestJoin(TestCase):
@skip_numba_jit
def test_join1(self):
def test_impl(n):
df1 = pd.DataFrame({'key1': np.arange(n) + 3, 'A': np.arange(n) + 1.0})
df2 = pd.DataFrame({'key2': 2 * np.arange(n) + 1, 'B': n + np.arange(n) + 1.0})
df3 = pd.merge(df1, df2, left_on='key1', right_on='key2')
return df3.B.sum()
hpat_func = self.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
n = 11111
self.assertEqual(hpat_func(n), test_impl(n))
@skip_numba_jit
def test_join1_seq(self):
def test_impl(df1, df2):
df3 = df1.merge(df2, left_on='key1', right_on='key2')
return df3
hpat_func = self.jit(test_impl)
n = 11
df1 = pd.DataFrame({'key1': np.arange(n) + 3, 'A': np.arange(n) + 1.0})
df2 = pd.DataFrame({'key2': 2 * np.arange(n) + 1, 'B': n + np.arange(n) + 1.0})
pd.testing.assert_frame_equal(hpat_func(df1, df2), test_impl(df1, df2))
n = 11111
df1 = pd.DataFrame({'key1': np.arange(n) + 3, 'A': np.arange(n) + 1.0})
df2 = pd.DataFrame({'key2': 2 * np.arange(n) + 1, 'B': n + np.arange(n) + 1.0})
pd.testing.assert_frame_equal(hpat_func(df1, df2), test_impl(df1, df2))
@skip_numba_jit
def test_join1_seq_str(self):
def test_impl():
df1 = pd.DataFrame({'key1': ['foo', 'bar', 'baz']})
df2 = pd.DataFrame({'key2': ['baz', 'bar', 'baz'], 'B': ['b', 'zzz', 'ss']})
df3 = pd.merge(df1, df2, left_on='key1', right_on='key2')
return df3.B
hpat_func = self.jit(test_impl)
self.assertEqual(set(hpat_func()), set(test_impl()))
@skip_numba_jit
def test_join1_seq_str_na(self):
# test setting NA in string data column
def test_impl():
df1 = pd.DataFrame({'key1': ['foo', 'bar', 'baz']})
df2 = pd.DataFrame({'key2': ['baz', 'bar', 'baz'], 'B': ['b', 'zzz', 'ss']})
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, confusion_matrix
from lifelines import CoxPHFitter
from datautils.dataset import Dataset
from datautils.data import Data
from datautils.helper import save_output
from tqdm import tqdm
import argparse
#%%
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--imputation_mode', default="mean")
arg_parser.add_argument('--seed', type=int, default=42)
arg_parser.add_argument('--positive_weight', type=int, default=56)
ARGS = arg_parser.parse_args()
#%%
print(f"Running Cox with imputation_mode = {ARGS.imputation_mode}, seed = {ARGS.seed}")
print('Arguments:', ARGS)
#%%
dataset = Dataset("data/challenge_data",
batchSize=100,
train_ratio=0.8,
normalize=True,
padding=False,
imputeForward=(False if ARGS.imputation_mode == "mean" else True),
calculateDelay=False,
seed=ARGS.seed)
#%%
columns = list(dataset.train_data.features.keys())[:-2]
# dataset.train_data.x.shape
# dataset.val_data.x.shape
# dataset.test_data.x.shape
#%%
# create windowing system here
T = 6
#idx = 10
def process_data(d: Data, T: int) -> (pd.DataFrame, np.array):
npa = d.x
target_npa = d.y
processed = []
labels = []
print("* Processing data...")
for idx in tqdm(range(npa.shape[0])):
if target_npa[idx].sum() == 0:
processed.extend([[row,7,1] for row in npa[idx]])
else:
sepsis_count = 0
for i in range(npa[idx].shape[0]):
t = (T + 1) - sepsis_count
t = t if t >= 1 else 1
s = 1 if t > T else 0
processed.append([npa[idx][i],t,s])
sepsis_count += 1 if target_npa[idx][i][0] == 1 else 0
labels.extend(target_npa[idx].flatten().tolist())
return (pd.DataFrame(processed, columns=["x", "t", "s"]), np.array(labels))
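# Illustrative usage (hedged; the variable names below are assumptions, not from the
# original script): the survival frame and per-time-step labels could be built as
#   train_df, train_labels = process_data(dataset.train_data, T)
# where train_df has one row per time step with columns "x" (feature vector),
# "t" (time-to-event, capped at T + 1) and "s" (event indicator) for the Cox model,
# and train_labels holds the flattened sepsis labels.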
import os
import logging
from datetime import datetime, timedelta
import configparser
from data import \
download_yahoo_data,\
map_tickers,\
generate_rsi_features,\
add_targets_and_split, \
get_rsi_feature_names
import joblib
import numerapi
import pandas as pd
from sklearn.ensemble import GradientBoostingRegressor
from dateutil.relativedelta import relativedelta, FR
TARGET_NAME = "target"
PREDICTION_NAME = "signal"
TRAINED_MODEL_PREFIX = './trained_model'
# Pull model id from "MODEL_ID" environment variable
# defaults to None, change to a model id from
MODEL_ID = os.getenv('MODEL_ID', None)
MODEL = GradientBoostingRegressor(subsample=0.1)
napi = numerapi.SignalsAPI()
def download_data(live_data_date):
eligible_tickers = pd.Series(napi.ticker_universe(), name="bloomberg_ticker")
logging.info(f"Number of eligible tickers: {len(eligible_tickers)}")
yfinance_tickers = map_tickers(eligible_tickers, "bloomberg_ticker", "yahoo")
logging.info(f"Number of yahoo tickers: {len(yfinance_tickers)}")
num_days_lag = 5
if os.path.exists('full_data.csv'):
full_data = pd.read_csv('full_data.csv')
from __future__ import annotations
import numpy as np
import pandas as pd
from lamarck.utils import objective_ascending_map
def rank_formatter(name):
def deco(rank_func):
def wrapper(obj, *a, **kw):
return rank_func(obj, *a, **kw).astype(int).rename(name)
return wrapper
return deco
class RankCalculator:
"""
Fitness calculations based on the simulation results.
"""
results: pd.DataFrame
out: list | str
def __init__(self, results_df: pd.DataFrame = pd.DataFrame(), out: list | str = ''):
self.update(results_df=results_df, out=out)
def update(self, results_df: pd.DataFrame = None, out: list | str = None) -> None:
if results_df is not None:
self.results = results_df.copy()
if out is not None:
self.out = out
@rank_formatter('Rank')
def single(self, objective: str) -> pd.Series:
"""
Ranks one `output` to optimize according to a defined `objective`.
"""
return self.results[self.out]\
.rank(method='min', ascending=objective_ascending_map[objective])
@rank_formatter('Rank')
def ranked(self, objectives: list[str]) -> pd.Series:
"""
Get the Gene Ranks based on a set of `outputs` and `objectives` in order of priority.
"""
ranks = [
self.results[priority].rank(method='min',
ascending=objective_ascending_map[objective])
for priority, objective in zip(self.out, objectives)]
rank = ranks[-1]
for r in ranks[::-1]:
order = int(np.log10(r.max())) + 1
factor = 10**order
rscore = r * factor + rank
rank = rscore.rank(method='min')
return rank
@rank_formatter('Rank')
def pareto(self, objectives: list[str]) -> pd.Series:
"""
Get the Pareto Ranks based on the `pareto fronts` and the `crowds` Series.
"""
fronts = self.pareto_fronts(objectives)
crowds = self.pareto_crowds(fronts)
r1 = fronts.rank(method='dense', ascending=True)
r2 = crowds.rank(method='dense', ascending=False)
order1 = int(np.log10(r2.max())) + 1
factor1 = 10**order1
return (r1 * factor1 + r2).rank(method='min')
def pareto_fronts(self, objectives: list[str]) -> pd.Series:
"""
Get the Pareto Fronts.
"""
norm_df = normalize_df_by_objective(self.results, self.out, objectives)
dominators = get_dominators(norm_df)
return get_fronts(dominators).rename('Front')
def pareto_crowds(self, fronts: pd.Series) -> pd.Series:
"""
Get the Pareto Crowds.
"""
frontvals = sorted(fronts.unique())
crowds = pd.Series(np.zeros(len(self.results[self.out])), index=self.results.index)
for front in frontvals:
f = fronts == front
crowds[f] = get_crowd(self.results[f])
return crowds.rename('Crowd')
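# Minimal usage sketch (hedged; assumes objective_ascending_map maps 'min' to
# ascending ranking, as relied upon by single() above):
#   rc = RankCalculator(results_df=pd.DataFrame({'cost': [3.0, 1.0, 2.0]}), out='cost')
#   rc.single('min')   # expected ranks: 3, 1, 2 -- the lowest cost gets rank 1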
def normalize_series_by_objective(series, objective):
maxval = series.max()
minval = series.min()
data_range = maxval - minval
abs_series = series - minval
if objective == 'max':
norm_series = abs_series/data_range
elif objective == 'min':
norm_series = 1 - abs_series/data_range
return norm_series
def normalize_df_by_objective(df, outputs, objectives):
data_dict = {
output: normalize_series_by_objective(df[output], objective)
for output, objective in zip(outputs, objectives)
}
return pd.DataFrame(data_dict, index=df.index)
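# Small worked example (illustrative, not part of the original module): with out_a
# maximized and out_b minimized, both columns are rescaled to [0, 1] with 1 = best.
#   df = pd.DataFrame({'out_a': [1.0, 2.0, 3.0], 'out_b': [10.0, 20.0, 30.0]})
#   normalize_df_by_objective(df, ['out_a', 'out_b'], ['max', 'min'])
#   # out_a -> 0.0, 0.5, 1.0   out_b -> 1.0, 0.5, 0.0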
def get_dominators(normalized_df):
"""
Get the `dominators` based on the `normalized_by_objective` df.
"""
def dominator_mapper(row):
diff = normalized_df - row
f_equals = (diff == 0).all(axis=1)
f_dominant = (diff >= 0).all(axis=1)
return normalized_df.index[~f_equals & f_dominant]
return normalized_df.apply(dominator_mapper, axis=1)
def get_fronts(dominators):
"""
Get the array of `front` values based on the `dominators` array.
"""
def isin_deco(arr):
def isin(row):
return row.isin(arr).all()
return isin
dom_arr = np.array([])
front = 1
fronts = pd.Series(np.zeros(len(dominators)), index=dominators.index)
for _ in range(9):
isin = isin_deco(dom_arr)
f = dominators.apply(isin) & (fronts == 0)
fronts[f] = front
dom_arr = np.concatenate((dom_arr, f[f].index.to_numpy()))
front += 1
fronts[fronts == 0] = front
return fronts.astype(int)
def get_crowd(df):
s = pd.Series(np.zeros(len(df)), index=df.index)
for _, cs in df.iteritems():
infval = pd.Series([np.inf])
"""
Optimizer Class Constructs Mean-Variance Related Optimization Problems with Constraints
2 Major Functionality:
- Optimize Weight based on Constraints & Objectives
- Simulate Random Weight Scenarios
For the first functionality, objectives and constraints are added with the following methods.
- add_objective()
- add_constraint()
For the second functionality, all the weight-related constraints can be passed in as arguments in the following method:
- simulate()
"""
import math
import warnings
import inspect
from collections import defaultdict
import numpy as np
import pandas as pd
from scipy.optimize import minimize
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
from .Exceptions import *
from .Constraints import ConstraintGenerator as ConstGen
from .Objectives import ObjectiveGenerator as ObjGen
from .Metrics import MetricGenerator as MetGen
class Optimizer:
def __init__(self, ret_data, moment_data, beta_data=None, asset_names=None):
"""
Initializes an Optimizer instance with data
Default constraints are: weight bound is (0,1), Leverage is 1 (Long only portfolio without additional margin)
:param ret_data: pd.DataFrame/np.ndarray, return data
:param moment_data: pd.DataFrame/np.ndarray, moment data (covariance/coskewness/... matrix)
:param beta_data: pd.Series/np.ndarray, optional, beta data for each asset
If not provided then beta related constraints/objectives can not be optimized/simulated
:param asset_names: List[str], optional, list of asset names
"""
self.ret_vec, self.moment_mat, self.assets, self.moment, self.beta_vec = Optimizer.init_checker(ret_data, moment_data,
asset_names, beta_data)
self.weight_sols = None
self.objective = None
self.objective_sol = None
self.objective_args = None
self.obj_creator = ObjGen(self.ret_vec, self.moment_mat, self.moment, self.assets, self.beta_vec)
self.const_creator = ConstGen(self.ret_vec, self.moment_mat, self.moment, self.assets, self.beta_vec)
self.metric_creator = MetGen(self.ret_vec, self.moment_mat, self.moment, self.assets, self.beta_vec)
self.bounds, self.constraints = self.const_creator.create_constraint('weight', weight_bound=(0, 1), leverage=1)
self.leverage = 1
def add_objective(self, objective_type, **kwargs):
"""
Add an objective to the optimization problem. Call objective_options() to check all available options.
You can also input a customized objective by setting objective_type="custom".
The custom_func should follow the parameter structure of custom_func(w, **kwargs)
The limitation with custom_func is it cannot call the moment matrix/return matrix/beta vector that are passed into MetricGenerator
:param objective_type: str, objective name
:param kwargs: arguments to be passed into the objective when performing optimization
"""
if objective_type != "custom":
self.objective_args = tuple(kwargs.values())
self.objective = self.obj_creator.create_objective(objective_type, **kwargs)
else:
self.objective_args = tuple(kwargs.values())[1:]
self.objective = tuple(kwargs.values())[0]
def add_constraint(self, constraint_type, **kwargs):
"""
Add a constraint to the optimization problem. Call constraint_options() to check all available options.
You can also input a customized constraint by setting constraint_type="custom".
The custom_func should follow the parameter structure of custom_func(w, **kwargs)
The limitation with custom_func is it cannot call the moment matrix/return matrix/beta vector that are passed into MetricGenerator
:param constraint_type: str, constraint name
:param kwargs: arguments to be passed into the constraints
"""
if constraint_type == "custom":
self.constraints += tuple(kwargs.values())[0]
elif constraint_type == "weight":
bound, leverage = self.const_creator.create_constraint(constraint_type, **kwargs)
self.bounds = bound
self.leverage = kwargs['leverage']
self.constraints[0] = leverage[0] # Total Leverage is always the first constraint
else:
self.constraints += self.const_creator.create_constraint(constraint_type, **kwargs)
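# Typical call sequence (hedged sketch; '<objective_name>' is a placeholder --
# call objective_options()/constraint_options() for the names actually supported):
#   opt = Optimizer(ret_data, moment_data)
#   opt.add_objective('<objective_name>')
#   opt.add_constraint('weight', weight_bound=(0, 0.2), leverage=1)
#   opt.solve()
#   weights, metrics = opt.summary()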
def clear(self, clear_obj=True, clear_constraints=True):
"""
Clear the optimization problem
:param clear_obj: bool, Clear the objective
:param clear_constraints: bool, clear the constraints. Note that the weight bound and leverage default back to (0, 1) and 1 after clearance
"""
if clear_constraints:
self.constraints = []
self.bounds, self.constraints = self.const_creator.create_constraint('weight', weight_bound=(0,1), leverage=1)
if clear_obj:
self.objective = None
def solve(self, x0=None, round_digit=4, **kwargs):
"""
Solves the optimization problem
:param x0: np.ndarray, default=None User can pass in an initial guess to avoid scipy from running into local minima
:param round_digit: int, default=4, round portfolio weight
:param kwargs: arguments for method clear()
"""
if type(self.objective) != np.ndarray:
res = minimize(self.objective, x0 = ConstGen.gen_random_weight(self.ret_vec.shape[0], self.bounds, self.leverage) if x0 is None else x0, options={'maxiter': 1000},
constraints=self.constraints, bounds=self.bounds, args=self.objective_args)
if not res.success:
self.clear(**kwargs)
raise OptimizeException(f"""Optimization has failed. Error Message: {res.message}.
Please adjust constraints/objectives or input an initial guess.""")
self.clear(**kwargs)
self.weight_sols = np.round(res.x, round_digit) + 0
else:
warnings.warn(f"""The problem formulated is not an optimization problem and is calculated numerically""")
self.weight_sols = np.round(self.objective, round_digit) + 0
self.clear(**kwargs)
def summary(self, risk_free=None, market_return=None, top_holdings=None, round_digit=4):
"""
Returns a tuple of dictionaries - Weight dictionary, Metrics Dictionary
:param risk_free: float, default=None, if a float is passed in, additional metrics are computed in the summary
:param market_return: float, default=None, if a float is passed in, additional metrics are computed in the summary
:param top_holdings: int, default=None, number of top holdings; if passed in, a concentration metric is computed in the summary
:param round_digit: int, round the metrics to the xth decimal place
:return: tuple[dict]
"""
moment_dict = defaultdict(lambda: "Moment")
moment_dict[3] = "Skewness"
moment_dict[4] = "Kurtosis"
weight_dict = dict(zip(self.assets, self.weight_sols))
metric_dict = {'Expected Return': self.metric_creator.expected_return(self.weight_sols),
"Leverage": self.metric_creator.leverage(self.weight_sols),
"Number of Holdings": self.metric_creator.num_assets(self.weight_sols)}
# Portfolio Composition
if top_holdings:
metric_dict[f"Top {top_holdings} Holdings Concentrations"] = self.metric_creator.concentration(
self.weight_sols, top_holdings)
# Risk Only
if self.moment == 2:
metric_dict["Volatility"] = self.metric_creator.volatility(self.weight_sols)
# metric_dict["Correlation"] = self.metric_creator.correlation(self.weight_sols)
else:
metric_dict[f'{moment_dict[int(self.moment)]}'] = self.metric_creator.higher_moment(self.weight_sols)
# Risk-Reward
if self.beta_vec is not None:
metric_dict["Portfolio Beta"] = self.metric_creator.beta(self.weight_sols)
if risk_free is not None:
metric_dict["Sharpe Ratio"] = self.metric_creator.sharpe(self.weight_sols, risk_free)
if self.beta_vec is not None and risk_free is not None:
metric_dict["Treynor Ratio"] = self.metric_creator.treynor(self.weight_sols, risk_free)
if market_return is not None:
metric_dict["Jenson's Alpha"] = self.metric_creator.jenson_alpha(self.weight_sols, risk_free, market_return)
for item in metric_dict:
metric_dict[item] = np.round(metric_dict[item], round_digit)
weight_dict = {k: v for k, v in weight_dict.items() if v}
return weight_dict, metric_dict
def simulate(self, x='volatility', y='expected_return', iters=1000, weight_bound=(0,1), leverage=1, ret_format='df',
file_path=None, x_var=None, y_var=None):
"""
Simulate random weight scenarios with flexible x/y variables.
Call metric_options() to see all possible x,y combinations and their respective signature
:param x: str, name of metric 1. If returning a plot will be the x-axis metric
:param y: str, name of metric 2. If returning a plot will be the y-axis metric
:param iters: int, number of simulations
:param weight_bound: tuple/np.ndarray/List[tuple], weight bound
:param leverage: float, total leverage
:param ret_format: str, default='df', additional options ["plotly", "sns"]
If selected sns will return a plt figure
If selected plotly will return a plotly.express figure
If selected df will return a dataframe with x,y, and weight values
:param file_path: str, default=None, path for saving plt figure
:param x_var: dict, optional. Additional parameters needed to compute x
:param y_var: dict, optional. Additional parameters needed to compute y
:return:
"""
if y_var is None:
y_var = dict()
if x_var is None:
x_var = dict()
x_val = np.zeros(iters)
y_val = np.zeros(iters)
weight_vals = np.zeros(shape=(iters, len(self.assets)))
individual_bound = ConstGen.construct_weight_bound(self.ret_vec.shape[0], (0,1), weight_bound)
for it in range(iters):
temp_weights = ConstGen.gen_random_weight(self.ret_vec.shape[0], individual_bound, leverage)
weight_vals[it] = temp_weights
x_val[it] = self.metric_creator.method_dict[x](temp_weights, **x_var)
y_val[it] = self.metric_creator.method_dict[y](temp_weights, **y_var)
if ret_format == 'sns': # Change to plt, fig format
fig, ax = plt.subplots(figsize=(18, 12));
ax = sns.scatterplot(x_val, y_val);
ax.set_title(f"{x} VS {y}")
plt.xlim(x_val.min(), x_val.max());
plt.ylim(y_val.min(), y_val.max());
plt.xlabel(x);
plt.ylabel(y);
if file_path:
plt.savefig(file_path)
plt.show()
else:
res_df = pd.DataFrame(columns=[x] + [y], data=np.concatenate([x_val.reshape(1, -1), y_val.reshape(1, -1)]).T)
res_df = pd.concat([res_df, pd.DataFrame(columns=self.assets, data=weight_vals)], axis=1)
return res_df
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import models, layers
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
dftrain_raw = pd.read_csv('data/titanic/train.csv')
dftest_raw = pd.read_csv('data/titanic/test.csv')
dftrain_raw.head(10)
# %matplotlib inline
# config InlineBackend.figure_format = 'png'
# Visualize the data
## Label distribution
# ax = dftrain_raw['Survived'].value_counts().plot(kind='bar', figsize=(12, 8), fontsize=15, rot=0)
# ax.set_ylabel('Counts', fontsize=15)
# ax.set_xlabel('Survived', fontsize=15)
# plt.show()
## Age distribution
# ax = dftrain_raw['Age'].plot(kind='hist', bins=20, color='purple', figsize=(12, 8), fontsize=15)
# ax.set_xlabel('Frequency', fontsize=15)
# ax.set_ylabel('Age', fontsize=15)
# plt.show()
## Correlation between age and the label
# ax = dftrain_raw.query('Survived == 0')['Age'].plot(kind='density',
# figsize=(12, 8), fontsize=15)
# dftrain_raw.query('Survived==1')['Age'].plot(kind='density', figsize=(12, 8), fontsize=15)
# ax.legend(['Survived==0', 'Survived==1'], fontsize=12)
# ax.set_ylabel('Density', fontsize=15)
# ax.set_xlabel('Age', fontsize=15)
# plt.show()
# Survived: 0 = died, 1 = survived [y label]
# Pclass: passenger ticket class, three values (1, 2, 3) [convert to one-hot encoding]
# Name: passenger name [dropped]
# Sex: passenger sex [convert to a boolean feature]
# Age: passenger age (has missing values) [numeric feature; add an "age missing" flag as an auxiliary feature]
# SibSp: number of siblings/spouses aboard (integer) [numeric feature]
# Parch: number of parents/children aboard (integer) [numeric feature]
# Ticket: ticket number (string) [dropped]
# Fare: ticket fare (float, ranging 0-500) [numeric feature]
# Cabin: passenger cabin (has missing values) [add a "cabin missing" flag as an auxiliary feature]
# Embarked: port of embarkation: S, C, Q (has missing values) [convert to one-hot encoding, four dimensions: S, C, Q, nan]
# Data preprocessing
def preprocessing(dfdata):
dfresult = pd.DataFrame()
# Pclass
dfPclass = pd.get_dummies(dfdata['Pclass'])
dfPclass.columns = ['Pclass_' + str(x) for x in dfPclass.columns]
dfresult = pd.concat([dfresult, dfPclass], axis=1)
# Sex
dfSex = pd.get_dummies(dfdata['Sex'])
dfresult = pd.concat([dfresult, dfSex], axis=1)
# Age
dfresult['Age'] = dfdata['Age'].fillna(0)
dfresult['Age_null'] = pd.isna(dfdata['Age']).astype('int32')
# SibSp, Parch, Face
dfresult['SibSp'] = dfdata['SibSp']
dfresult['Parch'] = dfdata['Parch']
dfresult['Fare'] = dfdata['Fare']
# Cabin
dfresult['Cabin_null'] = pd.isna(dfdata['Cabin']).astype('int32')
# Embarked
dfEmbarked = pd.get_dummies(dfdata['Embarked'], dummy_na=True)
dfEmbarked.columns = ['Embarked_' + str(x) for x in dfEmbarked.columns]
dfresult = pd.concat([dfresult, dfEmbarked], axis=1)
return dfresult
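# Illustrative next step (hedged; the original script continues beyond this point):
#   x_train = preprocessing(dftrain_raw).values
#   y_train = dftrain_raw['Survived'].values
#   x_test = preprocessing(dftest_raw).values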
import streamlit as st
import datetime
import pandas as pd
from plotly.subplots import make_subplots
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
from plotly import graph_objs as go
import json
# App title
st.markdown('''
# Eindhoven STAR (Sound, Temperature, Air Quality, Rain) Environment Dashboard
---
''')
file = 'all_output.json'
with open(file) as train_file:
dict_train = json.load(train_file)
def string_to_dict(dict_string):
# Convert to proper json format
dict_string = dict_string.replace("'", '"').replace('u"', '"')
return json.loads(dict_string)
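# For example (illustrative): a single-quoted dict string such as
# "{'sensor': u'temp', 'value': 21}" becomes valid JSON and loads as
# {'sensor': 'temp', 'value': 21}.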
data = string_to_dict(dict_train)
ticker_data = pd.DataFrame.from_dict(data)
"""
Classes and methods to load datasets.
"""
import numpy as np
import struct
from scipy.misc import imresize
from scipy import ndimage
import os
import os.path
import pandas as pd
import json
from collections import defaultdict
from pathlib import Path as pathlib_path
import pickle
'''
Contains helper methods and classes for loading each dataset.
'''
def sample(data, batch_size):
"""
Generic sampling function with uniform distribution.
data: numpy array or list of numpy arrays
batch_size: sample size
"""
if not isinstance(data, list):
idx = np.random.randint(len(data), size=batch_size)
return idx, data[idx],
else:
n = {len(x) for x in data}
assert len(n) == 1
n = n.pop()
idx = np.random.randint(n, size=batch_size)
return idx, tuple(x[idx] for x in data)
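# Example (illustrative, not part of the original module): drawing 4 aligned rows
# from two arrays -- the same random indices are applied to every array in the list.
#   _x = np.arange(10).reshape(10, 1)
#   _y = np.arange(10)
#   _idx, (_bx, _by) = sample([_x, _y], batch_size=4)
#   # _bx[:, 0] and _by contain the same 4 values in the same order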
class MNIST(object):
"""
Class to load MNIST data.
"""
def __init__(self, ):
self.train_path = '../data/mnist_train'
self.test_path = '../data/mnist_test'
self.train_labels_path = self.train_path + '_labels'
self.test_labels_path = self.test_path + '_labels'
self.Xtr, self.ytr = self._get_data(self.train_path, self.train_labels_path)
self.Xte, self.yte = self._get_data(self.test_path, self.test_labels_path)
self.mu = np.mean(self.Xtr, axis=0)
self.sigma = np.std(self.Xtr, axis=0) + 1e-12
def train_set(self, ):
return self.Xtr, self.ytr
def test_set(self, ):
return self.Xte, self.yte
def sample(self, batch_size, dtype='train', binarize=True):
"""
Samples data from training or test set.
"""
_, (X, Y) = self._sample(dtype, batch_size)
if binarize:
X = self._binarize(X)
return X, Y
def _sample(self, dtype='train', batch_size=100):
"""
Samples data from training set.
"""
if dtype == 'train':
return sample([self.Xtr, self.ytr], batch_size)
elif dtype == 'test':
return sample([self.Xte, self.yte], batch_size)
else:
raise Exception('Training or test set not selected..')
def _binarize(self, data):
"""
Samples bernoulli distribution based on pixel intensities.
"""
return np.random.binomial(n=1, p=data)
def _get_data(self, data_path, labels_path):
"""
Reads MNIST data. Rescales image pixels to be between 0 and 1.
"""
data = self._read_mnist(data_path)
data = data / 255
labels = self._read_mnist(labels_path)
n = len(data)
data = data.reshape([n, -1])
return data, labels
def _read_mnist(self, path):
'''
Function to read MNIST data file, taken from
https://gist.github.com/tylerneylon/ce60e8a06e7506ac45788443f7269e40
'''
with open(path, 'rb') as file:
zero, dtype, dims = struct.unpack('>HBB', file.read(4))
shape = tuple(struct.unpack('>I', file.read(4))[0] for d in range(dims))
data = np.fromstring(file.read(), dtype=np.uint8)
return data.reshape(shape)
class JointMNIST(MNIST):
"""
MNIST data treated as two output variables consisting of the top halves and bottom halves of
each image.
"""
def __init__(self, n_paired):
"""
n_paired: number of paired examples (remaining examples are split into one of top or bottom halves)
"""
super(JointMNIST, self).__init__() # load data
self.n_paired = n_paired
self.split_point = int(784 / 2)
# joint and missing split
_n = len(self.Xtr)
self.x_and_y = set(np.random.randint(_n, size=self.n_paired))
_remain = set(np.arange(_n)) - set(self.x_and_y)
_x_size = int(len(_remain) / 2)
self.x_only = set(np.random.choice(list(_remain), size=_x_size, replace=False))
self.y_only = set(np.array(list(_remain - set(self.x_only))))
def sample(self, batch_size, dtype='train', binarize=True, include_labels=False):
# sample naively
idx, (batch, labels) = self._sample(dtype, batch_size)
if binarize:
batch = self._binarize(batch)
# handle test set case separately
if dtype == 'test':
X = batch[:, 0:self.split_point]
Y = batch[:, self.split_point:]
if include_labels:
return (X, labels), (Y, labels)
else:
return X, Y
# separate indices into paired and missing (for training set)
x_idx = np.array(list(set(idx) & self.x_only))
x_idx = np.array([np.argwhere(idx == x)[0, 0] for x in x_idx], dtype=np.int32)
y_idx = np.array(list(set(idx) & self.y_only))
y_idx = np.array([np.argwhere(idx == x)[0, 0] for x in y_idx], dtype=np.int32)
xy_idx = np.array(list(set(idx) & self.x_and_y))
xy_idx = np.array([np.argwhere(idx == x)[0, 0] for x in xy_idx], dtype=np.int32)
# create separate arrays for jointly observed and marginal data
X = batch[x_idx, 0:self.split_point]
Y = batch[y_idx, self.split_point:]
X_joint = batch[xy_idx, 0:self.split_point]
Y_joint = batch[xy_idx, self.split_point:]
if include_labels: # split label data too
lX = labels[x_idx]
lY = labels[y_idx]
l_joint = labels[xy_idx]
return (X, lX), (Y, lY), (X_joint, l_joint), (Y_joint, l_joint)
else:
return X, Y, X_joint, Y_joint
class JointStratifiedMNIST(MNIST):
"""
MNIST data treated as two output variables consisting of the top halves and bottom halves of
each image. Sampling scheme is stratified across the paired and unpaired datasets.
"""
def __init__(self, n_paired):
"""
n_paired: number of paired examples (remaining examples are split into one of top or bottom halves)
"""
super(JointStratifiedMNIST, self).__init__() # load data
self.n_paired = n_paired
self.split_point = int(784 / 2)
# joint and missing split
_n = len(self.Xtr)
self.x1_and_x2 = np.random.randint(_n, size=self.n_paired)
_remain = set(np.arange(_n)) - set(self.x1_and_x2)
_x_size = int(len(_remain) / 2)
self.x1_only = np.random.choice(list(_remain), size=_x_size, replace=False)
self.x2_only = np.array(list(_remain - set(self.x1_only)))
# separate the datasets
self.x1 = self.Xtr[self.x1_only, 0:self.split_point]
self.y1 = self.ytr[self.x1_only]
self.x2 = self.Xtr[self.x2_only, self.split_point:]
self.y2 = self.ytr[self.x2_only]
self.x12 = self.Xtr[self.x1_and_x2,:]
self.y12 = self.ytr[self.x1_and_x2]
def sample_stratified(self, n_paired_samples, n_unpaired_samples=250, dtype='train',
binarize=True, include_labels=False):
# test set case
if dtype == 'test':
idx, (batch, y) = sample([self.Xte, self.yte], n_paired_samples)
if binarize:
batch = self._binarize(batch)
x1 = batch[:, 0:self.split_point]
x2 = batch[:, self.split_point:]
if include_labels:
return (x1, y), (x2, y)
else:
return x1, x2
# training set case
elif dtype == 'train':
n_min = 2 * n_unpaired_samples // 5
n_min = max(1, n_min)
n_max = n_unpaired_samples - n_min
n_x1 = np.random.randint(low=n_min, high=n_max + 1)
n_x2 = n_unpaired_samples - n_x1
_, (batch_p, y12) = sample([self.x12, self.y12], n_paired_samples)
_, (x1, y1) = sample([self.x1, self.y1], n_x1)
_, (x2, y2) = sample([self.x2, self.y2], n_x2)
if binarize:
batch_p = self._binarize(batch_p)
x1 = self._binarize(x1)
x2 = self._binarize(x2)
x1p = batch_p[:,0:self.split_point]
x2p = batch_p[:,self.split_point:]
if include_labels:
return (x1, y1), (x2, y2), (x1p, y12), (x2p, y12)
else:
return x1, x2, x1p, x2p
class ColouredMNIST(MNIST):
"""
Based on dataset created in the paper: "Unsupervised Image-to-Image Translation Networks"
X dataset consists of MNIST digits with strokes coloured as red, blue, green.
Y dataset consists of MNIST digits transformed to an edge map, and then coloured as orange, magenta, teal.
A small paired dataset consists of a one-to-one mapping between colours in X and colours in Y of the same
MNIST digit.
"""
def __init__(self, n_paired):
"""
n_paired: number of paired examples to create
"""
super(ColouredMNIST, self).__init__() # load data
self.n_paired = n_paired
# colours for X and Y
self.x_colours = [(255, 0, 0), (0, 219, 0), (61, 18, 198)]
self.y_colours = [(255, 211, 0), (0, 191, 43), (0, 41, 191)]
# load from saved if exists
self._path = '../data/mnist_coloured.npz'
if os.path.isfile(self._path):
print("Loading data...", flush=True)
data = np.load(self._path)
self.M1 = data['arr_0']
self.M2 = data['arr_1']
self.M1_test = data['arr_2']
self.M2_test = data['arr_3']
print("Data loaded.", flush=True)
# create modalities if data doesn't exist
else:
self.M1, self.M2 = self._create_modalities(self.Xtr)
self.M1_test, self.M2_test = self._create_modalities(self.Xte)
print("Saving data...", flush=True)
np.savez(self._path, self.M1, self.M2, self.M1_test, self.M2_test)
print("Saved.", flush=True)
# separate indices
_n = len(self.Xtr)
self.x_and_y = set(np.random.randint(_n, size=self.n_paired))
_remain = set(np.arange(_n)) - set(self.x_and_y)
_x_size = int(len(_remain) / 2)
self.x_only = set(np.random.choice(list(_remain), size=_x_size, replace=False))
self.y_only = set(np.array(list(_remain - set(self.x_only))))
def sample(self, batch_size=100, dtype='train', include_labels=False):
"""
Sample minibatch.
"""
idx, (batch, labels) = self._sample(dtype, batch_size)
if dtype == 'test':
X = self.M1_test[idx]
Y = self.M2_test[idx]
X = np.reshape(X, newshape=[-1, 784 * 3])
Y = np.reshape(Y, newshape=[-1, 784 * 3])
if include_labels:
return (X, labels), (Y, labels)
else:
return X, Y
else:
# separate indices into paired and missing (for training set)
x_idx = np.array(list(set(idx) & self.x_only))
x_idx = np.array([np.argwhere(idx == x)[0, 0] for x in x_idx], dtype=np.int32)
y_idx = np.array(list(set(idx) & self.y_only))
y_idx = np.array([np.argwhere(idx == x)[0, 0] for x in y_idx], dtype=np.int32)
xy_idx = np.array(list(set(idx) & self.x_and_y))
xy_idx = np.array([np.argwhere(idx == x)[0, 0] for x in xy_idx], dtype=np.int32)
# create separate arrays for jointly observed and marginal data
X = self.M1[x_idx]
Y = self.M2[y_idx]
X_joint = self.M1[xy_idx]
Y_joint = self.M2[xy_idx]
# reshape
X = np.reshape(X, newshape=[-1, 784 * 3])
Y = np.reshape(Y, newshape=[-1, 784 * 3])
X_joint = np.reshape(X_joint, newshape=[-1, 784 * 3])
Y_joint = np.reshape(Y_joint, newshape=[-1, 784 * 3])
if include_labels: # split label data too
lX = labels[x_idx]
lY = labels[y_idx]
l_joint = labels[xy_idx]
return (X, lX), (Y, lY), (X_joint, l_joint), (Y_joint, l_joint)
else:
return X, Y, X_joint, Y_joint
def _create_modalities(self, data):
"""
Creates X and Y datasets from input MNIST data.
data: numpy array of MNIST digits, with dimensions: #digits x 784
"""
# randomly assign colours
x_bank, y_bank = self._sample_random_colours(len(data))
# colour digits
print("Colouring modalities...", flush=True)
X = self._colour(data, x_bank)
Y = self._colour(data, y_bank)
# reshape and scale
X = np.reshape(X, newshape=[-1, 28, 28, 3]) / 255
Y = np.reshape(Y, newshape=[-1, 28, 28, 3]) # normalized in _edge_map
# compute edge map
print("Computing edge map...", flush=True)
Y = self._edge_map(Y)
return X, Y
def _edge_map(self, data):
"""
Converts MNIST digits into corresponding edge map.
data: numpy array of MNIST digits, with dimensions: #images x height x width
"""
n = len(data)
edges = np.zeros(shape=data.shape)
for i in range(n):
im = data[i]
sx = ndimage.sobel(im, axis=0, mode='constant')
sy = ndimage.sobel(im, axis=1, mode='constant')
sob = np.hypot(sx, sy)
_max = np.max(sob)
edges[i] = sob / _max
return edges
def _colour(self, data, colours):
"""
Randomly colours MNIST digits into one of 3 colours.
data: numpy array of MNIST digits, with dimensions: #images x 784
colours: numpy array of colours, with dimensions: #images x 3
"""
rgb = []
for i in range(3):
rgb_comp = np.zeros(data.shape)
for j in range(len(data)):
ones = np.where(data[j] > 0)[0]
rgb_comp[j] = data[j]
rgb_comp[j, ones] = colours[j, i]
rgb.append(rgb_comp)
return np.stack(rgb, axis=-1)
def _sample_random_colours(self, n_samples):
"""
Draws random colours from each colour bank.
n_samples: number of random colours to draw
"""
x_bank = np.array(self.x_colours)
y_bank = np.array(self.y_colours)
idx = np.random.randint(len(x_bank), size=n_samples)
return x_bank[idx], y_bank[idx]
class ColouredStratifiedMNIST(ColouredMNIST):
"""
Based on dataset created in the paper: "Unsupervised Image-to-Image Translation Networks"
X dataset consists of MNIST digits with strokes coloured as red, blue, green.
Y dataset consists of MNIST digits transformed to an edge map, and then coloured as orange, magenta, teal.
A small paired dataset consists of a one-to-one mapping between colours in X and colours in Y of the same
MNIST digit.
"""
def __init__(self, n_paired, censor=False):
"""
n_paired: number of paired examples to create
"""
super(ColouredStratifiedMNIST, self).__init__(n_paired) # load data
self.x1_and_x2 = np.array(list(self.x_and_y))
self.x1_only = np.array(list(self.x_only))
self.x2_only = np.array(list(self.y_only))
# separate the datasets
self.x1 = self.M1[self.x1_only]
self.y1 = self.ytr[self.x1_only]
self.x2 = self.M2[self.x2_only]
self.y2 = self.ytr[self.x2_only]
self.x1p = self.M1[self.x1_and_x2]
self.x2p = self.M2[self.x1_and_x2]
self.yp = self.ytr[self.x1_and_x2]
if censor:
numbers_train = [0,1,2,3,4,5,6,7]
numbers_test = [8,9]
idx = []
for i, ix in enumerate(self.y1):
if ix in numbers_train:
idx.append(i)
self.y1 = self.y1[idx]
self.x1 = self.x1[idx]
idx = []
for i, ix in enumerate(self.y2):
if ix in numbers_train:
idx.append(i)
self.y2 = self.y2[idx]
self.x2 = self.x2[idx]
idx = []
for i, ix in enumerate(self.yp):
if ix in numbers_train:
idx.append(i)
self.yp = self.yp[idx]
self.x1p = self.x1p[idx]
self.x2p = self.x2p[idx]
idx = []
for i, ix in enumerate(self.yte):
if ix in numbers_test:
idx.append(i)
self.yte = self.yte[idx]
self.M1_test = self.M1_test[idx]
self.M2_test = self.M2_test[idx]
def sample_stratified(self, n_paired_samples, n_unpaired_samples=250, dtype='train', include_labels=False):
# test set case
if dtype == 'test':
_, (x1, x2, y) = sample([self.M1_test, self.M2_test, self.yte], n_paired_samples)
# reshape
x1 = np.reshape(x1, newshape=[-1, 784 * 3])
x2 = np.reshape(x2, newshape=[-1, 784 * 3])
if include_labels:
return (x1, y), (x2, y)
else:
return x1, x2
# training set case
elif dtype == 'train':
n_min = 2 * n_unpaired_samples // 5
n_min = max(1, n_min)
n_max = n_unpaired_samples - n_min
n_x1 = np.random.randint(low=n_min, high=n_max + 1)
n_x2 = n_unpaired_samples - n_x1
_, (x1p, x2p, yp) = sample([self.x1p, self.x2p, self.yp], n_paired_samples)
_, (x1, y1) = sample([self.x1, self.y1], n_x1)
_, (x2, y2) = sample([self.x2, self.y2], n_x2)
# reshape
x1 = np.reshape(x1, newshape=[-1, 784 * 3])
x2 = np.reshape(x2, newshape=[-1, 784 * 3])
x1p = np.reshape(x1p, newshape=[-1, 784 * 3])
x2p = np.reshape(x2p, newshape=[-1, 784 * 3])
if include_labels:
return (x1, y1), (x2, y2), (x1p, yp), (x2p, yp)
else:
return x1, x2, x1p, x2p
class Sketches(object):
def __init__(self, n_paired):
_raw_photo_path = '../data/sketchy/256x256/photo/tx_000100000000/'
_raw_sketch_path = '../data/sketchy/256x256/sketch/tx_000100000000/'
_data_path = '../data/sketch.npz'
if os.path.isfile(_data_path): # load processed data
print("Loading data...", flush=True)
data = np.load(_data_path)
self.x1 = data['arr_0']
self.x2 = data['arr_1']
self.ytr = data['arr_2']
self.x1_test = data['arr_3']
self.x2_test = data['arr_4']
self.yte = data['arr_5']
print("Data loaded.", flush=True)
else: # process data and load
x1 = []
x2 = []
y = []
train = []
test = []
print("Processing data..", flush=True)
categories = [p for p in os.listdir(_raw_photo_path)
if os.path.isdir(os.path.join(_raw_photo_path, p))]
i = 0
for cat in categories:
print("At category: ", cat, flush=True)
cat_photo_path = _raw_photo_path + cat + '/'
cat_sketch_path = _raw_sketch_path + cat + '/'
photo_files = [p for p in os.listdir(cat_photo_path)
if os.path.isfile(os.path.join(cat_photo_path, p))]
sketch_files = [p for p in os.listdir(cat_sketch_path)
if os.path.isfile(os.path.join(cat_sketch_path, p))]
for f in photo_files:
photo_path = cat_photo_path + f
photo = ndimage.imread(photo_path)
photo = imresize(photo, size=0.25, interp='cubic')
photo = np.reshape(photo, newshape=[1, -1])
sketches = [p for p in sketch_files if f.replace('.jpg','')+'-' in p]
is_train = np.random.binomial(n=1, p=0.85) # sort into train/test sets
for sk in sketches:
sketch_path = cat_sketch_path + sk
sketch = ndimage.imread(sketch_path)
sketch = imresize(sketch, size=0.25, interp='cubic')
sketch = np.reshape(sketch, newshape=[1, -1])
x1.append(photo)
x2.append(sketch)
y.append(cat)
if is_train == 1:
train.append(i)
else:
test.append(i)
i += 1
y = pd.Series(y)
y = pd.Categorical(y)
y = y.codes
assert len(x1) == len(x2)
x1 = np.concatenate(x1, axis=0)
x2 = np.concatenate(x2, axis=0)
print("x1 shape: ", x1.shape, flush=True)
print("x2 shape: ", x2.shape, flush=True)
self.x1 = x1[train]
self.x2 = x2[train]
self.ytr = y[train]
self.x1_test = x1[test]
self.x2_test = x2[test]
self.yte = y[test]
print("Saving data...", flush=True)
np.savez(_data_path, self.x1, self.x2, self.ytr, self.x1_test, self.x2_test, self.yte)
print("Saved.", flush=True)
# construct pairings
_n = len(self.x1)
self.x1_and_x2 = set(np.random.randint(_n, size=n_paired))
_remain = set(np.arange(_n)) - set(self.x1_and_x2)
_x_size = int(len(_remain) / 2)
self.x1_only = set(np.random.choice(list(_remain), size=_x_size, replace=False))
self.x2_only = set(np.array(list(_remain - set(self.x1_only))))
self.x1_and_x2 = np.array(list(self.x1_and_x2))
self.x1_only = np.array(list(self.x1_only))
self.x2_only = np.array(list(self.x2_only))
# separate out datasets
self.x1u = self.x1[self.x1_only]
self.y1u = self.ytr[self.x1_only]
self.x2u = self.x2[self.x2_only]
self.y2u = self.ytr[self.x2_only]
self.x1p = self.x1[self.x1_and_x2]
self.x2p = self.x2[self.x1_and_x2]
self.yp = self.ytr[self.x1_and_x2]
def sample_stratified(self, n_paired_samples, n_unpaired_samples=250, dtype='train', include_labels=False):
# test set case
if dtype == 'test':
_, (x1, x2, y) = sample([self.x1_test, self.x2_test, self.yte], n_paired_samples)
x1 = x1 / 255
x2 = x2 / 255
if include_labels:
return (x1, y), (x2, y)
else:
return x1, x2
# training set case
elif dtype == 'train':
n_min = 2 * n_unpaired_samples // 5
n_min = max(1, n_min)
n_max = n_unpaired_samples - n_min
n_x1 = np.random.randint(low=n_min, high=n_max + 1)
n_x2 = n_unpaired_samples - n_x1
_, (x1p, x2p, yp) = sample([self.x1p, self.x2p, self.yp], n_paired_samples)
_, (x1, y1) = sample([self.x1u, self.y1u], n_x1)
_, (x2, y2) = sample([self.x2u, self.y2u], n_x2)
x1 = x1 / 255
x2 = x2 / 255
x1p = x1p / 255
x2p = x2p / 255
if include_labels:
return (x1, y1), (x2, y2), (x1p, yp), (x2p, yp)
else:
return x1, x2, x1p, x2p
class DayNight(object):
def __init__(self,):
data_path = '../data/dnim.npz'
if os.path.isfile(data_path): # load processed data
print("Loading data...", flush=True)
data = np.load(data_path)
self.x1p = data['arr_0']
self.x2p = data['arr_1']
self.yp = data['arr_2']
self.x1 = data['arr_3']
self.x2 = data['arr_4']
self.y1 = data['arr_5']
self.y2 = data['arr_6']
self.x1_test = data['arr_7']
self.x2_test = data['arr_8']
self.y_test = data['arr_9']
print("Data loaded.", flush=True)
else: # process data and load
dnim_path = '../data/dnim/Image/'
dnim_stamps_path = '../data/dnim/time_stamp/'
print("Processing data..", flush=True)
dnim_stamps = [p for p in os.listdir(dnim_stamps_path)
if os.path.isfile(os.path.join(dnim_stamps_path, p))]
df = []
for i, st in enumerate(dnim_stamps):
path = dnim_stamps_path + st
tst = pd.read_csv(path, sep=' ', header=None, names=['f_name', 'date', 'h', 'm'])
tst['camera'] = [st.replace('.txt','')] * len(tst)
# train/test indicator
is_train = [1] * len(tst) if i < 11 else [0] * len(tst)
tst['is_train'] = pd.Series(is_train)
from importlib import reload
import scipy
import numpy as np
#import matplotlib.pyplot as plt
import pandas as pd
import demosaurus
app = demosaurus.create_app()
def score_candidates(row):
print(row.publication_ppn)
author_name=str(row['name'])
author_role = row.role
publication_title = row.titelvermelding
publication_genres={
'CBK_genre':(str(row.CBK_genres).split(',') if row.CBK_genres else []),
'brinkman':(str(row.Brinkman_vorm).split(',') if row.Brinkman_vorm else []) }
publication_year= row.jaar_van_uitgave
try:
with app.app_context() as context:
candidates = demosaurus.link_thesaurus.thesaureer_this(author_name,author_role, publication_title, publication_genres, publication_year)
except:
print('Failed to obtain candidates for', row)
candidates = pd.DataFrame()
import re
import numpy as np
import pandas.compat as compat
import pandas as pd
from pandas.compat import u
from pandas.core.base import FrozenList, FrozenNDArray
from pandas.util.testing import assertRaisesRegexp, assert_isinstance
from pandas import Series, Index, DatetimeIndex, PeriodIndex
from pandas import _np_version_under1p7
import nose
import pandas.util.testing as tm
class CheckStringMixin(object):
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
if not compat.PY3:
unicode(self.container)
def test_tricky_container(self):
if not hasattr(self, 'unicode_container'):
raise nose.SkipTest('Need unicode_container to test with this')
repr(self.unicode_container)
str(self.unicode_container)
bytes(self.unicode_container)
if not compat.PY3:
unicode(self.unicode_container)
class CheckImmutable(object):
mutable_regex = re.compile('does not support mutable operations')
def check_mutable_error(self, *args, **kwargs):
# pass whatever functions you normally would to assertRaises (after the Exception kind)
assertRaisesRegexp(TypeError, self.mutable_regex, *args, **kwargs)
def test_no_mutable_funcs(self):
def setitem(): self.container[0] = 5
self.check_mutable_error(setitem)
def setslice(): self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem(): del self.container[0]
self.check_mutable_error(delitem)
def delslice(): del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert_isinstance(result, klass)
self.assertEqual(result, expected)
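# A minimal sketch (illustrative helper, not used by the tests below) of the
# behaviour these mixins assert: FrozenList rejects in-place mutation with a
# TypeError whose message matches mutable_regex above.
def _example_frozenlist_is_immutable():
    fl = FrozenList([1, 2, 3])
    try:
        fl[0] = 9
    except TypeError as err:
        # e.g. "'FrozenList' does not support mutable operations."
        return str(err)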
class TestFrozenList(CheckImmutable, CheckStringMixin, tm.TestCase):
mutable_methods = ('extend', 'pop', 'remove', 'insert')
unicode_container = FrozenList([u("\u05d0"), u("\u05d1"), "c"])
def setUp(self):
self.lst = [1, 2, 3, 4, 5]
self.container = | FrozenList(self.lst) | pandas.core.base.FrozenList |
"""
Function and classes used to identify barcodes
"""
from typing import *
import pandas as pd
import numpy as np
import pickle
import logging
from sklearn.neighbors import NearestNeighbors
# from pynndescent import NNDescent
from pathlib import Path
from itertools import groupby
from pysmFISH.logger_utils import selected_logger
from pysmFISH.data_models import Output_models
from pysmFISH.errors import Registration_errors
class simplify_barcodes_reference():
"""Utility Class use to convert excels files with codebook info
in smaller size pandas dataframe/parquet files to pass to dask
workers during the processing. This utility function must be
run before running the experiment analysis. The pipeline
require the output of this function.
"""
def __init__(self, barcode_fpath: str):
"""Class initialization
Args:
barcode_fpath (str): Path to the xlsx file with the codebook
"""
self.barcode_fpath = Path(barcode_fpath)
self.barcode_fname = self.barcode_fpath.stem
@staticmethod
def format_codeword(codeword: str):
"""[summary]
Args:
codeword (str): codeword representing a gene
Returns:
bytes: codeword converted to its byte representation
"""
str_num = codeword.split('[')[-1].split(']')[0]
converted_codeword = np.array([int(el) for el in list(str_num)]).astype(np.int8)
converted_codeword = converted_codeword.tobytes()
return converted_codeword
def convert_codebook(self):
used_gene_codebook_df = pd.read_excel(self.barcode_fpath)
# used_gene_codebook_df = pd.read_parquet(self.barcode_fpath)
self.codebook_df = used_gene_codebook_df.loc[:,['Barcode','Gene']]
self.codebook_df.rename(columns = {'Barcode':'Code'}, inplace = True)
self.codebook_df.Code = self.codebook_df.Code.apply(lambda x: self.format_codeword(x))
self.codebook_df.to_parquet(self.barcode_fpath.parent / (self.barcode_fname + '.parquet'))
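# A minimal usage sketch (illustrative helper, not part of the pipeline) of
# `simplify_barcodes_reference.format_codeword`; the codeword string below is
# an invented example, not a real codebook entry.
def _example_format_codeword():
    code = simplify_barcodes_reference.format_codeword('gene_x [0110]')
    # The 0/1 characters inside the brackets become an int8 array whose raw
    # bytes are stored in the parquet codebook's `Code` column.
    assert code == np.array([0, 1, 1, 0], dtype=np.int8).tobytes()
    return code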
def dots_hoods(coords: np.ndarray,pxl: int)->np.ndarray:
"""Function that calculate the coords of the peaks searching
neighborhood for identifying the barcodes.
Args:
coords (np.ndarray): coords of the identified peaks
pxl (int): size of the neighborhood in pixel
Returns:
np.ndarray: coords that define the neighborhood (r_tl,r_br,c_tl,c_tr)
"""
r_tl = coords[:,0]-pxl
r_br = coords[:,0]+pxl
c_tl = coords[:,1]-pxl
c_tr = coords[:,1]+pxl
r_tl = r_tl[:,np.newaxis]
r_br = r_br[:,np.newaxis]
c_tl = c_tl[:,np.newaxis]
c_tr = c_tr[:,np.newaxis]
chunks_coords = np.hstack((r_tl,r_br,c_tl,c_tr))
chunks_coords = chunks_coords.astype(int)
return chunks_coords
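# A minimal usage sketch (illustrative helper) for `dots_hoods`; the peak
# coordinates and pixel radius are arbitrary illustration values.
def _example_dots_hoods():
    peaks = np.array([[10, 20], [30, 40]])
    hoods = dots_hoods(peaks, pxl=2)
    # Each row is (r_tl, r_br, c_tl, c_tr) around the corresponding peak:
    # [[ 8, 12, 18, 22],
    #  [28, 32, 38, 42]]
    return hoods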
def extract_dots_images(barcoded_df: pd.DataFrame,registered_img_stack: np.ndarray,
experiment_fpath: str, metadata: dict):
"""Function used to extract the images corresponding to a barcode
after running the decoding identification. It can save the images
but to avoid increasing too much the space occupied by a processed
experiment an array with the maximum intensity value of the pxl in
each round is calculated and saved
Args:
barcoded_df (pd.DataFrame): Dataframe with decoded barcodes
for a specific field of view.
registered_img_stack (np.ndarray): Preprocessed image of a single field of view
the imaging round corresponds to the z-stack position
experiment_fpath (str): Path to the folder of the experiment to process
metadata (dict): Overall experiment info
"""
round_intensity_labels = ['bit_' + str(el) +'_intensity' for el in np.arange(1,int(metadata['total_rounds'])+1)]
if isinstance(registered_img_stack, np.ndarray) and (barcoded_df.shape[0] >1):
experiment_fpath = Path(experiment_fpath)
barcodes_names = barcoded_df['barcode_reference_dot_id'].values
coords = barcoded_df.loc[:, ['r_px_registered', 'c_px_registered']].to_numpy()
barcodes_extraction_resolution = barcoded_df['barcodes_extraction_resolution'].values[0]
chunks_coords = dots_hoods(coords,barcodes_extraction_resolution)
chunks_coords[chunks_coords<0]=0
chunks_coords[chunks_coords>registered_img_stack.shape[1]]= registered_img_stack.shape[1]
for idx in np.arange(chunks_coords.shape[0]):
selected_region = registered_img_stack[:,chunks_coords[idx,0]:chunks_coords[idx,1]+1,chunks_coords[idx,2]:chunks_coords[idx,3]+1]
if selected_region.size >0:
max_array = selected_region.max(axis=(1,2))
barcoded_df.loc[barcoded_df.dot_id == barcodes_names[idx],round_intensity_labels] = max_array
# for channel in channels:
# all_regions[channel] = {}
# all_max[channel] = {}
# img_stack = registered_img_stack[channel]
# trimmed_df_channel = trimmed_df.loc[trimmed_df.channel == channel]
# if trimmed_df_channel.shape[0] >0:
# barcodes_names = trimmed_df_channel['barcode_reference_dot_id'].values
# coords = trimmed_df_channel.loc[:, ['r_px_registered', 'c_px_registered']].to_numpy()
# barcodes_extraction_resolution = trimmed_df_channel['barcodes_extraction_resolution'].values[0]
# chunks_coords = dots_hoods(coords,barcodes_extraction_resolution)
# chunks_coords[chunks_coords<0]=0
# chunks_coords[chunks_coords>img_stack.shape[1]]= img_stack.shape[1]
# for idx in np.arange(chunks_coords.shape[0]):
# selected_region = img_stack[:,chunks_coords[idx,0]:chunks_coords[idx,1]+1,chunks_coords[idx,2]:chunks_coords[idx,3]+1]
# if selected_region.size >0:
# max_array = selected_region.max(axis=(1,2))
# # all_regions[channel][barcodes_names[idx]]= selected_region
# all_max[channel][barcodes_names[idx]]= max_array
# barcoded_df.loc[barcoded_df.dot_id == barcodes_names[idx],round_intensity_labels] = max_array
# fpath = experiment_fpath / 'tmp' / 'combined_rounds_images' / (experiment_name + '_' + channel + '_img_dict_fov_' + str(fov) + '.pkl')
# pickle.dump(all_regions,open(fpath,'wb'))
# fpath = experiment_fpath / 'results' / (experiment_name + '_barcodes_max_array_dict_fov_' + str(fov) + '.pkl')
# pickle.dump(all_max,open(fpath,'wb'))
else:
barcoded_df.loc[:,round_intensity_labels] = np.nan
return barcoded_df
def identify_flipped_bits(codebook: pd.DataFrame, gene: str,
raw_barcode: ByteString)-> Tuple[ByteString, ByteString]:
"""Utility function used to identify the position of the bits that are
flipped after the nearest neighbors and the definition of the
acceptable hamming distance for a single dot.
Args:
codebook (pd.DataFrame): Codebook used for the decoding
gene (str): Name of the gene identified
raw_barcode (ByteString): identified barcode from the images
Returns:
Tuple[ByteString, ByteString]: (flipped_position, flipping_direction)
"""
gene_barcode_str =codebook.loc[codebook.Gene == gene, 'Code'].values[0]
gene_barcode = np.frombuffer(gene_barcode_str, np.int8)
raw_barcode = np.frombuffer(raw_barcode, np.int8)
flipped_positions = np.where(raw_barcode != gene_barcode)[0].astype(np.int8)
flipping_directions = (gene_barcode[flipped_positions] - raw_barcode[flipped_positions]).astype(np.int8)
# flipped_positions = flipped_positions.tobytes()
# flipping_directions = flipping_directions.tobytes()
return flipped_positions,flipping_directions
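# A minimal usage sketch (illustrative helper) for `identify_flipped_bits` on
# a toy single-gene codebook; the gene name and 4-bit codes are invented.
def _example_identify_flipped_bits():
    codebook = pd.DataFrame({
        'Gene': ['gene_a'],
        'Code': [np.array([1, 0, 1, 0], dtype=np.int8).tobytes()],
    })
    raw = np.array([1, 1, 1, 0], dtype=np.int8).tobytes()
    positions, directions = identify_flipped_bits(codebook, 'gene_a', raw)
    # positions  -> array([1], dtype=int8): bit 1 differs from the codebook
    # directions -> array([-1], dtype=int8): codebook bit minus observed bit
    return positions, directions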
def define_flip_direction(codebook_dict: dict,experiment_fpath: str,
output_df: pd.DataFrame):
"""Function used to determinethe the position of the bits that are
flipped after the nearest neighbors and the definition of the
acceptable hamming distance for fov.
Args:
codebook_dict (dict): Codebooks used for the decoding, one per channel
experiment_fpath (str): Path to the folder of the experiment to process
output_df (pd.DataFrame): Dataframe with the decoded results for
the specific fov.
"""
if output_df.shape[0] > 1:
correct_hamming_distance = 0
selected_hamming_distance = 3 / output_df.iloc[0].barcode_length
experiment_fpath = Path(experiment_fpath)
experiment_name = experiment_fpath.stem
channels = codebook_dict.keys()
all_evaluated = []
for channel in channels:
codebook = codebook_dict[channel]
fov = output_df.fov_num.values[0]
trimmed_df = output_df.loc[(output_df.dot_id == output_df.barcode_reference_dot_id) &
(output_df.channel == channel) &
(output_df['hamming_distance'] > correct_hamming_distance) &
(output_df['hamming_distance'] < selected_hamming_distance),
['barcode_reference_dot_id', 'decoded_genes', 'raw_barcodes','hamming_distance']]
trimmed_df = trimmed_df.dropna(subset=['decoded_genes'])
trimmed_df.loc[:,('flip_and_direction')] = trimmed_df.apply(lambda x: identify_flipped_bits(codebook,x.decoded_genes,x.raw_barcodes),axis=1)
trimmed_df['flip_position'] = trimmed_df['flip_and_direction'].apply(lambda x: x[0])
trimmed_df['flip_direction'] = trimmed_df['flip_and_direction'].apply(lambda x: x[1])
trimmed_df.drop(columns=['flip_and_direction'],inplace=True)
all_evaluated.append(trimmed_df)
all_evaluated = pd.concat(all_evaluated, axis=0, ignore_index=True)  # pd.concat does not take an inplace argument
fpath = experiment_fpath / 'results' / (experiment_name + '_' + channel + '_df_flip_direction_fov' + str(fov) + '.parquet')
all_evaluated.to_parquet(fpath)
# return trimmed_df
def chunk_dfs(dataframes_list: list, chunk_size: int):
"""
Function modified from
https://stackoverflow.com/questions/45217120/how-to-efficiently-join-merge-concatenate-large-data-frame-in-pandas
Yields n dataframes at a time, where n == chunk_size.
"""
dfs = []
for f in dataframes_list:
dfs.append(f)
if len(dfs) == chunk_size:
yield dfs
dfs = []
if dfs:
yield dfs
def merge_with_concat(dfs: list)->pd.DataFrame:
"""Utility function used to merge dataframes
Args:
dfs (list): List with the dataframes to merge
Returns:
pd.DataFrame: Merged dataframe
"""
# dfs = (df.set_index(col, drop=True) for df in dfs)
merged = pd.concat(dfs, axis=0, join='outer', copy=False)
return merged
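# A minimal sketch (illustrative helper) of combining `chunk_dfs` and
# `merge_with_concat` to concatenate a long list of dataframes in fixed-size
# chunks; the frames below are trivial placeholders.
def _example_chunked_merge():
    frames = [pd.DataFrame({'a': [i]}) for i in range(10)]
    partial = [merge_with_concat(chunk) for chunk in chunk_dfs(frames, chunk_size=4)]
    # Chunks of 4, 4 and 2 frames are merged first, then merged together.
    return merge_with_concat(partial)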
"""
Extracts the barcodes from the registered
counts using nearest neighbour
Parameters:
-----------
counts: pandas.DataFrame
pandas file with the fov counts after
registration
analysis_parameters: dict
parameters for data processing
codebook_df: pandas.DataFrame
pandas file with the codebook used to
deconvolve the barcode
NB: if there is a problem with the registration the barcode assigned
will be 0*barcode_length
"""
def extract_barcodes_NN_fast_multicolor(registered_counts_df: pd.DataFrame, analysis_parameters: Dict,
codebook_df: pd.DataFrame, metadata:dict)-> Tuple[pd.DataFrame,pd.DataFrame]:
"""Function used to extract the barcodes from the registered
counts using nearest neighbour. If there is a problem with the registration, the barcode assigned
will be 0*barcode_length
Args:
registered_counts_df (pd.DataFrame): Fov counts after registration
analysis_parameters (Dict): Parameters for data processing
codebook_df (pd.DataFrame): codebook used to deconvolve the barcode
Returns:
Tuple[pd.DataFrame,pd.DataFrame]: (barcoded_round, all_decoded_dots_df)
"""
logger = selected_logger()
barcodes_extraction_resolution = analysis_parameters['BarcodesExtractionResolution']
RegistrationMinMatchingBeads = analysis_parameters['RegistrationMinMatchingBeads']
barcode_length = metadata['barcode_length']
registration_errors = Registration_errors()
stitching_channel = metadata['stitching_channel']
registered_counts_df.dropna(subset=['dot_id'],inplace=True)
# Starting level for selection of dots
dropping_counts = registered_counts_df.copy(deep=True)
all_decoded_dots_list = []
barcoded_round = []
if registered_counts_df['r_px_registered'].isnull().values.any():
all_decoded_dots_df = pd.DataFrame(columns = registered_counts_df.columns)
all_decoded_dots_df['decoded_genes'] = np.nan
all_decoded_dots_df['hamming_distance'] = np.nan
all_decoded_dots_df['number_positive_bits'] = np.nan
all_decoded_dots_df['barcode_reference_dot_id'] = np.nan
all_decoded_dots_df['raw_barcodes'] = np.nan
all_decoded_dots_df['barcodes_extraction_resolution'] = barcodes_extraction_resolution
# Save barcoded_round and all_decoded_dots_df
return registered_counts_df, all_decoded_dots_df
else:
for ref_round_number in np.arange(1,barcode_length+1):
#ref_round_number = 1
reference_round_df = dropping_counts.loc[dropping_counts.round_num == ref_round_number,:]
# Step one (all dots not in round 1)
compare_df = dropping_counts.loc[dropping_counts.round_num!=ref_round_number,:]
if (not reference_round_df.empty):
if not compare_df.empty:
nn = NearestNeighbors(n_neighbors=1, metric="euclidean")
nn.fit(reference_round_df[['r_px_registered','c_px_registered']])
dists, indices = nn.kneighbors(compare_df[['r_px_registered','c_px_registered']], return_distance=True)
# select only the nn that are below barcodes_extraction_resolution distance
idx_distances_below_resolution = np.where(dists <= barcodes_extraction_resolution)[0]
comp_idx = idx_distances_below_resolution
ref_idx = indices[comp_idx].flatten()
# Subset the dataframe according to the selected points
# The reference selected will have repeated points
comp_selected_df = compare_df.iloc[comp_idx]
ref_selected_df = reference_round_df.iloc[ref_idx]
# The size of ref_selected_df w/o duplicates may be smaller than reference_round_df if
# some of the dots in reference_round_df have no neighbours
# Test approach where we get rid of the single dots
comp_selected_df.loc[:,'barcode_reference_dot_id'] = ref_selected_df['dot_id'].values
ref_selected_df_no_duplicates = ref_selected_df.drop_duplicates()
ref_selected_df_no_duplicates.loc[:,'barcode_reference_dot_id'] = ref_selected_df_no_duplicates['dot_id'].values
# Collect singletons
# Remember that this method works only because there are no duplicates inside the dataframes
# https://stackoverflow.com/questions/48647534/python-pandas-find-difference-between-two-data-frames
if reference_round_df.shape[0] > ref_selected_df_no_duplicates.shape[0]:
singletons_df = | pd.concat([reference_round_df,ref_selected_df_no_duplicates]) | pandas.concat |
import string
import warnings
import numpy as np
from pandas import (
DataFrame,
MultiIndex,
NaT,
Series,
date_range,
isnull,
period_range,
timedelta_range,
)
from .pandas_vb_common import tm
class GetNumericData:
def setup(self):
self.df = DataFrame(np.random.randn(10000, 25))
self.df["foo"] = "bar"
self.df["bar"] = "baz"
self.df = self.df._consolidate()
def time_frame_get_numeric_data(self):
self.df._get_numeric_data()
class Lookup:
def setup(self):
self.df = DataFrame(np.random.randn(10000, 8), columns=list("abcdefgh"))
self.df["foo"] = "bar"
self.row_labels = list(self.df.index[::10])[:900]
self.col_labels = list(self.df.columns) * 100
self.row_labels_all = np.array(
list(self.df.index) * len(self.df.columns), dtype="object"
)
self.col_labels_all = np.array(
list(self.df.columns) * len(self.df.index), dtype="object"
)
def time_frame_fancy_lookup(self):
self.df.lookup(self.row_labels, self.col_labels)
def time_frame_fancy_lookup_all(self):
self.df.lookup(self.row_labels_all, self.col_labels_all)
class Reindex:
def setup(self):
N = 10 ** 3
self.df = DataFrame(np.random.randn(N * 10, N))
self.idx = np.arange(4 * N, 7 * N)
self.idx_cols = np.random.randint(0, N, N)
self.df2 = DataFrame(
{
c: {
0: np.random.randint(0, 2, N).astype(np.bool_),
1: np.random.randint(0, N, N).astype(np.int16),
2: np.random.randint(0, N, N).astype(np.int32),
3: np.random.randint(0, N, N).astype(np.int64),
}[np.random.randint(0, 4)]
for c in range(N)
}
)
def time_reindex_axis0(self):
self.df.reindex(self.idx)
def time_reindex_axis1(self):
self.df.reindex(columns=self.idx_cols)
def time_reindex_axis1_missing(self):
self.df.reindex(columns=self.idx)
def time_reindex_both_axes(self):
self.df.reindex(index=self.idx, columns=self.idx_cols)
def time_reindex_upcast(self):
self.df2.reindex(np.random.permutation(range(1200)))
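# Note: asv instantiates the benchmark classes above and calls `setup()`
# before timing each `time_*` method. The helper below is only a sketch of
# exercising one benchmark by hand (e.g. while debugging it locally).
def _run_reindex_benchmark_once():
    bench = Reindex()
    bench.setup()
    bench.time_reindex_axis0()
    bench.time_reindex_both_axes()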
class Rename:
def setup(self):
N = 10 ** 3
self.df = DataFrame(np.random.randn(N * 10, N))
self.idx = np.arange(4 * N, 7 * N)
self.dict_idx = {k: k for k in self.idx}
self.df2 = DataFrame(
{
c: {
0: np.random.randint(0, 2, N).astype(np.bool_),
1: np.random.randint(0, N, N).astype(np.int16),
2: np.random.randint(0, N, N).astype(np.int32),
3: np.random.randint(0, N, N).astype(np.int64),
}[np.random.randint(0, 4)]
for c in range(N)
}
)
def time_rename_single(self):
self.df.rename({0: 0})
def time_rename_axis0(self):
self.df.rename(self.dict_idx)
def time_rename_axis1(self):
self.df.rename(columns=self.dict_idx)
def time_rename_both_axes(self):
self.df.rename(index=self.dict_idx, columns=self.dict_idx)
def time_dict_rename_both_axes(self):
self.df.rename(index=self.dict_idx, columns=self.dict_idx)
class Iteration:
# mem_itertuples_* benchmarks are slow
timeout = 120
def setup(self):
N = 1000
self.df = DataFrame(np.random.randn(N * 10, N))
self.df2 = DataFrame(np.random.randn(N * 50, 10))
self.df3 = DataFrame(
np.random.randn(N, 5 * N), columns=["C" + str(c) for c in range(N * 5)]
)
self.df4 = DataFrame(np.random.randn(N * 1000, 10))
def time_items(self):
# (monitor no-copying behaviour)
if hasattr(self.df, "_item_cache"):
self.df._item_cache.clear()
for name, col in self.df.items():
pass
def time_items_cached(self):
for name, col in self.df.items():
pass
def time_iteritems_indexing(self):
for col in self.df3:
self.df3[col]
def time_itertuples_start(self):
self.df4.itertuples()
def time_itertuples_read_first(self):
next(self.df4.itertuples())
def time_itertuples(self):
for row in self.df4.itertuples():
pass
def time_itertuples_to_list(self):
list(self.df4.itertuples())
def mem_itertuples_start(self):
return self.df4.itertuples()
def peakmem_itertuples_start(self):
self.df4.itertuples()
def mem_itertuples_read_first(self):
return next(self.df4.itertuples())
def peakmem_itertuples(self):
for row in self.df4.itertuples():
pass
def mem_itertuples_to_list(self):
return list(self.df4.itertuples())
def peakmem_itertuples_to_list(self):
list(self.df4.itertuples())
def time_itertuples_raw_start(self):
self.df4.itertuples(index=False, name=None)
def time_itertuples_raw_read_first(self):
next(self.df4.itertuples(index=False, name=None))
def time_itertuples_raw_tuples(self):
for row in self.df4.itertuples(index=False, name=None):
pass
def time_itertuples_raw_tuples_to_list(self):
list(self.df4.itertuples(index=False, name=None))
def mem_itertuples_raw_start(self):
return self.df4.itertuples(index=False, name=None)
def peakmem_itertuples_raw_start(self):
self.df4.itertuples(index=False, name=None)
def peakmem_itertuples_raw_read_first(self):
next(self.df4.itertuples(index=False, name=None))
def peakmem_itertuples_raw(self):
for row in self.df4.itertuples(index=False, name=None):
pass
def mem_itertuples_raw_to_list(self):
return list(self.df4.itertuples(index=False, name=None))
def peakmem_itertuples_raw_to_list(self):
list(self.df4.itertuples(index=False, name=None))
def time_iterrows(self):
for row in self.df.iterrows():
pass
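# Note: the Iteration benchmarks contrast the row-iteration APIs; roughly,
# `itertuples` yields plain (named)tuples while `iterrows` builds a Series per
# row, which is why the raw-tuple variants are timed separately. The helper
# below is an illustrative sketch, not an asv benchmark.
def _example_row_iteration():
    df = DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
    tuple_rows = list(df.itertuples(index=False, name=None))  # [(1, 3.0), (2, 4.0)]
    series_rows = [row for _, row in df.iterrows()]  # one Series per row
    return tuple_rows, series_rows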
class ToString:
def setup(self):
self.df = DataFrame(np.random.randn(100, 10))
def time_to_string_floats(self):
self.df.to_string()
class ToHTML:
def setup(self):
nrows = 500
self.df2 = DataFrame(np.random.randn(nrows, 10))
self.df2[0] = period_range("2000", periods=nrows)
self.df2[1] = range(nrows)
def time_to_html_mixed(self):
self.df2.to_html()
class ToDict:
params = [["dict", "list", "series", "split", "records", "index"]]
param_names = ["orient"]
def setup(self, orient):
data = np.random.randint(0, 1000, size=(10000, 4))
self.int_df = DataFrame(data)
self.datetimelike_df = self.int_df.astype("timedelta64[ns]")
def time_to_dict_ints(self, orient):
self.int_df.to_dict(orient=orient)
def time_to_dict_datetimelike(self, orient):
self.datetimelike_df.to_dict(orient=orient)
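# Note: `params`/`param_names` make asv run every `time_*` method once per
# orient value. The helper below is only a sketch of passing the same
# parameter through by hand.
def _run_to_dict_benchmark_once(orient="records"):
    bench = ToDict()
    bench.setup(orient)
    bench.time_to_dict_ints(orient)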
class ToNumpy:
def setup(self):
N = 10000
M = 10
self.df_tall = DataFrame(np.random.randn(N, M))
self.df_wide = DataFrame(np.random.randn(M, N))
self.df_mixed_tall = self.df_tall.copy()
self.df_mixed_tall["foo"] = "bar"
self.df_mixed_tall[0] = period_range("2000", periods=N)
self.df_mixed_tall[1] = range(N)
self.df_mixed_wide = self.df_wide.copy()
self.df_mixed_wide["foo"] = "bar"
self.df_mixed_wide[0] = period_range("2000", periods=M)
self.df_mixed_wide[1] = range(M)
def time_to_numpy_tall(self):
self.df_tall.to_numpy()
def time_to_numpy_wide(self):
self.df_wide.to_numpy()
def time_to_numpy_mixed_tall(self):
self.df_mixed_tall.to_numpy()
def time_to_numpy_mixed_wide(self):
self.df_mixed_wide.to_numpy()
def time_values_tall(self):
self.df_tall.values
def time_values_wide(self):
self.df_wide.values
def time_values_mixed_tall(self):
self.df_mixed_tall.values
def time_values_mixed_wide(self):
self.df_mixed_wide.values
class Repr:
def setup(self):
nrows = 10000
data = np.random.randn(nrows, 10)
arrays = np.tile(np.random.randn(3, nrows // 100), 100)
idx = MultiIndex.from_arrays(arrays)
self.df3 = DataFrame(data, index=idx)
self.df4 = DataFrame(data, index=np.random.randn(nrows))
self.df_tall = DataFrame(np.random.randn(nrows, 10))
self.df_wide = DataFrame(np.random.randn(10, nrows))
def time_html_repr_trunc_mi(self):
self.df3._repr_html_()
def time_html_repr_trunc_si(self):
self.df4._repr_html_()
def time_repr_tall(self):
repr(self.df_tall)
def time_frame_repr_wide(self):
repr(self.df_wide)
class MaskBool:
def setup(self):
data = np.random.randn(1000, 500)
df = DataFrame(data)
df = df.where(df > 0)
self.bools = df > 0
self.mask = isnull(df)
def time_frame_mask_bools(self):
self.bools.mask(self.mask)
def time_frame_mask_floats(self):
self.bools.astype(float).mask(self.mask)
class Isnull:
def setup(self):
N = 10 ** 3
self.df_no_null = DataFrame(np.random.randn(N, N))
sample = np.array([np.nan, 1.0])
data = np.random.choice(sample, (N, N))
self.df = DataFrame(data)
sample = np.array(list(string.ascii_letters + string.whitespace))
data = np.random.choice(sample, (N, N))
self.df_strings = | DataFrame(data) | pandas.DataFrame |
#!/usr/bin/python3
"""
AbxRxPro: Antibiotic Resistance Profiler
Version: 2.1.1-alpha
Last modified: 25/03/2021
Github: https://github.com/CaileanCarter/AbxRxPro
Author: <NAME>
Email: <EMAIL>
Institute affiliation: Quadram Institute, Norwich Research Park
AbxRxPro is a tool for the visualisation of phenotypic antibiotic resistance as a bubble plot.
It allows for the inclusion of genotypic data from major antibiotic resistance genotype identifying programmes
like RGI (resistance gene identifier), staramr and amrfinder. Gene frequencies are also plotted alongside.
Plots can be saved as profiles which can be loaded directly. Plots can also be exported as an interactive
HTML file. Plots include text overlay and user-defined colour scheme for the plot.
Plotting is done with Plotly and the plot will be displayed in your browser.
For help, run AbxRxPro without any arguments or the -h or --help flag.
Alternatively, check out the AbxRxPro documentation.
"""
import argparse
import glob
import json
import logging
from datetime import datetime
from itertools import product
from os import path, remove
import pandas as pd
import plotly.graph_objects as go
import plotly.io as pio
# from abxgenecorr import abxcorr
relativepath = path.dirname(path.abspath(__file__))
date = datetime.now()
logging.basicConfig(
filename = path.join(relativepath, "logs", f"{date.strftime('%y-%m-%d')}.log"),
format = '%(name)s - %(levelname)s - %(message)s',
level = logging.INFO
)
### BASIC FUNCTIONS ###
def delete_profile(profile):
logging.info(f"""{date.strftime("%d/%m/%Y %H:%M")} Starting new log. \nUser requested to delete profile: {profile}""")
# Check user wants to delete this profile
response = input(f"Are you sure you want to delete {profile}?: (y/n)\n")
# Handle response
if response == "n":
return
elif response == "y":
logging.info("User confirmed deletion of profile")
print(f"Deleting {profile}...")
else:
print("Sorry, response was not recognised. Please try again and respond with 'y' or 'n'.")
logging.warning(f"User did not provide a valid response. User entered: {response}")
return
with open(rf"{relativepath}\settings.json", 'r') as settings:
data = json.load(settings)
try:
del data['profiles'][profile]
logging.info("Deleted profile from imported settings.")
except KeyError:
logging.error(f"Profile ({profile}) wasn't found and exception raised.")
raise ValueError("Profile not found. Please enter a valid profile ID. See profile list for available profiles.")
with open(rf"{relativepath}\settings.json", 'w') as settings:
logging.info("Settings with updated profile list has been saved.")
json.dump(data, settings, indent=4)
try:
remove(rf"{relativepath}\profiles\{profile}.json")
except FileNotFoundError:
logging.error(f"Profile files weren't found for {profile}. Exception raised.")
raise FileNotFoundError(f"Could not find profile file associated with {profile}")
try:
remove(rf"{relativepath}\profiles\{profile}_gf.json")
except FileNotFoundError:
logging.warning(f"No gene frequency file could be found for {profile}. Perhaps no genotype data available.")
logging.info("""Profile associated files were successfully removed. \nDone. \n \n""")
def list_profiles():
"List all saved antibiotic resistance profiles."
logging.info(f"""{date.strftime("%d/%m/%Y %H:%M")} Starting new log. \nUser requested to see profiles...""")
with open(rf"{relativepath}\settings.json", 'r') as settings:
data = json.load(settings)
profiles = data["profiles"]
profiletable = pd.DataFrame.from_dict(profiles, orient="index")
print(profiletable.to_string())
logging.info("""Profiles were successfully displayed. \nEnd. \n \n""")
parser.exit()
def _find_log():
logging.info("User is requesting location of log file...")
log = path.join(relativepath, "logs", date.strftime('%y-%m-%d'))
print(f"Location of relevant log file: \n{log}")
### HANDLES INPUT ###
class DataHandler:
"Takes user inputs relating to data files."
def __init__(self, antibiotics=None, profile=None, build=None, export=False, relativepath=None, show_genefrequency=False):
# user input
self.build = build
self.export = export
self.profile = profile
self.antibiotics = [antibiotic.capitalize() for antibiotic in antibiotics] if antibiotics else None
self.show_genefrequency = show_genefrequency
# internal variables
self.Colours = {
"R" : 'rgb(238, 102, 119)',
"I" : 'rgb(204, 187, 68)',
"S" : 'rgb(102, 204, 238)',
"U" : 'rgb(68, 119, 170)'
}
# self.Colours = {
# "R" : 'rgb(255, 65, 54)',
# "I" : 'rgb(255, 144, 14)',
# "S" : 'rgb(44, 160, 101)',
# "U" : 'rgb(93, 164, 214)'
# }
self.relativepath = relativepath if relativepath else path.dirname(path.abspath(__file__))
self.ClassSelection = {}
self.Genotype = False
# Data for plotting
self.isolateIDs = []
self.GeneFrequencies = {}
self.antibioticcolours = []
self.GeneCount = []
self.textoverlay = []
self.AbxAnnots = []
self.data = {}
def __call__(self, pheno=None, RGI=None, staramr=None, amrfinder=None, colours=None, dev_1=False, dev_2=False):
logging.info(
f"""{date.strftime("%d/%m/%Y %H:%M")} Starting new log.
User input parameters:
Phenotype file: {pheno}
RGI folder: {RGI}
staramr folder: {staramr}
amrfinder folder: {amrfinder}
Antibiotics: {self.antibiotics}
Profile name: {self.profile}
Build? {self.build}
Export? {self.export}
""")
if colours:
logging.info("Assigning colours")
self.set_colours(colours)
if self.build:
self.profile = self.build
if not pheno:
logging.error("No phenotypic data file was given.")
raise FileNotFoundError("No phenotypic data file given")
logging.info("Getting phenotypic data.")
self.get_pheno(pheno)
if not self.antibiotics:
logging.error(f"User did not provide any antibiotics to plot. \nAntibiotics variable: {self.antibiotics}")
raise ValueError("No list of antibiotics provided in phenotype file or using -a flag. \nCheck documentation for how antibiotics are declared.")
logging.info(f"Isolate IDs identified: {self.isolateIDs}")
logging.info("Getting antibiotics/classes from settings.")
self.load_antibiotic_settings()
self.antibiotics.sort()
self.data = {IsolateID : {} for IsolateID in self.isolateIDs}
if any([RGI, staramr, amrfinder]):
logging.info("Genotype files detected")
self.Genotype = True
else:
logging.info("No genotype files given")
self.show_genefrequency = True
if RGI:
logging.info("Getting RGI data...")
self.get_RGI(RGI)
if staramr:
logging.info("Getting staramr data...")
self.get_staramr(staramr)
if amrfinder:
logging.info("Getting amrfinder data...")
self.get_amrfinder(amrfinder)
if dev_1:
logging.warning("Exporting self.data; this is a development tool")
self._output_data()
if dev_2:
logging.warning("Exporting self.GeneFrequencies; this is a development tool")
self._output_genefrequencies()
# if corr:
# p = pd.read_excel(pheno, index_col=0)
# a = abxcorr(self.data, p)
logging.info("Creating plot annotations.")
self.make_annotations()
def load_antibiotic_settings(self):
with open(rf"{self.relativepath}\settings.json", 'r') as settings:
data = json.load(settings)
antibioticClass = data["antibiotics"]
for antibiotic in self.antibiotics:
Class = antibioticClass.get(antibiotic)
if Class:
self.ClassSelection.update({antibiotic : Class})
logging.info("Retrieved antibiotic classes.")
def set_colours(self, userScheme):
NewScheme = {}
for status, rgb in zip(self.Colours.keys(), userScheme):
NewScheme[status] = f"rgb{rgb}"
self.Colours = NewScheme
print("User defined colour scheme has been set.")
def get_pheno(self, pheno):
print("Reading antibiotic resistance phenotypic data...")
pheno_df = pd.read_excel(pheno, index_col=0)
pheno_df = pheno_df.sort_index(axis=1)
pheno_df.columns = map(str.capitalize, pheno_df.columns)
#Antibiotics on header
if not self.antibiotics:
self.antibiotics = list(pheno_df.keys())
else:
self.antibiotics.sort()
if any(filter(lambda antibiotic : antibiotic not in pheno_df.columns, self.antibiotics)):
raise ValueError(f"""Antibiotic given with -a flag which is not in the phenotypic Excel file column names. \nCheck that {self.antibiotics} are present in the Excel file column headers.""")
self.isolateIDs = list(pheno_df.index)
# Replace missing values with 'U' for 'undetermined' for antibiotic resistance provided by user.
# Then get the colour pallet for given identifier.
pheno_df = pheno_df.filter(items=self.antibiotics).fillna('U')
# Get the text overlay which is the user's original input
text = pheno_df.values.tolist()
self.textoverlay = [val for sublist in text for val in sublist]
try:
pheno_df = pheno_df.applymap(lambda user_colour: self.Colours[user_colour])
except KeyError as key:
raise KeyError(f"Invalid phenotypic resistance value: {key}. Accepted values are: R I, S, U.")
Colour_map = pheno_df.values.tolist()
self.antibioticcolours = [val for sublist in Colour_map for val in sublist]
logging.info("Phenotypic data loaded.")
print("Done.")
def assign_gene(self, ID, antibiotic, gene):
if antibiotic in self.data[ID].keys():
self.data[ID][antibiotic].append(gene)
else:
self.data[ID][antibiotic] = [gene]
if gene in self.GeneFrequencies.keys():
self.GeneFrequencies[gene]["isolates"].append(ID)
else:
self.GeneFrequencies[gene] = {"isolates" : [ID]}
def get_staramr(self, filepath):
print("Reading staramr data...")
all_files = glob.glob(path.join(filepath, "*_staramr.t*"))
logging.info("Attempting to concat files for staramr")
try:
staramr = pd.concat((pd.read_csv(file, sep="\t", index_col=0).assign(filename = path.basename(file)) for file in all_files))
except ValueError:
logging.error("Could not detect any staramr data files.")
raise FileNotFoundError("No files detected for staramr data")
try:
staramr = staramr[~staramr.Genotype.str.contains("None")] # Remove empty datasets
except TypeError:
logging.error("User provided resfinder file version.")
raise ValueError("You have provided the resfinder output of staramr instead of summary.tsv")
staramr["Predicted Phenotype"] = staramr["Predicted Phenotype"].str.title().str.split(", ") # split the row into a list
staramr["Genotype"] = staramr["Genotype"].str.split(", ")
staramr["filename"] = staramr["filename"].str.replace("_staramr.t.*$", "", regex=True)
for row in staramr.itertuples():
for antibiotic, gene in zip(row[2], row[1]): # iterate over all the antibiotic - gene pairs
self.assign_gene(row[3], antibiotic, gene) # give to the assign_gene function to sort out along with isolate ID.
logging.info("staramr data loaded.")
print("Done.")
def get_RGI(self, filepath):
print("Reading RGI data...")
all_files = glob.glob(path.join(filepath, "*_RGI.t*"))
logging.info("Attempting to concat files for RGI")
try:
RGI = pd.concat((pd.read_csv(file, sep="\t").assign(filename = path.basename(file)) for file in all_files)) # Concat all the .tsv files and include file name which has the isolate ID
except ValueError:
logging.error("Could not detect any RGI files")
raise FileNotFoundError("No files detected for RGI data")
RGI = RGI.filter(items=["Best_Hit_ARO", "Drug Class", "filename"])
RGI = RGI[~RGI["Drug Class"].isna()]
RGI["Drug Class"] = RGI["Drug Class"] \
.str.replace(" antibiotic", "") \
.str.title() \
.str.split("; ") # Tidies the antibiotic class list
RGI["filename"] = RGI["filename"].str.replace("_RGI.t.*$", "", regex=True)
def filter_antibiotics(Class):
"Checks to see if antibiotic class in one of those provided by the user."
antibiotics = [antibiotic + 's' for antibiotic in Class]
return list(filter(lambda x: x in self.ClassSelection.values(), antibiotics))
RGI["Drug Class"] = RGI["Drug Class"].apply(lambda Class: filter_antibiotics(Class))
RGI = RGI[RGI["Drug Class"].map(lambda x: len(x)) > 0] # remove rows with empty lists.
for row in RGI.itertuples():
for antibiotic in row[2]: # iterate over all the antibiotics
self.assign_gene(row[3], antibiotic, row[1]) # give to the assign_gene function to sort out along with isolate ID.
logging.info("RGI data loaded.")
print("Done.")
def get_amrfinder(self, filepath):
print("Reading amrfinder data...")
all_files = glob.glob(path.join(filepath, "*_amrfinder.t*"))
logging.info("Attempting to concat amrfinder files")
try:
amrfinder = pd.concat((pd.read_csv(file, sep="\t", index_col=0).assign(filename = path.basename(file)) for file in all_files))
except ValueError:
logging.error("Could not detect any amrfinder data files.")
raise FileNotFoundError("No files detected for amrfinder data")
amrfinder = amrfinder.filter(items=["Gene symbol", "Subclass", "filename"]) # extract relevant info
amrfinder = amrfinder[~amrfinder["Subclass"].isna()]
amrfinder["Subclass"] = amrfinder["Subclass"].str.capitalize() # tidy antibiotic names
amrfinder["filename"] = amrfinder["filename"].str.replace("_amrfinder.t.*$", "", regex=True)
for row in amrfinder.itertuples():
self.assign_gene(row[3], row[2], row[1]) # give to the assign_gene function to sort out along with isolate ID.
logging.info("amrfinder data loaded.")
print("Done.")
def make_annotations(self):
print("Writing graph annotations...")
for isolateID, antibiotic in product(self.isolateIDs, self.antibiotics):
annotation = ""
count = 5 # minimum marker size
if antibiotic in self.data[isolateID].keys():
annotation = f"<b>{antibiotic}:</b> <br>" + "<br>".join(set(self.data[isolateID][antibiotic]))
count += len(self.data[isolateID][antibiotic])
Class = self.ClassSelection.get(antibiotic)
if Class in self.data[isolateID].keys():
if annotation:
annotation += "<br>"
annotation += f"<b>{Class}:</b> <br>" + "<br>".join(set(self.data[isolateID][Class]))
count += len(self.data[isolateID][Class])
self.GeneCount.append(count)
if annotation:
self.AbxAnnots.append(annotation)
else:
self.AbxAnnots.append("<b>No genotype identified</b>")
print("Done.")
def _output_data(self):
with open(rf"{self.relativepath}\data.json", "w") as outputdata:
json.dump(self.data, outputdata, indent=4)
def _output_genefrequencies(self):
with open(rf"{self.relativepath}\genefrequencies.json", "w") as output:
json.dump(self.GeneFrequencies, output, indent=4)
### PLOTS PROFILES ###
class Display(DataHandler):
"Plots antibiotic resistance files."
def open_profile(self, profile):
logging.info(f"""{date.strftime("%d/%m/%Y %H:%M")} Starting new log.
Opening profile: {profile}""")
print(f"Loading {profile}...")
fig = pio.read_json(rf"{self.relativepath}\profiles\{profile}.json") # open main profile
gffig = None  # avoids a NameError below when the profile has no gene frequency file
try:
gffig = pio.read_json(rf"{self.relativepath}\profiles\{profile}_gf.json") # open gene frequency plot
except FileNotFoundError:
self.show_genefrequency = True
logging.warning("No gene frequency file associated with this profile. Possibly no genotypic data available.")
logging.info("Profile associated files openned.")
if self.export:
self.export_HTML(profile, fig, gffig) # export the profile if user requested
print(f"{profile} has loaded... \nNow plotting...")
pio.show(fig)
logging.info("Main plot displayed")
if not self.show_genefrequency:
pio.show(gffig)
logging.info("Gene frequency plot displayed")
logging.info("Done. \n \n")
print(f"{profile} is now displaying...")
def create_profile(self, profile, fig, gffig):
"Create a profile from inputted data which can be launched from again instead of inputting data and processing."
logging.info(f"Creating new profile with name: {profile}")
with open(rf"{self.relativepath}\settings.json", 'r') as settings:
profiles = json.load(settings)
if profile in profiles["profiles"].keys():
logging.error(f"Creating profile with name {profile} when it already exists.")
raise ValueError("Profile already exists. Please provide a different name or delete existing profile.")
time = datetime.now()
# Create summary of profile to be shown in -p
profiles['profiles'][profile] = {
"Date created" : time.strftime("%d/%m/%Y %H:%M"),
"Antibiotic count" : len(self.antibiotics),
"Number of isolates" : len(self.isolateIDs),
"Antibiotics" : ', '.join(self.antibiotics[:5]) + '...',
"Isolates" : ', '.join(self.isolateIDs[:5]) + '...'}
with open(rf"{self.relativepath}\settings.json", 'w') as settings:
logging.info("Saving new profile to settings.")
json.dump(profiles, settings, indent=4)
pio.write_json(fig, rf"{self.relativepath}\profiles\{profile}.json")
if self.Genotype:
pio.write_json(gffig, rf"{self.relativepath}\profiles\{profile}_gf.json")
logging.info("Profile saved.")
print("New profile has been saved.")
def export_HTML(self, profile, fig, gffig):
"Exports profiles to a HTML format which can be launched to a web browser and retain interactivity."
logging.info("Exporting profile.")
Download_path = path.join(path.expanduser('~'),'Downloads')
if path.exists(path.join(Download_path, f'{profile}.html')):
logging.warning("Profile already exported to Downloads folder.")
raise FileExistsError("Profile already in Downloads folder.")
Downloads = path.join(Download_path, f'{profile}.html')
go.Figure.write_html(fig, Downloads)
if self.Genotype:
Downloads = path.join(Download_path, f'{profile}_gf.html')
go.Figure.write_html(gffig, Downloads)
logging.info(f"Profiles were exported to: {Download_path}\\{profile}.html & {profile}_gf.html")
print(f"""Your antibiotic resistance profile has been exported to your Downloads folder:
{Download_path}\\{profile}.html """ + "& {profile}_gf.html" if self.Genotype else "")
def plot_frequencies(self):
"Plots gene frequencies"
logging.info("Plotting gene frequencies")
gf = | pd.DataFrame.from_dict(self.GeneFrequencies, orient="index") | pandas.DataFrame.from_dict |
import logging
import yaml
import os
import docker
import re
import sys
from tempfile import NamedTemporaryFile
import numpy as np
import pandas as pd
from pandas.errors import EmptyDataError
from docker.errors import NotFound, APIError
from io import StringIO
# from pynomer.client import NomerClient
# from ..core import URIMapper, URIManager, TaxId
from ..util.taxo_helper import *
pd.options.mode.chained_assignment = None
"""
https://github.com/globalbioticinteractions/globalbioticinteractions/wiki/Taxonomy-Matching
"""
class NoValidColumnException(Exception):
pass
class ConfigurationError(Exception):
pass
def create_mapping(df):
"""
Return a dict that keeps track of duplicated items in a DataFrame
"""
return (
df.reset_index()
.groupby(df.columns.tolist(), dropna=False)["index"]
.agg(["first", tuple])
.set_index("first")["tuple"]
.to_dict()
)
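# A minimal sketch (illustrative helper) of the duplicate-tracking dict
# returned by `create_mapping`; the tiny frame below is an invented example.
def _example_create_mapping():
    df = pd.DataFrame({"name": ["Canis lupus", "Canis lupus", "Felis catus"]})
    mapping = create_mapping(df)
    # Keys are the first row index of each group of identical rows, values are
    # the tuple of all row indices in that group, e.g. {0: (0, 1), 2: (2,)}
    return mapping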
class TaxonomicEntityValidator:
def __init__(self, config):
self.logger = logging.getLogger(__name__)
self.config = config
self.taxo_to_matcher = {
"GBIF": "gbif",
"NCBI": "ncbi",
"IF": "indexfungorum",
"SILVA": "ncbi",
}
self.default_name_matcher = "globalnames"
self.nomer = NomerHelper()
def validate(self, df):
"""For a subset of columns (e.g. consumers and resources),
validate taxonomic ids and/or names against a source taxonomy.
Returns the input DataFrame with new columns containing the valid
ids and names for each query column.
"""
for column_config in self.config.columns:
# Set default values
assert column_config.uri_column is not None
column_config.id_column = (
column_config.id_column if "id_column" in column_config else None
)
column_config.name_column = (
column_config.name_column if "name_column" in column_config else None
)
column_config.source_taxonomy = (
column_config.source_taxonomy
if "source_taxonomy" in column_config
else None
)
if not (column_config.id_column or column_config.name_column):
raise NoValidColumnException(
"You should specify at least one valid column containing the taxon names or ids."
)
# Map taxa to target taxonomy
self.logger.info(
f"Validate {df.shape[0]} taxa from columns ({column_config.id_column},{column_config.name_column})"
)
valid_df = self.validate_columns(
df,
id_column=column_config.id_column,
name_column=column_config.name_column,
source_taxonomy=column_config.source_taxonomy,
)
df[column_config.uri_column] = valid_df["iri"]
df[column_config.valid_name_column] = valid_df["valid_name"]
df[column_config.valid_id_column] = valid_df["valid_id"]
return df
def validate_columns(
self, df, id_column=None, name_column=None, source_taxonomy=None
):
"""
Taxonomic entity validation consists in checking that the pair (taxid, name)
is valid in a given taxonomy (both taxid and name are optional, but at least
one of them must exist). This function adds a column "valid_id" and a column
"valid_name" to the input DataFrame. If both values are NaN, the corresponding
entity is considered invalid.
"""
def add_prefix(col, src_taxo):
"""
Add the source taxonomy name as a prefix to all taxids in a column
"""
def return_prefixed(id, src_taxo):
if (
pd.notnull(id) and len(str(id).split(":")) == 2
): # .startswith(src_taxo + ":"):
return (
id
if not pd.isna(
pd.to_numeric(str(id).split(":")[-1], errors="coerce")
)
else np.nan
)
elif pd.notnull(id) and pd.isna(pd.to_numeric(id, errors="coerce")):
return np.nan
elif | pd.notnull(id) | pandas.notnull |
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
# TODO: Modify DB to fix 1084
from unittest import TestCase, main
from datetime import datetime
from os import close, remove
from os.path import join, basename, exists
from tempfile import mkstemp
import pandas as pd
from qiita_core.util import qiita_test_checker
from qiita_core.exceptions import IncompetentQiitaDeveloperError
from qiita_db.exceptions import (QiitaDBError, QiitaDBUnknownIDError,
QiitaDBStatusError, QiitaDBLookupError)
from qiita_db.study import Study, StudyPerson
from qiita_db.user import User
from qiita_db.util import get_mountpoint, get_count
from qiita_db.data import BaseData, RawData, PreprocessedData, ProcessedData
from qiita_db.metadata_template import PrepTemplate
@qiita_test_checker()
class BaseDataTests(TestCase):
"""Tests the BaseData class"""
def test_init(self):
"""Raises an error if trying to instantiate the base data"""
with self.assertRaises(IncompetentQiitaDeveloperError):
BaseData(1)
@qiita_test_checker()
class RawDataTests(TestCase):
"""Tests the RawData class"""
def setUp(self):
fd, self.seqs_fp = mkstemp(suffix='_seqs.fastq')
close(fd)
fd, self.barcodes_fp = mkstemp(suffix='_barcodes.fastq')
close(fd)
self.filetype = 2
self.filepaths = [(self.seqs_fp, 1), (self.barcodes_fp, 2)]
_, self.db_test_raw_dir = get_mountpoint('raw_data')[0]
with open(self.seqs_fp, "w") as f:
f.write("\n")
with open(self.barcodes_fp, "w") as f:
f.write("\n")
self._clean_up_files = []
# Create some new PrepTemplates
metadata_dict = {
'SKB8.640193': {'center_name': 'ANL',
'primer': 'GTGCCAGCMGCCGCGGTAA',
'barcode': 'GTCCGCAAGTTA',
'run_prefix': "s_G1_L001_sequences",
'platform': 'ILLUMINA',
'library_construction_protocol': 'AAAA',
'experiment_design_description': 'BBBB'}}
metadata = pd.DataFrame.from_dict(metadata_dict, orient='index')
self.pt1 = PrepTemplate.create(metadata, Study(1), "16S")
self.pt2 = PrepTemplate.create(metadata, Study(1), "18S")
self.prep_templates = [self.pt1, self.pt2]
def tearDown(self):
for f in self._clean_up_files:
remove(f)
def test_create(self):
"""Correctly creates all the rows in the DB for the raw data"""
# Check that the returned object has the correct id
exp_id = get_count("qiita.raw_data") + 1
obs = RawData.create(self.filetype, self.prep_templates,
self.filepaths)
self.assertEqual(obs.id, exp_id)
# Check that the raw data have been correctly added to the DB
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.raw_data WHERE raw_data_id=%d" % exp_id)
# raw_data_id, filetype, link_filepaths_status
self.assertEqual(obs, [[exp_id, 2, 'idle']])
# Check that the raw data has been correctly linked with the prep
# templates
sql = """SELECT prep_template_id
FROM qiita.prep_template
WHERE raw_data_id = %s
ORDER BY prep_template_id"""
obs = self.conn_handler.execute_fetchall(sql, (exp_id,))
self.assertEqual(obs, [[self.pt1.id], [self.pt2.id]])
# Check that the files have been copied to right location
exp_seqs_fp = join(self.db_test_raw_dir,
"%d_%s" % (exp_id, basename(self.seqs_fp)))
self.assertTrue(exists(exp_seqs_fp))
self._clean_up_files.append(exp_seqs_fp)
exp_bc_fp = join(self.db_test_raw_dir,
"%d_%s" % (exp_id, basename(self.barcodes_fp)))
self.assertTrue(exists(exp_bc_fp))
self._clean_up_files.append(exp_bc_fp)
# Check that the filepaths have been correctly added to the DB
top_id = self.conn_handler.execute_fetchone(
"SELECT count(1) FROM qiita.filepath")[0]
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.filepath WHERE filepath_id=%d or "
"filepath_id=%d" % (top_id - 1, top_id))
exp_seqs_fp = "%d_%s" % (exp_id, basename(self.seqs_fp))
exp_bc_fp = "%d_%s" % (exp_id, basename(self.barcodes_fp))
# filepath_id, path, filepath_type_id
exp = [[top_id - 1, exp_seqs_fp, 1, '852952723', 1, 5],
[top_id, exp_bc_fp, 2, '852952723', 1, 5]]
self.assertEqual(obs, exp)
# Check that the raw data have been correctly linked with the filepaths
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.raw_filepath WHERE raw_data_id=%d" % exp_id)
# raw_data_id, filepath_id
self.assertEqual(obs, [[exp_id, top_id - 1], [exp_id, top_id]])
def test_create_error(self):
with self.assertRaises(QiitaDBError):
RawData.create(self.filetype, [PrepTemplate(1)], self.filepaths)
def test_get_filepaths(self):
"""Correctly returns the filepaths to the raw files"""
rd = RawData(1)
obs = rd.get_filepaths()
exp = [
(1, join(self.db_test_raw_dir, '1_s_G1_L001_sequences.fastq.gz'),
"raw_forward_seqs"),
(2, join(self.db_test_raw_dir,
'1_s_G1_L001_sequences_barcodes.fastq.gz'), "raw_barcodes")]
self.assertEqual(obs, exp)
def test_studies(self):
"""Correctly returns the study ids"""
rd = RawData(1)
self.assertEqual(rd.studies, [1])
def test_data_types(self):
"""Correctly returns the data_types of raw_data"""
rd = RawData(1)
self.assertEqual(rd.data_types(), ["18S"])
def test_data_types_id(self):
"""Correctly returns the data_types of raw_data"""
rd = RawData(1)
self.assertEqual(rd.data_types(ret_id=True), [2])
def test_filetype(self):
rd = RawData(1)
self.assertEqual(rd.filetype, "FASTQ")
def test_prep_templates(self):
rd = RawData(1)
self.assertEqual(rd.prep_templates, [1])
def test_link_filepaths_status(self):
rd = RawData(1)
self.assertEqual(rd.link_filepaths_status, 'idle')
def test_link_filepaths_status_setter(self):
rd = RawData(1)
self.assertEqual(rd.link_filepaths_status, 'idle')
rd._set_link_filepaths_status('linking')
self.assertEqual(rd.link_filepaths_status, 'linking')
rd._set_link_filepaths_status('unlinking')
self.assertEqual(rd.link_filepaths_status, 'unlinking')
rd._set_link_filepaths_status('failed: error')
self.assertEqual(rd.link_filepaths_status, 'failed: error')
def test_link_filepaths_status_setter_error(self):
rd = RawData(1)
with self.assertRaises(ValueError):
rd._set_link_filepaths_status('not a valid status')
def test_is_preprocessed(self):
self.assertTrue(RawData(1)._is_preprocessed())
rd = RawData.create(self.filetype, self.prep_templates, self.filepaths)
self.assertFalse(rd._is_preprocessed())
def test_clear_filepaths(self):
rd = RawData.create(self.filetype, [self.pt1], self.filepaths)
self.assertTrue(self.conn_handler.execute_fetchone(
"SELECT EXISTS(SELECT * FROM qiita.raw_filepath "
"WHERE raw_data_id=%s)", (rd.id,))[0])
# add files to clean before cleaning the filepaths
study_id = rd.studies[0]
path_for_removal = join(get_mountpoint("uploads")[0][1], str(study_id))
self._clean_up_files = [join(path_for_removal,
basename(f).split('_', 1)[1])
for _, f, _ in rd.get_filepaths()]
# cleaning the filepaths
rd.clear_filepaths()
self.assertFalse(self.conn_handler.execute_fetchone(
"SELECT EXISTS(SELECT * FROM qiita.raw_filepath "
"WHERE raw_data_id=%s)", (rd.id,))[0])
def test_clear_filepaths_error(self):
with self.assertRaises(QiitaDBError):
RawData(1).clear_filepaths()
def test_exists(self):
self.assertTrue(RawData.exists(1))
self.assertFalse(RawData.exists(1000))
def test_delete_error_no_exists(self):
# the raw data doesn't exist
with self.assertRaises(QiitaDBUnknownIDError):
RawData.delete(1000, 0)
def test_delete_error_raw_data_not_linked(self):
# the raw data and the prep template id are not linked
with self.assertRaises(QiitaDBError):
RawData.delete(1, self.pt2.id)
def test_delete_error_prep_template_no_exists(self):
# the prep template does not exist
with self.assertRaises(QiitaDBError):
RawData.delete(1, 1000)
def test_delete_error_linked_files(self):
# the raw data has linked files
with self.assertRaises(QiitaDBError):
RawData.delete(1, 1)
def test_delete(self):
rd = RawData.create(self.filetype, self.prep_templates,
self.filepaths)
sql_pt = """SELECT prep_template_id
FROM qiita.prep_template
WHERE raw_data_id = %s
ORDER BY prep_template_id"""
obs = self.conn_handler.execute_fetchall(sql_pt, (rd.id,))
self.assertEqual(obs, [[self.pt1.id], [self.pt2.id]])
# This delete call will only unlink the raw data from the prep template
RawData.delete(rd.id, self.pt2.id)
# Check that it successfully unlink the raw data from pt2
obs = self.conn_handler.execute_fetchall(sql_pt, (rd.id,))
self.assertEqual(obs, [[self.pt1.id]])
self.assertEqual(self.pt2.raw_data, None)
# If we try to remove the RawData now, it should raise an error
# because it still has files attached to it
with self.assertRaises(QiitaDBError):
RawData.delete(rd.id, self.pt1.id)
# Clear the files so we can actually remove the RawData
study_id = rd.studies[0]
path_for_removal = join(get_mountpoint("uploads")[0][1], str(study_id))
self._clean_up_files.extend([join(path_for_removal,
basename(f).split('_', 1)[1])
for _, f, _ in rd.get_filepaths()])
rd.clear_filepaths()
RawData.delete(rd.id, self.pt1.id)
obs = self.conn_handler.execute_fetchall(sql_pt, (rd.id,))
self.assertEqual(obs, [])
# Check that all expected rows have been deleted
sql = """SELECT EXISTS(
SELECT * FROM qiita.raw_filepath
WHERE raw_data_id = %s)"""
self.assertFalse(self.conn_handler.execute_fetchone(sql, (rd.id,))[0])
sql = """SELECT EXISTS(
SELECT * FROM qiita.raw_data
WHERE raw_data_id=%s)"""
self.assertFalse(self.conn_handler.execute_fetchone(sql, (rd.id,))[0])
def test_status(self):
rd = RawData(1)
s = Study(1)
self.assertEqual(rd.status(s), 'private')
# Since the status is inferred from the processed data, change the
# status of the processed data so we can check how it changes in the
# preprocessed data
pd = ProcessedData(1)
pd.status = 'public'
self.assertEqual(rd.status(s), 'public')
# Check that new raw data has sandbox as status since no
# processed data exists for them
rd = RawData.create(self.filetype, self.prep_templates, self.filepaths)
self.assertEqual(rd.status(s), 'sandbox')
def test_status_error(self):
# Let's create a new study, so we can check that the error is raised
# because the new study does not have access to the raw data
info = {
"timeseries_type_id": 1,
"metadata_complete": True,
"mixs_compliant": True,
"number_samples_collected": 25,
"number_samples_promised": 28,
"study_alias": "FCM",
"study_description": "Microbiome of people who eat nothing but "
"fried chicken",
"study_abstract": "Exploring how a high fat diet changes the "
"gut microbiome",
"emp_person_id": StudyPerson(2),
"principal_investigator_id": StudyPerson(3),
"lab_person_id": StudyPerson(1)
}
s = Study.create(User('<EMAIL>'), "Fried chicken microbiome",
[1], info)
rd = RawData(1)
with self.assertRaises(QiitaDBStatusError):
rd.status(s)
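# A minimal sketch (illustrative helper, not used by the tests) of the
# prep-template metadata shape built in setUp above: a nested dict keyed by
# sample ID converted with DataFrame.from_dict(..., orient='index'), i.e. one
# row per sample.
def _example_prep_metadata_frame():
    metadata_dict = {'SKB8.640193': {'center_name': 'ANL', 'platform': 'ILLUMINA'}}
    return pd.DataFrame.from_dict(metadata_dict, orient='index')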
@qiita_test_checker()
class PreprocessedDataTests(TestCase):
"""Tests the PreprocessedData class"""
def setUp(self):
self.prep_template = PrepTemplate(1)
self.study = Study(1)
self.params_table = "preprocessed_sequence_illumina_params"
self.params_id = 1
fd, self.fna_fp = mkstemp(suffix='_seqs.fna')
close(fd)
fd, self.qual_fp = mkstemp(suffix='_seqs.qual')
close(fd)
self.filepaths = [(self.fna_fp, 4), (self.qual_fp, 5)]
_, self.db_test_ppd_dir = get_mountpoint(
'preprocessed_data')[0]
self.ebi_submission_accession = "EBI123456-A"
self.ebi_study_accession = "EBI123456-B"
with open(self.fna_fp, "w") as f:
f.write("\n")
with open(self.qual_fp, "w") as f:
f.write("\n")
self._clean_up_files = []
def tearDown(self):
for f in self._clean_up_files:
remove(f)
def test_create(self):
"""Correctly creates all the rows in the DB for preprocessed data"""
# Check that the returned object has the correct id
obs = PreprocessedData.create(
self.study, self.params_table,
self.params_id, self.filepaths, prep_template=self.prep_template,
ebi_submission_accession=self.ebi_submission_accession,
ebi_study_accession=self.ebi_study_accession)
self.assertEqual(obs.id, 3)
# Check that the preprocessed data have been correctly added to the DB
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.preprocessed_data WHERE "
"preprocessed_data_id=3")
# preprocessed_data_id, preprocessed_params_table,
# preprocessed_params_id, submitted_to_insdc_status,
# ebi_submission_accession, ebi_study_accession, data_type_id,
# link_filepaths_status, vamps_status, processing_status
exp = [[3, "preprocessed_sequence_illumina_params", 1,
'not submitted', "EBI123456-A", "EBI123456-B", 2, 'idle',
'not submitted', 'not_processed']]
self.assertEqual(obs, exp)
# Check that the preprocessed data has been linked with its study
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.study_preprocessed_data WHERE "
"preprocessed_data_id=3")
exp = [[1, 3]]
self.assertEqual(obs, exp)
# Check that the files have been copied to right location
exp_fna_fp = join(self.db_test_ppd_dir,
"3_%s" % basename(self.fna_fp))
self.assertTrue(exists(exp_fna_fp))
self._clean_up_files.append(exp_fna_fp)
exp_qual_fp = join(self.db_test_ppd_dir,
"3_%s" % basename(self.qual_fp))
self.assertTrue(exists(exp_qual_fp))
self._clean_up_files.append(exp_qual_fp)
# Check that the filepaths have been correctly added to the DB
obs_id = self.conn_handler.execute_fetchone(
"SELECT count(1) from qiita.filepath")[0]
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.filepath WHERE filepath_id=%d or "
"filepath_id=%d" % (obs_id - 1, obs_id))
exp_fna_fp = "3_%s" % basename(self.fna_fp)
exp_qual_fp = "3_%s" % basename(self.qual_fp)
# filepath_id, path, filepath_type_id
exp = [[obs_id - 1, exp_fna_fp, 4, '852952723', 1, 3],
[obs_id, exp_qual_fp, 5, '852952723', 1, 3]]
self.assertEqual(obs, exp)
def test_create_data_type_only(self):
# Check that the returned object has the correct id
obs = PreprocessedData.create(self.study, self.params_table,
self.params_id, self.filepaths,
data_type="18S")
self.assertEqual(obs.id, 3)
# Check that the preprocessed data have been correctly added to the DB
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.preprocessed_data WHERE "
"preprocessed_data_id=3")
# preprocessed_data_id, preprocessed_params_table,
# preprocessed_params_id, submitted_to_insdc_status,
# ebi_submission_accession, ebi_study_accession, data_type_id,
# link_filepaths_status, vamps_status, processing_status
exp = [[3, "preprocessed_sequence_illumina_params", 1,
'not submitted', None, None, 2, 'idle', 'not submitted',
'not_processed']]
self.assertEqual(obs, exp)
# Check that the preprocessed data has been linked with its study
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.study_preprocessed_data WHERE "
"preprocessed_data_id=3")
exp = [[1, 3]]
self.assertEqual(obs, exp)
# Check that the files have been copied to right location
exp_fna_fp = join(self.db_test_ppd_dir,
"3_%s" % basename(self.fna_fp))
self.assertTrue(exists(exp_fna_fp))
self._clean_up_files.append(exp_fna_fp)
exp_qual_fp = join(self.db_test_ppd_dir,
"3_%s" % basename(self.qual_fp))
self.assertTrue(exists(exp_qual_fp))
self._clean_up_files.append(exp_qual_fp)
# Check that the filepaths have been correctly added to the DB
obs_id = self.conn_handler.execute_fetchone(
"SELECT count(1) from qiita.filepath")[0]
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.filepath WHERE filepath_id=%d or "
"filepath_id=%d" % (obs_id - 1, obs_id))
exp_fna_fp = "3_%s" % basename(self.fna_fp)
exp_qual_fp = "3_%s" % basename(self.qual_fp)
# filepath_id, path, filepath_type_id
exp = [[obs_id - 1, exp_fna_fp, 4, '852952723', 1, 3],
[obs_id, exp_qual_fp, 5, '852952723', 1, 3]]
self.assertEqual(obs, exp)
# Check that the preprocessed data have been correctly
# linked with the filepaths
obs = self.conn_handler.execute_fetchall(
"SELECT * FROM qiita.preprocessed_filepath WHERE "
"preprocessed_data_id=3")
# preprocessed_data_id, filepath_id
self.assertEqual(obs, [[3, obs_id - 1], [3, obs_id]])
def test_delete_basic(self):
"""Correctly deletes a preprocessed data"""
# testing regular delete
ppd = PreprocessedData.create(
self.study, self.params_table,
self.params_id, self.filepaths, prep_template=self.prep_template,
ebi_submission_accession=self.ebi_submission_accession,
ebi_study_accession=self.ebi_study_accession)
PreprocessedData.delete(ppd.id)
# testing that the deleted preprocessed data can't be instantiated
with self.assertRaises(QiitaDBUnknownIDError):
PreprocessedData(ppd.id)
# and for completeness testing that it raises an error if ID
# doesn't exist
with self.assertRaises(QiitaDBUnknownIDError):
PreprocessedData.delete(ppd.id)
# testing that we cannot delete because the preprocessed data status is not sandbox
with self.assertRaises(QiitaDBStatusError):
PreprocessedData.delete(1)
def test_delete_advanced(self):
# testing that we cannot delete because the preprocessed data has been
# submitted to EBI or VAMPS
ppd = PreprocessedData.create(
self.study, self.params_table,
self.params_id, self.filepaths, prep_template=self.prep_template,
ebi_submission_accession=self.ebi_submission_accession,
ebi_study_accession=self.ebi_study_accession)
# fails due to VAMPS submission
ppd.update_vamps_status('success')
with self.assertRaises(QiitaDBStatusError):
PreprocessedData.delete(ppd.id)
ppd.update_vamps_status('failed')
# fails due to EBI submission
ppd.update_insdc_status('success', 'AAAA', 'AAAA')
with self.assertRaises(QiitaDBStatusError):
PreprocessedData.delete(ppd.id)
def test_create_error_dynamic_table(self):
"""Raises an error if the preprocessed_params_table does not exist"""
with self.assertRaises(IncompetentQiitaDeveloperError):
PreprocessedData.create(self.study, "foo", self.params_id,
self.filepaths, data_type="18S")
with self.assertRaises(IncompetentQiitaDeveloperError):
PreprocessedData.create(self.study, "preprocessed_foo",
self.params_id, self.filepaths,
data_type="18S")
with self.assertRaises(IncompetentQiitaDeveloperError):
PreprocessedData.create(self.study, "foo_params", self.params_id,
self.filepaths, data_type="18S")
with self.assertRaises(IncompetentQiitaDeveloperError):
PreprocessedData.create(self.study, "preprocessed_foo_params",
self.params_id, self.filepaths,
data_type="18S")
def test_create_error_data_type(self):
with self.assertRaises(QiitaDBLookupError):
PreprocessedData.create(self.study,
"preprocessed_sequence_illumina_params",
self.params_id, self.filepaths,
data_type="Metabolomics")
with self.assertRaises(IncompetentQiitaDeveloperError):
PreprocessedData.create(self.study,
"preprocessed_sequence_illumina_params",
self.params_id, self.filepaths,
data_type="Metabolomics",
prep_template=self.prep_template)
def test_get_filepaths(self):
"""Correctly returns the filepaths to the preprocessed files"""
ppd = PreprocessedData(1)
obs = ppd.get_filepaths()
exp = [(3, join(self.db_test_ppd_dir, '1_seqs.fna'),
"preprocessed_fasta"),
(4, join(self.db_test_ppd_dir, '1_seqs.qual'),
"preprocessed_fastq"),
(5, join(self.db_test_ppd_dir, '1_seqs.demux'),
"preprocessed_demux")]
self.assertItemsEqual(obs, exp)
def test_processed_data(self):
"""Correctly returns the processed data id"""
ppd = PreprocessedData(1)
self.assertEqual(ppd.processed_data, [1])
def test_prep_template(self):
"""Correctly returns the prep template"""
ppd = PreprocessedData(1)
self.assertEqual(ppd.prep_template, 1)
def test_study(self):
"""Correctly returns the study"""
ppd = PreprocessedData(1)
self.assertEqual(ppd.study, 1)
def test_ebi_submission_accession(self):
"""Correctly returns the ebi_submission_accession"""
ppd = PreprocessedData(1)
self.assertEqual(ppd.ebi_submission_accession, 'EBI123456-AA')
def test_ebi_ebi_study_accession(self):
"""Correctly returns the ebi_study_accession"""
ppd = PreprocessedData(1)
self.assertEqual(ppd.ebi_study_accession, 'EBI123456-BB')
def test_set_ebi_submission_accession(self):
new = PreprocessedData.create(
self.study, self.params_table, self.params_id, self.filepaths,
prep_template=self.prep_template,
ebi_submission_accession=self.ebi_submission_accession,
ebi_study_accession=self.ebi_study_accession)
new.ebi_submission_accession = 'EBI12345-CC'
self.assertEqual(new.ebi_submission_accession, 'EBI12345-CC')
def test_ebi_study_accession(self):
new = PreprocessedData.create(
self.study, self.params_table,
self.params_id, self.filepaths, prep_template=self.prep_template,
ebi_submission_accession=self.ebi_submission_accession,
ebi_study_accession=self.ebi_study_accession)
new.ebi_study_accession = 'EBI12345-DD'
self.assertEqual(new.ebi_study_accession, 'EBI12345-DD')
def test_submitted_to_insdc_status(self):
"""submitted_to_insdc_status works correctly"""
# False case
pd = PreprocessedData(1)
self.assertEqual(pd.submitted_to_insdc_status(), 'submitting')
# True case
pd = PreprocessedData(2)
self.assertEqual(pd.submitted_to_insdc_status(), 'not submitted')
def test_update_insdc_status(self):
"""Able to update insdc status"""
pd = PreprocessedData(1)
self.assertEqual(pd.submitted_to_insdc_status(), 'submitting')
pd.update_insdc_status('failed')  # api: pandas.update_insdc_status
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 12 12:37:58 2022
@author: gojja and willi
"""
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.api import VAR
from scipy.stats import pearsonr
import numpy as np
from functools import reduce
from statsmodels.tsa.stattools import adfuller
import seaborn as sns
from sklearn.model_selection import TimeSeriesSplit
import statsmodels.api as sm
#%% Set Functions
"""
DISCLAIMER: Many of these functions were not used in the end, but we kept them here for future research purposes.
"""
def plot_series(series):
plt.figure(figsize=(12,6))
plt.plot(series, color='red')
plt.title(series.name, fontsize=16)
# Normalization:
def Normalization(df):
avg, dev = df.mean(), df.std()
df = (df - avg) / dev
return df
# De-trend (first diff):
def DeTrend(df):
df = df.diff().dropna()
return(df)
# Removing increasing volatility:
def Ch_Vol(df):
annual_vol = df.groupby(df.index.year).std()
df_annual = df.index.map(lambda d: annual_vol.loc[d.year])
df = df / df_annual
return(df)
# Removing seasonality:
def De_Sea(df):
month_avgs = df.groupby(df.index.month).mean()
heater_month_avg = df.index.map(lambda d: month_avgs.loc[d.month])
df = df - heater_month_avg
return(df)
def pad_monthly(df):
df["DATE"] = pd.to_datetime(df["DATE"]).dt.to_period("M")
df = df.set_index("DATE").resample("M").ffill()
df["year"], df["month"] = df.index.year, df.index.month
df.insert(0, "year", df.pop("year"))
df.insert(1, "month", df.pop("month"))
return(df)
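# Illustrative usage of pad_monthly -- a hypothetical quarterly frame, not part of the
# original data; each quarter's value is forward-filled across the months it covers:
# quarterly = pd.DataFrame({"DATE": ["2020-01-01", "2020-04-01", "2020-07-01"],
#                           "gdp": [1.0, 1.5, 1.2]})
# monthly = pad_monthly(quarterly)   # monthly index with year/month columns up front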
def adding_date_variables(df):
df["DATE"] = pd.to_datetime(df["DATE"])
df = df.set_index("DATE")
df["year"], df["month"], = df.index.year, df.index.month
df.insert(0, "year", df.pop("year"))
df.insert(1, "month", df.pop("month"))
return(df)
def transform_pad(df):
df = adding_date_variables(df)
df.iloc[:,2] = Normalization(df.iloc[:,2]) #Normalize
# df.iloc[:,2] = df.iloc[:,2].diff()
# df.iloc[:,2] = Ch_Vol(df.iloc[:,2])
# df.iloc[:,2] = De_Sea(df.iloc[:,2])
df["year"] = df["year"].astype(str)
df['month'] = df['month'].astype(str)
df["DATE"] = df[["year", "month"]].agg("-".join, axis=1)
df = pad_monthly(df)
df = df.dropna()
return df
def transform_hpi(df):
df = adding_date_variables(df)
df.iloc[:,2] = Normalization(df.iloc[:,2]) #Normalize
df.iloc[:,2] = df.iloc[:,2].diff()
# df.iloc[:,2] = Ch_Vol(df.iloc[:,2])
# df.iloc[:,2] = De_Sea(df.iloc[:,2])
df["year"] = df["year"].astype(str)
df['month'] = df['month'].astype(str)
df["DATE"] = df[["year", "month"]].agg("-".join, axis=1)
df = pad_monthly(df)
df = df.dropna()
return df
# Doing it for all varibles in a df:
def time_fix(df):
for i in range(df.shape[1]):
df.iloc[:,i] = Normalization(df.iloc[:,i])
print('Normalization' ,df.columns[i], 'complete')
# df.iloc[:,i] = DeTrend(df.iloc[:,i])
# print('DeTrend', df.columns[i], 'complete')
# df.iloc[:,i] = Ch_Vol(df.iloc[:,i])
# print('Ch_Vol', df.columns[i], 'complete')
# df.iloc[:,i] = De_Sea(df.iloc[:,i])
# print('De_Sea', df.columns[i], 'complete')
plot_series(df.iloc[:,i])
plt.axhline(0, linestyle='--', color='k', alpha=0.3)
return(df)
def corr_y_x_lag(df, lags):
values = np.zeros(shape=(lags,df.iloc[:,1:].shape[1]), dtype=object)
df_temp = df.iloc[:,1:df.shape[1]]
for i in range(df_temp.shape[1]):
for lag in range(1,lags):
y = df.iloc[lag:,0]
x = df_temp.iloc[:-lag,i]
values[lag,i] = pearsonr(y, x)[1]
print(df.columns[i+1],'Lag: %s'%lag)
print(values[lag,i])
print('------')
return(values)
# NOTE: this second, simplified definition shadows the one above; it collects the
# (correlation, p-value) pairs for each predictor column and lag.
def corr_y_x_lag(df, lags):
    results = {}
    for i in range(1, df.shape[1]):
        for lag in range(1, lags):
            y = df.iloc[lag:, 0]
            x = df.iloc[:-lag, i]
            results[(df.columns[i], lag)] = pearsonr(y, x)
    return results
def adf_test(df):
print("")
print ('Results of Dickey-Fuller Test: %s' %(df.name))
dftest = adfuller(df, autolag='AIC')
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
for key,value in dftest[4].items():
dfoutput['Critical Value (%s)'%key] = value
print (dfoutput)
def perform_adf_test(series):
result = adfuller(series)
print('ADF Statistic: %f' % result[0])
print('p-value: %f' % result[1])
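# Illustrative sketch (hypothetical series, not from the study data): the ADF null
# hypothesis is a unit root, so a small p-value (e.g. < 0.05) is read as stationarity.
# white_noise = pd.Series(np.random.default_rng(0).normal(size=200))
# perform_adf_test(white_noise)            # expect a very small p-value (stationary)
# perform_adf_test(white_noise.cumsum())   # a random walk; expect a large p-value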
#%% Load data
y_p = pd.read_csv(r"...\Data\Clean\passive_prices_m_df.csv", index_col=0)
y_a = pd.read_csv(r"...\Data\Clean\active_prices_m_df.csv", index_col=0)
agg_y = y_p.iloc[:,:2]
agg_y["mean"] = y_p.iloc[:,2:].mean(axis=1)
agg_y = agg_y[np.isfinite(agg_y).all(axis = 1)]
agg_a = y_a.iloc[:,:2]
agg_a['mean'] = y_a.iloc[:,2:].mean(axis=1)
agg_a = agg_a[np.isfinite(agg_a).all(axis=1)]
x = pd.read_csv(r"...\Data\Clean\x_df.csv")
recession = x.iloc[:,:2]
recession['recession'] = x.pop('recession')
x.loc[:,'consumer_sent'] = x.loc[:,'consumer_sent'].pct_change(periods=1)
x.loc[:,'inflation'] = x.loc[:,'inflation'].pct_change(periods=1)
x.loc[:,'m2'] = x.loc[:,'m2'].pct_change(periods=1)
x.loc[:,'hpi'] = x.loc[:,'hpi'].pct_change(periods=1)
x = x.dropna()
for i in range(x.shape[1]):
adf_test(x.iloc[:,i])
x.pop('nrou')
x.pop('interest_rate')
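# Possible next step (a sketch under the assumption that the first two columns of x are
# the year/month identifiers; not part of the original script): fit a VAR on the
# remaining stationary series.
# var_model = VAR(x.iloc[:, 2:])
# var_res = var_model.fit(maxlags=12, ic='aic')
# print(var_res.summary())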
#%% Fix the quarterly variables:
anxious_index_df = pd.read_excel(r"...\Data\Raw Data\Other Variables\Anxious Index\anxious_index_chart.xlsx")  # api: pandas.read_excel
# -*- coding: utf-8 -*-
"""Device curtailment plots.
This module creates plots are related to the curtailment of generators.
@author: <NAME>
"""
import os
import logging
import pandas as pd
from collections import OrderedDict
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.ticker as mtick
import marmot.config.mconfig as mconfig
import marmot.plottingmodules.plotutils.plot_library as plotlib
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, DataSavedInModule,
UnderDevelopment, MissingZoneData)
class MPlot(PlotDataHelper):
"""curtailment MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The curtailment.py module contains methods that are
related to the curtailment of generators.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
self.x = mconfig.parser("figure_size","xdimension")
self.y = mconfig.parser("figure_size","ydimension")
self.y_axes_decimalpt = mconfig.parser("axes_options","y_axes_decimalpt")
self.curtailment_prop = mconfig.parser("plot_data","curtailment_property")
def curt_duration_curve(self, prop: str = None,
start_date_range: str = None, end_date_range: str = None, **_):
"""Curtailment duration curve (line plot)
Displays curtailment sorted from highest occurrence to lowest
over given time period.
Args:
prop (str, optional): Controls type of re to include in plot.
Controlled through the plot_select.csv.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,f"generator_{self.curtailment_prop}",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f"{self.AGG_BY} = {zone_input}")
RE_Curtailment_DC = pd.DataFrame()
PV_Curtailment_DC = pd.DataFrame()
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
re_curt = self[f"generator_{self.curtailment_prop}"].get(scenario)
# Timeseries [MW] RE curtailment [MWh]
try: #Check for regions missing all generation.
re_curt = re_curt.xs(zone_input,level = self.AGG_BY)
except KeyError:
self.logger.info(f'No curtailment in {zone_input}')
continue
re_curt = self.df_process_gen_inputs(re_curt)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
re_curt = self.assign_curtailment_techs(re_curt)
# Timeseries [MW] PV curtailment [MWh]
pv_curt = re_curt[re_curt.columns.intersection(self.pv_gen_cat)]
re_curt = re_curt.sum(axis=1)
pv_curt = pv_curt.sum(axis=1)
re_curt = re_curt.squeeze() #Convert to Series
pv_curt = pv_curt.squeeze() #Convert to Series
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
re_curt = re_curt[start_date_range : end_date_range]
pv_curt = pv_curt[start_date_range : end_date_range]
if re_curt.empty and prop == "PV+Wind":
self.logger.warning('No data in selected Date Range')
continue
if pv_curt.empty and prop == "PV":
self.logger.warning('No data in selected Date Range')
continue
# Sort from largest to smallest
re_cdc = re_curt.sort_values(ascending=False).reset_index(drop=True)
pv_cdc = pv_curt.sort_values(ascending=False).reset_index(drop=True)
re_cdc.rename(scenario, inplace=True)
pv_cdc.rename(scenario, inplace=True)
RE_Curtailment_DC = pd.concat([RE_Curtailment_DC, re_cdc], axis=1, sort=False)
PV_Curtailment_DC = pd.concat([PV_Curtailment_DC, pv_cdc], axis=1, sort=False)
# Remove columns that have values less than 1
RE_Curtailment_DC = RE_Curtailment_DC.loc[:, (RE_Curtailment_DC >= 1).any(axis=0)]
PV_Curtailment_DC = PV_Curtailment_DC.loc[:, (PV_Curtailment_DC >= 1).any(axis=0)]
# Replace _ with white space
RE_Curtailment_DC.columns = RE_Curtailment_DC.columns.str.replace('_',' ')
PV_Curtailment_DC.columns = PV_Curtailment_DC.columns.str.replace('_',' ')
# Create Dictionary from scenario names and color list
colour_dict = dict(zip(RE_Curtailment_DC.columns, self.color_list))
fig2, ax = plt.subplots(figsize=(self.x,self.y))
if prop == "PV":
if PV_Curtailment_DC.empty:
out = MissingZoneData()
outputs[zone_input] = out
continue
# unit conversion return divisor and energy units
unitconversion = PlotDataHelper.capacity_energy_unitconversion(PV_Curtailment_DC.values.max())
PV_Curtailment_DC = PV_Curtailment_DC/unitconversion['divisor']
Data_Table_Out = PV_Curtailment_DC
Data_Table_Out = Data_Table_Out.add_suffix(f" ({unitconversion['units']})")
x_axis_lim = 1.25 * len(PV_Curtailment_DC)
for column in PV_Curtailment_DC:
ax.plot(PV_Curtailment_DC[column], linewidth=3, color=colour_dict[column],
label=column)
ax.legend(loc='lower left',bbox_to_anchor=(1,0),
facecolor='inherit', frameon=True)
ax.set_ylabel(f"PV Curtailment ({unitconversion['units']})", color='black', rotation='vertical')
if prop == "PV+Wind":
if RE_Curtailment_DC.empty:
out = MissingZoneData()
outputs[zone_input] = out
continue
# unit conversion return divisor and energy units
unitconversion = PlotDataHelper.capacity_energy_unitconversion(RE_Curtailment_DC.values.max())
RE_Curtailment_DC = RE_Curtailment_DC/unitconversion['divisor']
Data_Table_Out = RE_Curtailment_DC
Data_Table_Out = Data_Table_Out.add_suffix(f" ({unitconversion['units']})")
x_axis_lim = 1.25 * len(RE_Curtailment_DC)
for column in RE_Curtailment_DC:
ax.plot(RE_Curtailment_DC[column], linewidth=3, color=colour_dict[column],
label=column)
ax.legend(loc='lower left',bbox_to_anchor=(1,0),
facecolor='inherit', frameon=True)
ax.set_ylabel(f"PV + Wind Curtailment ({unitconversion['units']})", color='black', rotation='vertical')
ax.set_xlabel('Hours', color='black', rotation='horizontal')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(axis='y', which='major', length=5, width=1)
ax.tick_params(axis='x', which='major', length=5, width=1)
ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda x, p: format(x, f',.{self.y_axes_decimalpt}f')))
ax.margins(x=0.01)
#ax.set_xlim(0, 9490)
ax.set_xlim(0,x_axis_lim)
ax.set_ylim(bottom=0)
if mconfig.parser("plot_title_as_region"):
ax.set_title(zone_input)
outputs[zone_input] = {'fig': fig2, 'data_table': Data_Table_Out}
return outputs
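# Minimal sketch of the duration-curve construction used above (illustrative values only):
# curt = pd.Series([0.0, 5.0, 2.0, 8.0, 1.0])
# cdc = curt.sort_values(ascending=False).reset_index(drop=True)
# cdc is plotted against its positional index, so the x axis reads as the number of
# hours at or above a given curtailment level.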
def curt_pen(self, prop: str = None,
start_date_range: str = None, end_date_range: str = None, **_):
"""Plot of curtailment vs penetration.
Each scenario is represented by a different symbol on an x, y axis
Args:
prop (str, optional): Controls type of re to include in plot.
Controlled through the plot_select.csv.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, "generator_Generation", self.Scenarios),
(True, "generator_Available_Capacity", self.Scenarios),
(True, f"generator_{self.curtailment_prop}", self.Scenarios),
(True, "generator_Total_Generation_Cost", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
Penetration_Curtailment_out = pd.DataFrame()
self.logger.info(f"{self.AGG_BY } = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
gen = self["generator_Generation"].get(scenario)
try: #Check for regions missing all generation.
gen = gen.xs(zone_input,level = self.AGG_BY)
except KeyError:
self.logger.info(f'No generation in {zone_input}')
continue
avail_gen = self["generator_Available_Capacity"].get(scenario)
avail_gen = avail_gen.xs(zone_input,level=self.AGG_BY)
re_curt = self[f"generator_{self.curtailment_prop}"].get(scenario)
try:
re_curt = re_curt.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.info(f'No curtailment in {zone_input}')
continue
re_curt = self.df_process_gen_inputs(re_curt)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
re_curt = self.assign_curtailment_techs(re_curt)
# Finds the number of unique hours in the year
no_hours_year = len(gen.index.unique(level="timestamp"))
# Total generation across all technologies [MWh]
total_gen = float(gen.sum())
# Timeseries [MW] and Total VRE generation [MWh]
vre_gen = (gen.loc[(slice(None), self.vre_gen_cat),:])
total_vre_gen = float(vre_gen.sum())
# Timeseries [MW] and Total RE generation [MWh]
re_gen = (gen.loc[(slice(None), self.re_gen_cat),:])
total_re_gen = float(re_gen.sum())
# Timeseries [MW] and Total PV generation [MWh]
pv_gen = (gen.loc[(slice(None), self.pv_gen_cat),:])
total_pv_gen = float(pv_gen.sum())
# % Penetration of generation classes across the year
VRE_Penetration = (total_vre_gen/total_gen)*100
RE_Penetration = (total_re_gen/total_gen)*100
PV_Penetration = (total_pv_gen/total_gen)*100
# Timeseries [MW] and Total RE available [MWh]
re_avail = (avail_gen.loc[(slice(None), self.re_gen_cat),:])
total_re_avail = float(re_avail.sum())
# Timeseries [MW] and Total PV available [MWh]
pv_avail = (avail_gen.loc[(slice(None), self.pv_gen_cat),:])
total_pv_avail = float(pv_avail.sum())
# Total RE curtailment [MWh]
total_re_curt = float(re_curt.sum().sum())
# Timeseries [MW] and Total PV curtailment [MWh]
pv_curt = re_curt[re_curt.columns.intersection(self.pv_gen_cat)]
total_pv_curt = float(pv_curt.sum().sum())
# % of hours with curtailment
Prct_hr_RE_curt = (len((re_curt.sum(axis=1)).loc[(re_curt.sum(axis=1))>0])/no_hours_year)*100
Prct_hr_PV_curt = (len((pv_curt.sum(axis=1)).loc[(pv_curt.sum(axis=1))>0])/no_hours_year)*100
# Max instantaneous curtailment
if re_curt.empty:
continue
else:
Max_RE_Curt = max(re_curt.sum(axis=1))
if pv_curt.empty:
continue
else:
Max_PV_Curt = max(pv_curt.sum(axis=1))
# % RE and PV Curtailment Capacity Factor
if total_pv_curt > 0:
RE_Curt_Cap_factor = (total_re_curt/Max_RE_Curt)/no_hours_year
PV_Curt_Cap_factor = (total_pv_curt/Max_PV_Curt)/no_hours_year
else:
RE_Curt_Cap_factor = 0
PV_Curt_Cap_factor = 0
# % Curtailment across the year
if total_re_avail == 0:
continue
else:
Prct_RE_curt = (total_re_curt/total_re_avail)*100
if total_pv_avail == 0:
continue
else:
Prct_PV_curt = (total_pv_curt/total_pv_avail)*100
# Total generation cost
Total_Gen_Cost = self["generator_Total_Generation_Cost"].get(scenario)
Total_Gen_Cost = Total_Gen_Cost.xs(zone_input,level=self.AGG_BY)
Total_Gen_Cost = float(Total_Gen_Cost.sum())
vg_out = pd.Series([PV_Penetration ,RE_Penetration, VRE_Penetration, Max_PV_Curt,
Max_RE_Curt, Prct_PV_curt, Prct_RE_curt, Prct_hr_PV_curt,
Prct_hr_RE_curt, PV_Curt_Cap_factor, RE_Curt_Cap_factor, Total_Gen_Cost],
index=["% PV Penetration", "% RE Penetration", "% VRE Penetration",
"Max PV Curtailment [MW]", "Max RE Curtailment [MW]",
"% PV Curtailment", '% RE Curtailment',"% PV hrs Curtailed",
"% RE hrs Curtailed", "PV Curtailment Capacity Factor",
"RE Curtailment Capacity Factor", "Gen Cost"])
vg_out = vg_out.rename(scenario)
Penetration_Curtailment_out = pd.concat([Penetration_Curtailment_out, vg_out], axis=1, sort=False)
Penetration_Curtailment_out = Penetration_Curtailment_out.T
# Data table of values to return to main program
Data_Table_Out = Penetration_Curtailment_out
VG_index = pd.Series(Penetration_Curtailment_out.index)
# VG_index = VG_index.str.split(n=1, pat="_", expand=True)
# VG_index.rename(columns = {0:"Scenario"}, inplace=True)
VG_index.rename("Scenario", inplace=True)
# VG_index = VG_index["Scenario"]
Penetration_Curtailment_out.loc[:, "Scenario"] = VG_index[:,].values
marker_dict = dict(zip(VG_index.unique(), self.marker_style))
colour_dict = dict(zip(VG_index.unique(), self.color_list))
Penetration_Curtailment_out["colour"] = [colour_dict.get(x, '#333333') for x in Penetration_Curtailment_out.Scenario]
Penetration_Curtailment_out["marker"] = [marker_dict.get(x, '.') for x in Penetration_Curtailment_out.Scenario]
if Penetration_Curtailment_out.empty:
self.logger.warning(f'No Generation in {zone_input}')
out = MissingZoneData()
outputs[zone_input] = out
continue
fig1, ax = plt.subplots(figsize=(self.x,self.y))
for index, row in Penetration_Curtailment_out.iterrows():
if prop == "PV":
ax.scatter(row["% PV Penetration"], row["% PV Curtailment"],
marker=row["marker"], c=row["colour"], s=100, label = row["Scenario"])
ax.set_ylabel('% PV Curtailment', color='black', rotation='vertical')
ax.set_xlabel('% PV Penetration', color='black', rotation='horizontal')
elif prop == "PV+Wind":
ax.scatter(row["% RE Penetration"], row["% RE Curtailment"],
marker=row["marker"], c=row["colour"], s=40, label = row["Scenario"])
ax.set_ylabel('% PV + Wind Curtailment', color='black', rotation='vertical')
ax.set_xlabel('% PV + Wind Penetration', color='black', rotation='horizontal')
ax.set_ylim(bottom=0)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(axis='y', which='major', length=5, width=1)
ax.tick_params(axis='x', which='major', length=5, width=1)
ax.margins(x=0.01)
if mconfig.parser("plot_title_as_region"):
ax.set_title(zone_input)
handles, labels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys(), loc = 'lower right')
outputs[zone_input] = {'fig': fig1, 'data_table': Data_Table_Out}
return outputs
def curt_total(self, start_date_range: str = None, end_date_range: str = None, **_):
"""Creates stacked barplots of total curtailment by technology.
A separate bar is created for each scenario.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, f"generator_{self.curtailment_prop}", self.Scenarios),
(True, "generator_Available_Capacity", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f"{self.AGG_BY} = {zone_input}")
Total_Curtailment_out = pd.DataFrame()
Total_Available_gen = pd.DataFrame()
vre_curt_chunks = []
avail_gen_chunks = []
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
vre_collection = {}
avail_vre_collection = {}
vre_curt = self[f"generator_{self.curtailment_prop}"].get(scenario)
try:
vre_curt = vre_curt.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.info(f'No curtailment in {zone_input}')
continue
vre_curt = self.df_process_gen_inputs(vre_curt)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
vre_curt = self.assign_curtailment_techs(vre_curt)
avail_gen = self["generator_Available_Capacity"].get(scenario)
try: #Check for regions missing all generation.
avail_gen = avail_gen.xs(zone_input,level = self.AGG_BY)
except KeyError:
self.logger.info(f'No available generation in {zone_input}')
continue
avail_gen = self.df_process_gen_inputs(avail_gen)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
avail_gen = self.assign_curtailment_techs(avail_gen)
all_empty = True
if pd.notna(start_date_range):  # api: pandas.notna
import pandas as pd
from pm4py.objects.log.importer.xes import factory as xes_import_factory
from pm4py.objects.conversion.log.versions.to_dataframe import get_dataframe_from_event_stream
from pm4py.objects.conversion.log import converter as log_converter
from pm4py.algo.discovery.dfg import factory as dfg_factory
from reportlab import xrange
# from amun.edges_pruning import pruning_by_edge_name_freq, pruning_by_edge_name_time
from amun.guessing_advantage import AggregateType
from math import log10
import os
# from pm4py.algo.discovery.dfg import algorithm as dfg_discovery
from pm4py.algo.filtering.log.start_activities import start_activities_filter
from pm4py.algo.filtering.log.end_activities import end_activities_filter
from pm4py.objects.log.importer.csv import factory as csv_importer
from pm4py.objects.conversion.log import factory as conversion_factory
from pm4py.objects.log.adapters.pandas import csv_import_adapter
# from pruning_edges import get_pruning_edges
from amun.guessing_advantage import AggregateType
from amun.input_module import read_xes
import seaborn as sns, numpy as np
import matplotlib.pyplot as plt
import sys
def get_dfg_time(data_dir ,data):
"""
Returns the DFG matrix as a dictionary of lists. The key is a pair of activities
and the value is the list of observed transition times between them.
"""
prune_parameter_freq = 350
prune_parameter_time = -1 # keep all
# read the xes file
if data in "BPIC14":
# log = csv_importer.import_event_stream(os.path.join(data_dir, data + ".csv"))
dataset = csv_import_adapter.import_dataframe_from_path(os.path.join(data_dir, data + ".csv"), sep=";")
dataset['case:concept:name'] = dataset['Incident ID']
dataset['time:timestamp'] = dataset['DateStamp']
dataset['concept:name'] = dataset['IncidentActivity_Type']
log = conversion_factory.apply(dataset)
elif data == "Unrineweginfectie":
dataset = csv_import_adapter.import_dataframe_from_path(os.path.join(data_dir, data + ".csv"), sep=",")
dataset['case:concept:name'] = dataset['Patientnummer']
dataset['time:timestamp'] = dataset['Starttijd']
dataset['concept:name'] = dataset['Aciviteit']
log = conversion_factory.apply(dataset)
else:
log = xes_import_factory.apply(os.path.join(data_dir, data + ".xes"))
dataset = get_dataframe_from_event_stream(log)
# taking only the complete events to avoid ambiguity
if data not in ["BPIC13","BPIC20","BPIC19","BPIC14","Unrineweginfectie"]:
dataset=dataset.where((dataset["lifecycle:transition"].str.upper()=="COMPLETE" ) )
dataset=dataset.dropna(subset=['lifecycle:transition'])
#moving first row to the last one
temp_row= dataset.iloc[0]
dataset2=dataset.copy()
dataset2.drop(dataset2.index[0], inplace=True)
dataset2=dataset2.append(temp_row)
#changing column names
columns= dataset2.columns
columns= [i+"_2" for i in columns]
dataset2.columns=columns
#combining the two dataframes into one
dataset = dataset.reset_index()
dataset2=dataset2.reset_index()
dataset=pd.concat([dataset, dataset2], axis=1)
#filter the rows with the same case
dataset=dataset[dataset['case:concept:name'] == dataset['case:concept:name_2']]
#calculating time difference
dataset['time:timestamp'] = pd.to_datetime(dataset['time:timestamp'], utc=True)  # api: pandas.to_datetime
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 31 13:31:13 2019
@author: mehrdad
"""
import pandas as pd
import numpy as np
import tslib.trip_detection
# Compute the difference between observed trips and computed trips ----------------------
# Any mode to any mode
def compute_observed_vs_computed_diffs(observed_, computed_):
M1 = pd.merge(observed_[['duration_in_min','distance','od_distance2','emission',
'walk_distance', 'bike_distance', 'active_distance']],
computed_[['duration_in_min','distance','od_distance2','emission',
'walk_distance', 'bike_distance', 'active_distance']],
left_index=True, right_index=True,
how='left',
suffixes=['_observed', '_alt'],
indicator=True)
#TOOD: whenever need the user-trip-plan_id column values as columns:
# ... M1.reset_index()
diff = pd.DataFrame()
#index is automatically created by this first insert!!!
# plan_id part of the index is that of the computed
# diff['user'] = M1.user
# diff['trip'] = M1.trip
# diff['plan_id'] = M1.plan_id
diff['has_any_computed'] = (M1._merge=='both')
diff['deltaT'] = M1.duration_in_min_observed - M1.duration_in_min_alt
diff['deltaE'] = M1.emission_observed - M1.emission_alt
diff['deltaD'] = M1.distance_observed - M1.distance_alt
diff['delta_walk_D'] = M1.walk_distance_observed - M1.walk_distance_alt
diff['delta_bike_D'] = M1.bike_distance_observed - M1.bike_distance_alt
diff['delta_AD'] = M1.active_distance_observed - M1.active_distance_alt
return diff
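# Sketch of the merge-indicator pattern relied on above (hypothetical frames):
# left = pd.DataFrame({"v": [1, 2]}, index=[10, 20])
# right = pd.DataFrame({"v": [9]}, index=[10])
# m = pd.merge(left, right, left_index=True, right_index=True, how='left',
#              suffixes=['_observed', '_alt'], indicator=True)
# m._merge equals 'both' only where a computed (right-hand) row exists, which is what
# feeds the has_any_computed flag.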
# Choose the desired time-relevant low-carbon alternative --------------------------------
# Selection is made in comparison to the observed trip's attributes
def compute_substitutes(observed_, computed_, observed_vs_computed_deltas_, with_ebike):
alts = observed_vs_computed_deltas_[observed_vs_computed_deltas_.has_any_computed]
alts = alts.drop(columns='has_any_computed')
alts = pd.merge(alts, computed_[['mode']], left_index=True, right_index=True, how='inner')
alts.rename(columns={'mode':'mode_alt'}, inplace = True)
# Consider e-bike or not
if not with_ebike:
alts = alts[alts.mode_alt != 'EBICYCLE']
# Skip bike for bad weather months
alts = pd.merge(alts, observed_[['month']], left_index=True, right_index=True, how='inner')
alts = alts[(alts.mode_alt!='BICYCLE') | (~alts.month.isin([1,2,3, 10,11,12]))]
# Apply time-delta (max 3 min) and emission-saving conditions *:
#np.sum((alts.deltaT >= -3) & (alts.deltaE > 0) & (alts.mode_alt=='CAR'))/len(alts) # mainmode is incorrect?!
#M2 = alts[(alts.deltaT >= -3) & (alts.deltaE > 0) & (alts.mode_alt!='CAR')]
C_duration = -3
alts = alts[(alts.deltaT >= C_duration) & (alts.deltaE > 0)]
# Then, select the alt with the smallest emission, i.e., the largest emission-saving
deltaE_maxs = alts.groupby(level=['user', 'trip'])['deltaE'].max()
alts = pd.merge(alts, deltaE_maxs, left_index=True, right_index=True, how='inner', suffixes=['','_max'])
# another type of code:
#alts = pd.merge(alts, deltaE_maxs,
# left_on=['user','trip'],
# right_index=True, how='inner', suffixes=['','_max'])
alts = alts[alts.deltaE == alts.deltaE_max]
# test the problems
# M4[(M4.user_id==2) & (M4.trip_id==360)]
# MT[['duration_in_min_observed','mode_alt','duration_in_min_alt']]
# duration_in_min_observed mode_alt duration_in_min_alt
#user_id trip_id
#2 360 41.5 BICYCLE 43.350000
# 360 41.5 SUBWAY 29.366667
# 360 41.5 SUBWAY 28.366667
# 360 41.5 SUBWAY 29.366667
#
# Select the alt with shortest travel-time, i.e., the largest time-saving
# TODO: How about giving priority to for example bike or walk? ... more advanced prioritization
deltaT_maxs = alts.groupby(level=['user', 'trip'])['deltaT'].max()
alts = pd.merge(alts, deltaT_maxs, left_index=True, right_index=True, how='inner', suffixes=['','_max'])
alts = alts[alts.deltaT == alts.deltaT_max]
#
#2
# 198 3 3 ... 3 3
# 207 3 3 ... 3 3
# MT[['alt_plan_id','mode_alt','duration_in_min_alt', 'start_time']]
# TODO: At this point, there is still some trips with more than one alternative!
# SOME mode_alts are duplicates ... e.g. OTP query for PT returned only WALK !
# for example Trip: (2, 116) ***
dft = alts.reset_index()
dft = dft.drop_duplicates(subset=['user','trip','mode_alt'])
alts = dft.set_index(keys=['user','trip','plan_id'])
#alts = alts.drop_duplicates(subset=['user','trip','mode_alt'])
substitutes_ = pd.DataFrame()
substitutes_['deltaE_max'] = alts.deltaE_max
substitutes_['deltaT_max'] = alts.deltaT_max
return substitutes_
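# Sketch of the group-wise "keep the maximum" pattern used above (hypothetical frame):
# df = pd.DataFrame({"user": [1, 1, 2], "trip": [5, 5, 7], "deltaE": [0.2, 0.9, 0.4]})
# df = df.set_index(["user", "trip"])
# maxs = df.groupby(level=["user", "trip"])["deltaE"].max()
# df = pd.merge(df, maxs, left_index=True, right_index=True, how="inner",
#               suffixes=["", "_max"])
# df[df.deltaE == df.deltaE_max]   # keeps the row(s) with the largest saving per trip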
# Choose the desired time-relevant low-carbon alternative --------------------------------
# Selection is made in comparison to the observed trip's attributes
def compute_substitutes_regardless_of_timesaving(observed_, computed_, observed_vs_computed_deltas_,
day_weather_suitable_for_bike,
with_ebike = False,
consider_weather = True):
print("-- compute_substitutes_regardless_of_timesaving --")
alts = observed_vs_computed_deltas_[observed_vs_computed_deltas_.has_any_computed]
alts = pd.merge(alts, computed_[['mainmode']], left_index=True, right_index=True, how='inner')  # api: pandas.merge