| prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
|---|---|---|
import pandas as pd
import numpy as np
from sklearn import linear_model
from itertools import combinations
from .stats import *
### from stats import *
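# csRenameOrth (AnnData inputs) and csRenameOrth2 (expression DataFrame inputs) below restrict
# the query and training data to the genes shared through the orthology table and rename the
# query genes to the training species' symbols so the two data sets line up column-for-column.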
def csRenameOrth(adQuery,adTrain,orthTable,speciesQuery='human',speciesTrain='mouse'):
_,_,cgenes=np.intersect1d(adQuery.var_names.values, orthTable[speciesQuery], return_indices=True)
_,_,ccgenes=np.intersect1d(adTrain.var_names.values, orthTable[speciesTrain], return_indices=True)
temp1=np.zeros(len(orthTable.index.values), dtype=bool)
temp2=np.zeros(len(orthTable.index.values), dtype=bool)
temp1[cgenes]=True
temp2[ccgenes]=True
common=np.logical_and(temp1, temp2)
oTab=orthTable.loc[common.T,:]
adT=adTrain[:, oTab[speciesTrain]]
adQ=adQuery[:, oTab[speciesQuery]]
adQ.var_names = adT.var_names
return [adQ, adT]
def csRenameOrth2(expQuery,expTrain,orthTable,speciesQuery='human',speciesTrain='mouse'):
_,_,cgenes=np.intersect1d(expQuery.columns.values, orthTable[speciesQuery], return_indices=True)
_,_,ccgenes=np.intersect1d(expTrain.columns.values, orthTable[speciesTrain], return_indices=True)
temp1=np.zeros(len(orthTable.index.values), dtype=bool)
temp2=np.zeros(len(orthTable.index.values), dtype=bool)
temp1[cgenes]=True
temp2[ccgenes]=True
common=np.logical_and(temp1, temp2)
oTab=orthTable.loc[common.T,:]
expT=expTrain.loc[:, oTab[speciesTrain]]
expQ=expQuery.loc[:, oTab[speciesQuery]]
expQ.columns= expT.columns
return [expQ, expT]
def makePairTab(genes):
pairs = list(combinations(genes,2))
labels = ['genes1', 'genes2']
pTab = pd.DataFrame(data = pairs, columns = labels)
pTab['gene_pairs'] = pTab['genes1'] + '_' + pTab['genes2']
return(pTab)
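# For example, makePairTab(['g1', 'g2', 'g3']) produces rows for the pairs g1/g2, g1/g3 and
# g2/g3, with gene_pairs values 'g1_g2', 'g1_g3' and 'g2_g3'.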
def gnrAll(expDat,cellLabels):
myPatternG=sc_sampR_to_pattern(cellLabels)
res={}
groups=np.unique(cellLabels)
for i in range(0, len(groups)):
res[groups[i]]=sc_testPattern(myPatternG[groups[i]], expDat)
return res
def getClassGenes(diffRes, topX=25, bottom=True):
xi = ~pd.isna(diffRes["cval"])
diffRes = diffRes.loc[xi,:]
sortRes= diffRes.sort_values(by="cval", ascending=False)
ans=sortRes.index.values[0:topX]
if bottom:
l= len(sortRes)-topX
ans= np.append(ans, sortRes.index.values[l:] ).flatten()
return ans
def addRandToSampTab(classRes, sampTab, desc, id="cell_name"):
cNames= classRes.index.values
snames= sampTab.index.values
rnames= np.setdiff1d(cNames, snames)
stNew= pd.DataFrame()
stNew["rid"]=rnames
stNew["rdesc"]="rand"
stTop=sampTab[[id, desc]]
stNew.columns= [id, desc]
ans = stTop.append(stNew)
return ans
def ptSmall(expMat, pTab):
npairs = len(pTab.index)
genes1 = pTab['genes1'].values
genes2 = pTab['genes2'].values
expTemp=expMat.loc[:,np.unique(np.concatenate([genes1,genes2]))]
ans = pd.DataFrame(0, index = expTemp.index, columns = np.arange(npairs))
ans = ans.astype(pd.SparseDtype("int", 0))  # api: pandas.SparseDtype
# %% [markdown]
# ## Imports and functions
import os
from operator import itemgetter
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from graspy.embed import LaplacianSpectralEmbed
from graspy.plot import heatmap, pairplot
from graspy.simulations import sbm
from graspy.utils import get_lcc
from graspy.match import FastApproximateQAP
from src.data import load_everything
from src.utils import savefig
from scipy import version
import scipy
print(version)
print(scipy.__version__)
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
SAVEFIGS = True
DEFAULT_FMT = "png"
DEFAULT_DPI = 150
plt.style.use("seaborn-white")
sns.set_palette("deep")
sns.set_context("talk", font_scale=1)
def stashfig(name, **kws):
if SAVEFIGS:
savefig(name, foldername=FNAME, fmt=DEFAULT_FMT, dpi=DEFAULT_DPI, **kws)
def get_feedforward_B(low_p, diag_p, feedforward_p, n_blocks=5):
B = np.zeros((n_blocks, n_blocks))
B += low_p
B -= np.diag(np.diag(B))
B -= np.diag(np.diag(B, k=1), k=1)
B += np.diag(diag_p * np.ones(n_blocks))
B += np.diag(feedforward_p * np.ones(n_blocks - 1), k=1)
return B
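# For example, with n_blocks=3, low_p=0.01, diag_p=0.1 and feedforward_p=0.2 this returns
# [[0.1 , 0.2 , 0.01],
#  [0.01, 0.1 , 0.2 ],
#  [0.01, 0.01, 0.1 ]]
# i.e. diag_p on the diagonal, feedforward_p on the first superdiagonal and low_p elsewhere.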
def n_to_labels(n):
n_cumsum = n.cumsum()
labels = np.zeros(n.sum(), dtype=np.int64)
for i in range(1, len(n)):
labels[n_cumsum[i - 1] : n_cumsum[i]] = i
return labels
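# For example, n_to_labels(np.array([2, 3])) returns array([0, 0, 1, 1, 1]).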
def signal_flow(A, n_components=5, return_evals=False):
""" Implementation of the signal flow metric from Varshney et al 2011
Parameters
----------
A : [type]
[description]
Returns
-------
[type]
[description]
"""
W = (A + A.T) / 2
D = np.diag(np.sum(W, axis=1))
L = D - W
b = np.sum(W * np.sign(A - A.T), axis=1)
L_pinv = np.linalg.pinv(L)
z = L_pinv @ b
D_root = np.diag(np.diag(D) ** (-1 / 2))
D_root[np.isnan(D_root)] = 0
D_root[np.isinf(D_root)] = 0
Q = D_root @ L @ D_root
evals, evecs = np.linalg.eig(Q)
inds = np.argsort(evals)
evals = evals[inds]
evecs = evecs[:, inds]
evecs = np.diag(np.diag(D) ** (1 / 2)) @ evecs
# return evals, evecs, z, D_root
scatter_df = pd.DataFrame()
for i in range(1, n_components + 1):
scatter_df[f"Lap-{i+1}"] = evecs[:, i]
scatter_df["Signal flow"] = z
if return_evals:
return scatter_df, evals
else:
return scatter_df
def get_template_mat(A):
total_synapses = np.sum(A)
upper_triu_inds = np.triu_indices_from(A, k=1)
filler = total_synapses / len(upper_triu_inds[0])
upper_triu_template = np.zeros_like(A)
upper_triu_template[upper_triu_inds] = filler
return upper_triu_template
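# The template spreads the graph's total edge weight uniformly over the strict upper triangle,
# so matching a network against it with FAQ (below) rewards permutations that push as much
# weight as possible above the diagonal, i.e. feedforward orderings.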
def invert_permutation(p):
"""The argument p is assumed to be some permutation of 0, 1, ..., len(p)-1.
Returns an array s, where s[i] gives the index of i in p.
"""
s = np.empty(p.size, p.dtype)
s[p] = np.arange(p.size)
return s
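# For example, p = [2, 0, 1] gives s = [1, 2, 0]: 0 sits at index 1 of p, 1 at index 2, and 2 at index 0.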
# %% [markdown]
# ## Generate a "perfect" feedforward network (stochastic block model)
low_p = 0.01
diag_p = 0.1
feedforward_p = 0.2
community_sizes = np.array(5 * [20])
block_probs = get_feedforward_B(low_p, diag_p, feedforward_p)
A = sbm(community_sizes, block_probs, directed=True, loops=False)
n_verts = A.shape[0]
# A[:20, 20:40] *= 2
# A[20:40, 40:60] *= 3
# A[40:60, 60:80] *= 4
# A[60:80, 80:100] *= 5
plt.figure(figsize=(10, 10))
plt.title("Feedforward SBM block probability matrix")
sns.heatmap(block_probs, annot=True, square=True, cmap="Reds", cbar=False)
stashfig("ffwSBM-B")
plt.show()
heatmap(A, cbar=False, title="Feedforward SBM sampled adjacency matrix")
stashfig("ffwSBM-adj")
plt.show()
labels = n_to_labels(community_sizes).astype(str)
# %% [markdown]
# # Demonstrate that FAQ works
# Shuffle the true adjacency matrix and then show that it can be recovered
shuffle_inds = np.random.permutation(n_verts)
B = A[np.ix_(shuffle_inds, shuffle_inds)]
faq = FastApproximateQAP(
max_iter=30,
eps=0.0001,
init_method="rand",
n_init=10,
shuffle_input=False,
maximize=True,
)
A_found, B_found = faq.fit_predict(A, B)
perm_inds = faq.perm_inds_
heatmap(
A - B_found, title="Diff between true and FAQ-predicted adjacency", vmin=-1, vmax=1
)
stashfig("faq-works")
#%%
inv_shuffle_inds = invert_permutation(shuffle_inds)
perm_inds
arr = np.empty((n_verts))
arr[inv_shuffle_inds] = perm_inds
arr
scatter_df = pd.DataFrame()
scatter_df["True Ind"] = range(n_verts)
scatter_df["Pred Ind"] = arr
scatter_df["Score"] = faq.score_
plt.figure(figsize=(10, 10))
sns.scatterplot(data=scatter_df, x="True Ind", y="Pred Ind", hue="Score")
plt.legend(bbox_to_anchor=(1.03, 1), loc=2, borderaxespad=0.0)
# %% [markdown]
# # Use multiple restarts of FAQ and a template upper triangular matrix
template = get_template_mat(A)
n_init = 1
faq = FastApproximateQAP(
max_iter=20,
eps=0.0001,
init_method="rand",
n_init=100,
shuffle_input=False,
maximize=True,
)
fig, axs = plt.subplots(5, 4, figsize=(20, 20))
axs = axs.ravel()
perm_inds_mat = np.zeros((n_init, n_verts))
scores = []
dfs = []
for i in range(n_init):
print()
print(i)
print()
# shuffle A
shuffle_inds = np.random.permutation(n_verts)
A_shuffle = A[np.ix_(shuffle_inds, shuffle_inds)].copy()
# fit FAQ
_, A_found = faq.fit_predict(template, A_shuffle)
temp_perm_inds = faq.perm_inds_
heatmap(A_shuffle[np.ix_(temp_perm_inds, temp_perm_inds)], cbar=False, ax=axs[i])
# put things back in order
pred_inds = np.empty_like(shuffle_inds)
pred_inds[shuffle_inds[temp_perm_inds]] = range(n_verts)
perm_inds_mat[i, :] = pred_inds
temp_df = pd.DataFrame()
temp_df["True Ind"] = range(n_verts)
temp_df["Predicted Ind"] = pred_inds
temp_df["Score"] = faq.score_
temp_df["Labels"] = labels
dfs.append(temp_df)
scores.append(faq.score_)
plt.tight_layout()
stashfig("multisort-heatmap")
plt.show()
#%%
scatter_df = pd.concat(dfs)  # api: pandas.concat
from typing import Type, Callable, Tuple, Union
import numpy as np
import pandas as pd
import pytest
from py4j.java_gateway import JVMView
from keanu import set_deterministic_state
from keanu.context import KeanuContext
from keanu.vartypes import tensor_arg_types, primitive_types, numpy_types, pandas_types
from keanu.vertex import Gaussian, Const, UniformInt, Bernoulli, IntegerProxy, Double
from keanu.vertex.base import Vertex
@pytest.fixture
def jvm_view():
from py4j.java_gateway import java_import
jvm_view = KeanuContext().jvm_view()
java_import(jvm_view, "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.GaussianVertex")
return jvm_view
def assert_vertex_value_equals_scalar(vertex: Vertex, expected_type: Type, scalar: primitive_types) -> None:
vertex_value = vertex.get_value()
assert vertex_value == scalar
assert type(vertex_value) == numpy_types
assert vertex_value.shape == ()
assert vertex_value.dtype == expected_type
def assert_vertex_value_equals_ndarray(vertex: Vertex, expected_type: Type, ndarray: numpy_types) -> None:
vertex_value = vertex.get_value()
expected_value = ndarray.astype(expected_type)
assert np.array_equal(vertex_value, expected_value)
assert np.issubdtype(vertex_value.dtype, expected_type)
def assert_vertex_value_equals_pandas(vertex: Vertex, expected_type: Type, pandas: pandas_types) -> None:
get_value = vertex.get_value()
expected_value = pandas.values.astype(expected_type).reshape(get_value.shape)
assert np.array_equal(get_value, expected_value)
assert np.issubdtype(get_value.dtype, expected_type)
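# The three helpers above check a vertex's value against a scalar, an ndarray or a pandas
# object respectively, comparing both the values and the resulting numpy dtype.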
def test_can_pass_scalar_to_vertex() -> None:
gaussian = Gaussian(0., 1.)
sample = gaussian.sample()
assert type(sample) == numpy_types
assert sample.shape == ()
assert sample.dtype == float
def test_can_pass_ndarray_to_vertex() -> None:
gaussian = Gaussian(np.array([0.1, 0.4]), np.array([0.4, 0.5]))
sample = gaussian.sample()
assert sample.shape == (2,)
def test_can_pass_pandas_dataframe_to_vertex() -> None:
gaussian = Gaussian(pd.DataFrame(data=[0.1, 0.4]), pd.DataFrame(data=[0.1, 0.4]))
sample = gaussian.sample()
assert sample.shape == (2, 1)
def test_can_pass_pandas_series_to_vertex() -> None:
gaussian = Gaussian(pd.Series(data=[0.1, 0.4]), pd.Series(data=[0.1, 0.4]))
sample = gaussian.sample()
assert sample.shape == (2,)
def test_can_pass_vertex_to_vertex(jvm_view: JVMView) -> None:
mu = Gaussian(0., 1.)
gaussian = Vertex(jvm_view.GaussianVertex, "gaussian", mu, Const(1.))
sample = gaussian.sample()
assert type(sample) == numpy_types
assert sample.shape == ()
assert sample.dtype == float
def test_can_pass_array_to_vertex(jvm_view: JVMView) -> None:
gaussian = Vertex(jvm_view.GaussianVertex, "gaussian", [3, 3], Const(0.), Const(1.))
sample = gaussian.sample()
assert sample.shape == (3, 3)
def test_cannot_pass_generic_to_vertex(jvm_view: JVMView) -> None:
class GenericExampleClass:
pass
with pytest.raises(ValueError, match=r"Can't parse generic argument. Was given {}".format(GenericExampleClass)):
Vertex( # type: ignore # this is expected to fail mypy
jvm_view.GaussianVertex, "gaussian", GenericExampleClass(), GenericExampleClass())
def test_int_vertex_value_is_a_numpy_array() -> None:
ndarray = np.array([[1, 2], [3, 4]])
vertex = Const(ndarray)
value = vertex.get_value()
assert type(value) == np.ndarray
assert value.dtype == np.int64 or value.dtype == np.int32
assert (value == ndarray).all()
def test_float_vertex_value_is_a_numpy_array() -> None:
ndarray = np.array([[1., 2.], [3., 4.]])
vertex = Const(ndarray)
value = vertex.get_value()
assert type(value) == np.ndarray
assert value.dtype == np.float64
assert (value == ndarray).all()
def test_boolean_vertex_value_is_a_numpy_array() -> None:
ndarray = np.array([[True, True], [False, True]])
vertex = Const(ndarray)
value = vertex.get_value()
assert type(value) == np.ndarray
assert value.dtype == np.bool_
assert (value == ndarray).all()
def test_scalar_vertex_value_is_a_numpy_array() -> None:
scalar = 1.
vertex = Const(scalar)
value = vertex.get_value()
assert type(value) == numpy_types
assert value.shape == ()
assert value.dtype == float
assert value == scalar
def test_vertex_sample_is_a_numpy_array() -> None:
mu = np.array([[1., 2.], [3., 4.]])
sigma = np.array([[.1, .2], [.3, .4]])
vertex = Gaussian(mu, sigma)
value = vertex.sample()
assert type(value) == np.ndarray
assert value.dtype == np.float64
assert value.shape == (2, 2)
def test_get_connected_graph() -> None:
gaussian = Gaussian(0., 1.)
connected_graph = set(gaussian.iter_connected_graph())
assert len(connected_graph) == 3
def test_id_str_of_downstream_vertex_is_higher_than_upstream() -> None:
hyper_params = Gaussian(0., 1.)
gaussian = Gaussian(0., hyper_params)
hyper_params_id = hyper_params.get_id()
gaussian_id = gaussian.get_id()
assert type(hyper_params_id) == tuple
assert type(gaussian_id) == tuple
assert hyper_params_id < gaussian_id
def test_construct_vertex_with_java_vertex() -> None:
java_vertex = Gaussian(0., 1.).unwrap()
python_vertex = Vertex._from_java_vertex(java_vertex)
assert tuple(java_vertex.getId().getValue()) == python_vertex.get_id()
def test_java_collections_to_generator() -> None:
gaussian = Gaussian(0., 1.)
java_collections = gaussian.unwrap().getConnectedGraph()
python_list = list(Vertex._to_generator(java_collections))
java_vertex_ids = [Vertex._get_python_id(java_vertex) for java_vertex in java_collections]
assert java_collections.size() == len(python_list)
assert all(type(element) == Double and element.get_id() in java_vertex_ids for element in python_list)
def test_get_vertex_id() -> None:
gaussian = Gaussian(0., 1.)
java_id = gaussian.unwrap().getId().getValue()
python_id = gaussian.get_id()
assert all(value in python_id for value in java_id)
def test_ids_are_reset() -> None:
gaussian = Gaussian(0., 1.)
set_deterministic_state()
gaussian2 = Gaussian(0., 1.)
assert gaussian.get_id() == gaussian2.get_id()
@pytest.mark.parametrize("vertex, expected_type", [(Gaussian(0., 1.), np.floating), (UniformInt(0, 10), np.integer),
(Bernoulli(0.5), np.bool_)])
@pytest.mark.parametrize("value, assert_vertex_value_equals",
[(np.array([[4]]), assert_vertex_value_equals_ndarray),
(np.array([[5.]]), assert_vertex_value_equals_ndarray),
(np.array([[True]]), assert_vertex_value_equals_ndarray),
(np.array([[1, 2], [3, 4]]), assert_vertex_value_equals_ndarray),
(pd.Series(data=[4]), assert_vertex_value_equals_pandas),
(pd.Series(data=[5.]), assert_vertex_value_equals_pandas),
(pd.Series(data=[True]), assert_vertex_value_equals_pandas),
(pd.Series(data=[1, 2, 3]), assert_vertex_value_equals_pandas),
(pd.Series(data=[1., 2., 3.]), assert_vertex_value_equals_pandas),
(pd.Series(data=[True, False, False]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[4]]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[5.]]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[True]]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[1, 2, 3]]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[1., 2., 3.]]), assert_vertex_value_equals_pandas),
(pd.DataFrame(data=[[True, False, False]]), assert_vertex_value_equals_pandas)])
def test_you_can_set_value(vertex: Vertex, expected_type: Type, value: tensor_arg_types,
assert_vertex_value_equals: Callable) -> None:
vertex.set_value(value)
assert_vertex_value_equals(vertex, expected_type, value)
@pytest.mark.parametrize("vertex, expected_type, value", [(Gaussian(0., 1.), float, 4.), (UniformInt(0, 10), int, 5),
(Bernoulli(0.5), bool, True)])
def test_you_can_set_scalar_value(vertex, expected_type, value):
vertex.set_value(value)
assert_vertex_value_equals_scalar(vertex, expected_type, value)
@pytest.mark.parametrize("ctor, args, expected_type", [(Gaussian, (0., 1.), np.floating),
(UniformInt, (0, 10), np.integer), (Bernoulli,
(0.5,), np.bool_)])
@pytest.mark.parametrize("value, assert_vertex_value_equals",
[(np.array([[4]]), assert_vertex_value_equals_ndarray),
(np.array([[5.]]), assert_vertex_value_equals_ndarray),
(np.array([[True]]), assert_vertex_value_equals_ndarray),
(np.array([[1, 2], [3, 4]]), assert_vertex_value_equals_ndarray),
(pd.Series(data=[4]), assert_vertex_value_equals_pandas),
(pd.Series(data=[5.]),  # api: pandas.Series
from collections.abc import Iterable as abc_iterator
from typing import Any, Iterable, List, Protocol, Union
import marshmallow_dataclass
import pandas as pd
from marshmallow.schema import Schema
from pupil.types import IsDataclass
class MetaDataDB(Protocol):
schema: Schema
label: str
def __init__(self, schema: IsDataclass, label: str) -> None:
...
def add(self, data: Any):
...
def get(self, index: Union[int, Iterable[int]]) -> List[IsDataclass]:
...
def __getitem__(self, index: Union[int, Iterable[int], slice]) -> List[IsDataclass]:
"""Getting data with row number
Args:
i (Union[int, Iterable[int]]): Row numbers
Returns:
List[IsDataclass]: List of schema objects
"""
...
def __len__(self) -> int:
"""Lenght of data
Returns:
int: _description_
"""
...
def set_label(self, i: int, input: Any) -> None:
...
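# A minimal sketch (illustrative only, not part of the original module) of how an
# implementation might normalize the index argument accepted by get/__getitem__ above:
# a single int, an iterable of ints or a slice all reduce to a list of row positions.
# The helper name _normalize_index is hypothetical.
def _normalize_index(index: Union[int, Iterable[int], slice], n_rows: int) -> List[int]:
    # a slice is expanded against the number of rows
    if isinstance(index, slice):
        return list(range(*index.indices(n_rows)))
    # any other iterable of ints is materialized as a list
    if isinstance(index, abc_iterator):
        return list(index)
    # a bare int selects a single row
    return [index]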
class PandasDB:
def __init__(self, schema: IsDataclass, label: str) -> None:
"""_summary_
Args:
schema (IsDataclass): Dataclass that should describe the schema of data
label (str): Which column in your DataFrame is the label of data
"""
self.schema = marshmallow_dataclass.class_schema(schema)(many=True) # type: ignore
if label not in self.schema.fields.keys():
raise ValueError(
f"{label} must be in your schema. your schema has {[k for k in self.schema.fields.keys()]}"
)
self.label = label
self.df = pd.DataFrame()  # api: pandas.DataFrame
"""
Tests for TimedeltaIndex methods behaving like their Timedelta counterparts
"""
import numpy as np
import pytest
import pandas as pd
from pandas import Index, Series, Timedelta, TimedeltaIndex, timedelta_range
import pandas._testing as tm
class TestVectorizedTimedelta:
def test_tdi_total_seconds(self):
# GH#10939
# test index
rng = timedelta_range("1 days, 10:11:12.100123456", periods=2, freq="s")
expt = [
1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9,
1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456.0 / 1e9,
]
tm.assert_almost_equal(rng.total_seconds(), Index(expt))
# test Series
ser = Series(rng)
s_expt = Series(expt, index=[0, 1])
tm.assert_series_equal(ser.dt.total_seconds(), s_expt)
# with nat
ser[1] = np.nan
s_expt = Series(
[1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9, np.nan],
index=[0, 1],
)
tm.assert_series_equal(ser.dt.total_seconds(), s_expt)
# with both nat
ser = Series([np.nan, np.nan], dtype="timedelta64[ns]")
tm.assert_series_equal(
ser.dt.total_seconds(), Series([np.nan, np.nan], index=[0, 1])
)
def test_tdi_round(self):
td = pd.timedelta_range(start="16801 days", periods=5, freq="30Min")
elt = td[1]
expected_rng = TimedeltaIndex(
[
Timedelta("16801 days 00:00:00"),
Timedelta("16801 days 00:00:00"),
Timedelta("16801 days 01:00:00"),
Timedelta("16801 days 02:00:00"),
Timedelta("16801 days 02:00:00"),  # api: pandas.Timedelta
#!/usr/bin/env python
# coding: utf-8
# DarshanUtils for Python for processing APMPI records
#
# This script gives an overview of features provided by the Python bindings for DarshanUtils.
# By default all APMPI module records, metadata, and the name records are loaded when opening a Darshan log:
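# For instance (illustrative only), darshan.DarshanReport("app.darshan") would load the records
# eagerly, whereas this script opens the log with read_all=False and pulls the APMPI records in
# explicitly via report.mod_read_all_apmpi_records("APMPI").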
import argparse
import darshan
import cffi
import numpy
import pandas
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
#import pprint
import pandas as pd
import numpy as np
import jinja2
import logging
from darshan.backend.cffi_backend import ffi
logger = logging.getLogger(__name__)
from darshan.report import DarshanReport
import darshan.backend.cffi_backend as backend
import darshan
import time
'''
from rich import print as rprint
from rich import pretty
from rich.panel import Panel
from rich import inspect
from rich.color import Color
from rich.console import Console
console = Console()
'''
from matplotlib.backends.backend_pdf import FigureCanvasPdf, PdfPages
from matplotlib.figure import Figure
#pp = pprint.PrettyPrinter()
#pretty.install()
#color = Color.parse("blue")
#inspect(color, methods=True)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--quiet",
dest="quiet",
action="store_true",
default=False,
help="Surpress zero count calls",
)
parser.add_argument(
"logname", metavar="logname", type=str, nargs=1, help="Logname to parse"
)
args = parser.parse_args()
report = darshan.DarshanReport(args.logname[0], read_all=False)
report.info()
if "APMPI" not in report.modules:
print("This log does not contain AutoPerf MPI data")
return
r = report.mod_read_all_apmpi_records("APMPI")
report.update_name_records()
report.info()
pdf = matplotlib.backends.backend_pdf.PdfPages("apmpi_output.pdf")
header_rec = report.records["APMPI"][0]
sync_flag = header_rec["sync_flag"]
print("sync_flag= ", sync_flag)
print(
"APMPI Variance in total mpi time: ", header_rec["variance_total_mpitime"], "\n"
)
if sync_flag:
print(
"APMPI Variance in total mpi sync time: ",
header_rec["variance_total_mpisynctime"],
)
df_apmpi = pd.DataFrame()
list_mpiop = []
list_rank = []
for rec in report.records["APMPI"][1:]:  # skip the first record, which is the header record
mpi_nonzero_callcount = []
for k, v in rec["all_counters"].items():
if k.endswith("_CALL_COUNT") and v > 0:
mpi_nonzero_callcount.append(k[: -(len("CALL_COUNT"))])
df_rank = pd.DataFrame()
for mpiop in mpi_nonzero_callcount:
ncall = mpiop
ncount = mpiop + "CALL_COUNT"
nsize = mpiop + "TOTAL_BYTES"
h0 = mpiop + "MSG_SIZE_AGG_0_256"
h1 = mpiop + "MSG_SIZE_AGG_256_1K"
h2 = mpiop + "MSG_SIZE_AGG_1K_8K"
h3 = mpiop + "MSG_SIZE_AGG_8K_256K"
h4 = mpiop + "MSG_SIZE_AGG_256K_1M"
h5 = mpiop + "MSG_SIZE_AGG_1M_PLUS"
ntime = mpiop + "TOTAL_TIME"
mintime = mpiop + "MIN_TIME"
maxtime = mpiop + "MAX_TIME"
if sync_flag:
totalsync = mpiop + "TOTAL_SYNC_TIME"
mpiopstat = {}
mpiopstat["Rank"] = rec["rank"]
mpiopstat["Node_ID"] = rec["node_name"]
mpiopstat["Call"] = ncall[:-1]
mpiopstat["Total_Time"] = rec["all_counters"][ntime]
mpiopstat["Count"] = rec["all_counters"][ncount]
mpiopstat["Total_Bytes"] = rec["all_counters"].get(nsize, None)
mpiopstat["[0-256B]"] = rec["all_counters"].get(h0, None)
mpiopstat["[256-1KB]"] = rec["all_counters"].get(h1, None)
mpiopstat["[1K-8KB]"] = rec["all_counters"].get(h2, None)
mpiopstat["[8K-256KB]"] = rec["all_counters"].get(h3, None)
mpiopstat["256K-1MB"] = rec["all_counters"].get(h4, None)
mpiopstat["[>1MB]"] = rec["all_counters"].get(h5, None)
mpiopstat["Min_Time"] = rec["all_counters"][mintime]
mpiopstat["Max_Time"] = rec["all_counters"][maxtime]
if sync_flag and (totalsync in rec["all_counters"]):
mpiopstat["Total_SYNC_Time"] = rec["all_counters"][totalsync]
list_mpiop.append(mpiopstat)
rankstat = {}
rankstat["Rank"] = rec["rank"]
rankstat["Node_ID"] = rec["node_name"]
rankstat["Call"] = "Total_MPI_time"
rankstat["Total_Time"] = rec["all_counters"]["MPI_TOTAL_COMM_TIME"]
list_rank.append(rankstat)
df_rank = pd.DataFrame(list_rank)
avg_total_time = df_rank["Total_Time"].mean()
max_total_time = df_rank["Total_Time"].max()
min_total_time = df_rank["Total_Time"].min()
max_rank = df_rank.loc[df_rank["Total_Time"].idxmax()]["Rank"]
min_rank = df_rank.loc[df_rank["Total_Time"].idxmin()]["Rank"]
# assumption: row index and rank id are same in df_rank
# .. need to check if that is an incorrect assumption
mean_rank = (
(df_rank["Total_Time"] - df_rank["Total_Time"].mean()).abs().argsort()[:1][0]
)
pd.set_option("display.max_rows", None, "display.max_columns", None)  # api: pandas.set_option
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over, so the previously defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL> some text <EMAIL>',
'<EMAIL> some text c@d.<EMAIL> and <EMAIL>',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# an Index should return the same result as the default index without a name,
# i.e. index.name doesn't affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: 3 Em 3
values = ['A', '3', u'¼', u'★', u'፸', u'3', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'3', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'3', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
        # If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
                      [u('f'), u('g'), u('h')]])
        tm.assert_series_equal(result, exp)
"""Utility functions shared across the Aquarius project."""
import ftplib
import os
import logging
import gzip
import numpy as np
import pandas as pd
import yaml
import json
from datetime import timedelta, date, datetime
from dfply import (
X,
group_by,
summarize,
mask,
n,
transmute,
select,
left_join,
ungroup,
arrange,
mutate,
)
from tqdm import tqdm
from typing import Tuple, Dict, List, Optional, NamedTuple
from geopy.distance import geodesic
from sqlalchemy import create_engine
import matplotlib.pyplot as plt
from bokeh.plotting import figure
from bokeh.models import HoverTool, ColumnDataSource, VArea, Line, VBar
from bokeh.tile_providers import get_provider, STAMEN_TERRAIN
class ECDF:
"""Empirical Cumulative Distribution Function with linear interpolation."""
def __init__(self):
self.x_values = None
self.cdf_values = None
def fit(self, xdata: np.ndarray, weights: Optional[np.ndarray] = None):
if weights is None:
ind_valid = ~np.isnan(xdata)
xv = xdata[ind_valid]
values, counts = np.unique(xv, return_counts=True)
sort_index = np.argsort(values)
self.x_values = values[sort_index]
self.cdf_values = (np.cumsum(counts[sort_index]) - 0.5)/np.sum(counts)
else:
assert len(xdata) == len(weights)
ind_valid = ~np.isnan(xdata) & ~np.isnan(weights)
xv = xdata[ind_valid]
wv = weights[ind_valid]
sorter = np.argsort(xv)
values = xv[sorter]
sample_weight = wv[sorter]
weighted_quantiles = (np.cumsum(sample_weight) - 0.5 * sample_weight) / np.sum(sample_weight)
unique_values, unique_index, unique_counts = np.unique(values, return_index=True, return_counts=True)
self.x_values = unique_values
self.cdf_values = weighted_quantiles[unique_index + unique_counts - 1] # last index instead of first index
return self
def eval(self, x: np.ndarray):
cdf = np.interp(x, xp=self.x_values, fp=self.cdf_values, left=0, right=1)
return cdf
def quantile(self, q: np.ndarray):
assert np.all(q >= 0) and np.all(q <= 1), 'quantiles should be in [0, 1]'
xq = np.interp(q, xp=self.cdf_values, fp=self.x_values, left=self.x_values[0], right=self.x_values[-1])
return xq
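# Illustrative sketch (not part of the pipeline): fitting an ECDF to synthetic data
# and reading off probabilities and quantiles. The sample below is made up.
def _example_ecdf() -> None:
    rng = np.random.default_rng(0)
    sample = rng.normal(loc=100.0, scale=15.0, size=1_000)
    ecdf = ECDF().fit(sample)
    print(ecdf.eval(np.array([85.0, 100.0, 115.0])))   # roughly [0.16, 0.5, 0.84]
    print(ecdf.quantile(np.array([0.25, 0.5, 0.75])))  # roughly [90, 100, 110]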
def download_ghcn_file(ftp_filename: str, save_dir: str):
logging.debug(f"ftp_filename={ftp_filename}")
logging.debug(f"save_dir={save_dir}")
ftp = ftplib.FTP(host='ftp.ncdc.noaa.gov', timeout=10.0, user='anonymous', passwd='<PASSWORD>')
logging.debug("FTP server connected")
ftp.cwd("/pub/data/ghcn/daily/by_year/")
save_path = os.path.join(save_dir, ftp_filename)
logging.debug(f"downloading {save_path}")
with open(save_path, 'wb') as file:
ftp.retrbinary(f"RETR {ftp_filename}", file.write)
logging.debug(f"downloaded {save_path}")
return 1
def unzip_file(filename: str, folder: str):
read_path = os.path.join(folder, filename)
logging.debug(f"unzipping {read_path}")
f = gzip.open(read_path, 'rb')
file_content = f.read()
f.close()
logging.debug(f"unzipped {read_path}")
return file_content
def df_file(filename: str, folder: str) -> pd.DataFrame:
# based on https://stackoverflow.com/questions/31028815/how-to-unzip-gz-file-using-python
read_path = os.path.join(folder, filename)
logging.debug(f"unzipping and reading {read_path}")
with gzip.open(read_path, 'rb') as f:
df = pd.read_csv(
f,
header=None,
names=['station', 'dateto', 'element', 'value', 'm_flag', 'q_flag', 's_flag', 'obs_time'],
parse_dates=['dateto'],
)
logging.debug(f"read {read_path}")
return df
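# Note: in the GHCN-Daily by_year files the PRCP 'value' column is reported in
# tenths of a millimetre, which is why downstream code divides by 10 to get mm.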
def get_config():
with open('config.yaml', 'r') as file:
config = yaml.safe_load(file)
return config
def load_all_years(year_from: int, year_to: int, save_dir: str):
for year in range(year_from, year_to + 1):
filename = f"{year}.csv.gz"
download_ghcn_file(filename, save_dir)
logging.debug("completed")
def extract_one_prcp(filename: str, by_year_path: str, prcp_path: str):
df = df_file(filename, by_year_path)
df_sel = df >> mask(X.element == 'PRCP') >> transmute(station=X.station, dateto=X.dateto, prcp=X.value)
logging.debug(f"{df_sel.shape[0]} out of {df.shape[0]} rows selected")
year_string = filename.split('.')[0]
df_sel.to_csv(os.path.join(prcp_path, f"{year_string}.csv"), sep=',', index=False)
logging.debug(f"{filename} processed")
def extract_one_station_prcp(station: str, filename: str, by_year_path: str, prcp_path: str):
df = df_file(filename, by_year_path)
df_sel = df >> mask(X.element == 'PRCP') >> mask(X.station == station) >> \
transmute(station=X.station, dateto=X.dateto, prcp=X.value)
logging.debug(f"{df_sel.shape[0]} out of {df.shape[0]} rows selected")
year_string = filename.split('.')[0]
df_sel.to_csv(os.path.join(prcp_path, f"{year_string}.csv"), sep=',', index=False)
logging.debug(f"{filename} processed")
def extract_one_station_startswith(station_startswith: str, filename: str, by_year_path: str, prcp_path: str):
df = df_file(filename, by_year_path)
df_sel = df >> mask(X.element == 'PRCP') >> mask(X.station.str.startswith(station_startswith)) >> \
transmute(station=X.station, dateto=X.dateto, prcp=X.value)
logging.debug(f"{df_sel.shape[0]} out of {df.shape[0]} rows selected")
year_string = filename.split('.')[0]
df_sel.to_csv(os.path.join(prcp_path, f"{year_string}.csv"), sep=',', index=False)
logging.debug(f"{filename} processed")
def extract_all_prcp(by_year_path: str, prcp_path: str):
if not os.path.isdir(prcp_path):
os.makedirs(prcp_path)
for filename in sorted(os.listdir(by_year_path), reverse=True):
extract_one_prcp(filename, by_year_path, prcp_path)
return 1
def extract_all_prcp_station(station: str, by_year_path: str, prcp_path: str):
if not os.path.isdir(prcp_path):
os.makedirs(prcp_path)
for filename in sorted(os.listdir(by_year_path), reverse=True):
extract_one_station_prcp(station, filename, by_year_path, prcp_path)
return 1
def extract_all_prcp_station_startswith(station_startswith: str, by_year_path: str, prcp_path: str):
if not os.path.isdir(prcp_path):
os.makedirs(prcp_path)
for filename in sorted(os.listdir(by_year_path), reverse=True):
extract_one_station_startswith(station_startswith, filename, by_year_path, prcp_path)
return 1
def ded(prcp: pd.DataFrame) -> date:
logging.debug(f"{prcp.shape[0]} station*days")
station_ded = prcp >> group_by(X.station) >> summarize(ded=X.dateto.max())
logging.debug(f"{station_ded.shape[0]} stations")
data_end_dt = station_ded['ded'].quantile(0.90)
data_end_date = date(data_end_dt.year, data_end_dt.month, data_end_dt.day)
logging.debug(f"data_end_date={data_end_date}")
return data_end_date
def date_limits(prcp: pd.DataFrame) -> tuple:
logging.debug(f"{prcp.shape[0]} station*days")
station_ded = prcp >> group_by(X.station) >> summarize(dsd=X.dateto.min(), ded=X.dateto.max())
logging.debug(f"{station_ded.shape[0]} stations")
data_start_date = station_ded['dsd'].quantile(0.10)
data_end_date = station_ded['ded'].quantile(0.90)
return data_start_date, data_end_date
def df_prcp(year: int, prcp_path=None) -> pd.DataFrame:
if prcp_path is None:
prcp_path = '../../data/prcp_ruzyne'
filename = os.path.join(prcp_path, f'{year}.csv')
logging.debug(f"reading {filename}")
prcp = pd.read_csv(filename, parse_dates=['dateto']) >> arrange(X.dateto, X.station)
return prcp
def active_stations(prcp: pd.DataFrame, date_valid, config) -> pd.DataFrame:
prcp_valid = prcp >> mask(X.dateto <= date_valid)
data_end_date = ded(prcp_valid)
logging.debug(f"data_end_date={data_end_date}")
logging.debug(f"active_period_length_days={config['active_period_length_days']}")
active_start_date = data_end_date - timedelta(days=config['active_period_length_days']-1)
logging.debug(f"active_start_date={active_start_date}")
prcp_window = prcp_valid >> mask(X.dateto >= active_start_date)
prcp_active = prcp_window >> group_by(X.station) >> summarize(num_observed_days=n(X.prcp)) >> arrange(X.station)
prcp_active['is_active'] = prcp_active['num_observed_days'] >= config['active_period_min_days']
return prcp_active >> ungroup()
def transpose_to_stations(prcp_path: str, stations_path: str):
# deprecated - too slow
all_files = sorted(os.listdir(prcp_path), reverse=True)
num_files = len(all_files)
logging.debug(f"{num_files} files in {prcp_path}")
for i_file, filename in enumerate(all_files):
year = int(filename.split('.')[0])
df = df_prcp(year)
        stations = np.sort(df['station'].unique())
num_stations = len(stations)
logging.debug(f"{num_stations} stations in {filename}")
for i_station, station in enumerate(stations):
df_sel = df >> mask(X.station == station) >> select(X.dateto, X.prcp)
out_filename = os.path.join(stations_path, f"{station}.csv")
if os.path.isfile(out_filename):
df_sel.to_csv(out_filename, mode='a', index=False, header=False)
else:
df_sel.to_csv(out_filename, mode='w', index=False, header=True)
logging.debug(f"file={i_file}/{num_files} station={i_station}/{num_stations} processed")
logging.debug(f"{filename} processed")
logging.debug("transpose completed")
def make_recent(data_end_date: date, config) -> pd.Series:
"""Make daily calendar with period taking values True=recent and False=preceding."""
num_days_recent = 365*config['recent_time_window_years']
num_days_preceding = 365*config['preceding_time_window_max_years']
num_days = num_days_recent + num_days_preceding
date_axis = np.flip(pd.date_range(end=data_end_date, periods=num_days, freq='D'))
calendar_values = np.concatenate([
np.ones(num_days_recent, dtype=bool),
np.zeros(num_days_preceding, dtype=bool),
])
calendar = pd.Series(calendar_values, index=date_axis)
logging.debug((
f"calendar with {num_days} days from {date_axis[-1]} to {date_axis[0]} "
f"with recent period of {num_days_recent} from {date_axis[num_days_recent-1]}"
))
return calendar
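# Illustrative sketch: with a toy config of 1 recent year and 2 preceding years the
# calendar spans 1095 days ending at data_end_date, 365 of them flagged as recent.
def _example_make_recent() -> None:
    toy_config = {'recent_time_window_years': 1, 'preceding_time_window_max_years': 2}
    cal = make_recent(date(2020, 12, 31), toy_config)
    print(len(cal), int(cal.sum()))  # 1095 365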
def update_drought(df_running: pd.DataFrame, df_update: pd.DataFrame, calendar: pd.Series) -> pd.DataFrame:
"""Update drought statistics with time series from a new time period."""
if df_update.shape[0] > 0:
assert "station" in df_running.columns
assert "station" in df_update.columns
assert "dateto" in df_update.columns
running_columns = [
'recent_time_window_days',
'recent_days_observed',
'recent_fill_rate',
'recent_precipitation_mm',
'recent_precipitation_annual_mean',
'preceding_time_window_days',
'preceding_days_observed',
'preceding_fill_rate',
'preceding_precipitation_mm',
'preceding_precipitation_annual_mean',
]
for column in running_columns:
if column not in df_running.columns:
df_running[column] = 0
d1, d2 = date_limits(df_update)
logging.debug(f"date_limits: {d1} and {d2}")
calendar_recent = pd.DataFrame({'dateto': calendar[calendar].index})
recent_start_date = calendar_recent.iat[-1, 0]
recent_end_date = calendar_recent.iat[0, 0]
calendar_preceding = pd.DataFrame({'dateto': calendar[~calendar].index})
preceding_start_date = calendar_preceding.iat[-1, 0]
preceding_end_date = calendar_preceding.iat[0, 0]
d1_recent = max(d1, recent_start_date)
d2_recent = min(d2, recent_end_date)
recent_delta_days = max((d2_recent - d1_recent).days + 1, 0)
logging.debug(f"recent_delta_days={recent_delta_days}")
d1_preceding = max(d1, preceding_start_date)
d2_preceding = min(d2, preceding_end_date)
preceding_delta_days = max((d2_preceding - d1_preceding).days + 1, 0)
logging.debug(f"preceding_delta_days={preceding_delta_days}")
if (recent_delta_days > 0) or (preceding_delta_days > 0):
logging.debug("proceeding")
df_base = df_running[['station']].copy()
df_update_recent = calendar_recent >> \
left_join(df_update, by='dateto') >> \
group_by(X.station) >> \
summarize(
recent_days_observed=n(X.prcp),
recent_precipitation_mm=X.prcp.sum()/10,
)
if df_update_recent.shape[0] == 0: # df_update does not intersect recent window
df_update_recent = df_base.copy()
df_update_recent['recent_days_observed'] = 0
df_update_recent['recent_precipitation_mm'] = 0.0
# logging.debug(df_update_recent.head())
df_update_preceding = calendar_preceding >> \
left_join(df_update, by='dateto') >> \
group_by(X.station) >> \
summarize(
preceding_days_observed=n(X.prcp),
preceding_precipitation_mm=X.prcp.sum()/10
)
if df_update_preceding.shape[0] == 0: # df_update does not intersect preceding window
df_update_preceding = df_base.copy()
df_update_preceding['preceding_days_observed'] = 0
df_update_preceding['preceding_precipitation_mm'] = 0.0
# logging.debug(df_update_preceding.head())
df_delta = df_base.copy() >> \
left_join(df_update_recent, by='station') >> \
left_join(df_update_preceding, by='station')
df_delta.fillna(value=0, inplace=True)
assert df_delta.shape[0] == df_running.shape[0]
recent_time_window_days = df_running.recent_time_window_days + recent_delta_days
preceding_time_window_days = df_running.preceding_time_window_days + preceding_delta_days
recent_days_observed = df_running.recent_days_observed + df_delta.recent_days_observed
preceding_days_observed = df_running.preceding_days_observed + df_delta.preceding_days_observed
recent_fill_rate = recent_days_observed / recent_time_window_days
preceding_fill_rate = preceding_days_observed / preceding_time_window_days
            recent_precipitation_mm = df_running.recent_precipitation_mm + df_delta.recent_precipitation_mm
preceding_precipitation_mm = df_running.preceding_precipitation_mm + df_delta.preceding_precipitation_mm
recent_precipitation_annual_mean = recent_precipitation_mm / recent_days_observed * 365
preceding_prcp_annual_mean = preceding_precipitation_mm / preceding_days_observed * 365
df_running['recent_time_window_days'] = recent_time_window_days
df_running['recent_days_observed'] = recent_days_observed
df_running['recent_fill_rate'] = recent_fill_rate
df_running['recent_precipitation_mm'] = recent_precipitation_mm
df_running['recent_precipitation_annual_mean'] = recent_precipitation_annual_mean
df_running['preceding_time_window_days'] = preceding_time_window_days
df_running['preceding_days_observed'] = preceding_days_observed
df_running['preceding_fill_rate'] = preceding_fill_rate
df_running['preceding_precipitation_mm'] = preceding_precipitation_mm
df_running['preceding_precipitation_annual_mean'] = preceding_prcp_annual_mean
df_running['dq_flag'] = (recent_fill_rate >= 0.90) & (preceding_fill_rate >= 0.80)
df_running['drought_index'] = 100*(1 - recent_precipitation_annual_mean / preceding_prcp_annual_mean)
else:
logging.debug("skipping")
else:
logging.debug("df_running is empty")
return df_running
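# Illustrative sketch (toy data, not part of the pipeline): a single pass of
# update_drought over one year of data for one made-up station. Column names follow
# the extract functions above (prcp in tenths of mm); the values are invented.
def _example_update_drought() -> None:
    toy_config = {'recent_time_window_years': 1, 'preceding_time_window_max_years': 2}
    cal = make_recent(date(2020, 12, 31), toy_config)
    running = pd.DataFrame({'station': ['XX000000001']})
    update = pd.DataFrame({
        'station': 'XX000000001',
        'dateto': pd.date_range('2020-01-01', '2020-12-31', freq='D'),
        'prcp': 15,  # tenths of mm per day, i.e. 1.5 mm
    })
    running = update_drought(running, update, cal)
    print(running[['recent_days_observed', 'recent_precipitation_mm']])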
def get_current_year() -> int:
y0 = date.today().year
return y0
def get_oldest_year() -> int:
current_year = get_current_year()
config = get_config()
oldest_year = current_year - \
config['drought_window_years'] - \
config['recent_time_window_years'] - \
config['preceding_time_window_min_years']
return oldest_year
def calculate_drought(
stations: pd.DataFrame,
data_end_date: date,
prcp_path: str,
out_path: str,
) -> pd.DataFrame:
logging.info(f"{stations.shape[0]} active stations with data_end_date={data_end_date}")
config = get_config()
calendar = make_recent(data_end_date, config)
year_to = calendar.index[0].year
year_from = calendar.index[-1].year
years = range(year_to, year_from - 1, -1)
logging.info(f"processing {len(years)} years from {year_to} back to {year_from}")
for year in years:
logging.info(f"year={year}")
prcp_year = df_prcp(year, prcp_path)
stations = update_drought(stations, prcp_year, calendar)
logging.info(f"{stations['dq_flag'].sum()} data quality passed")
stations.to_csv(f'{out_path}/{data_end_date.isoformat()[:10]}.csv', index=False)
logging.debug(f"\n{stations.head(10)}")
aquarius = stations >> mask(X.dq_flag) >> \
summarize(
min=X.drought_index.min(),
p25=X.drought_index.quantile(0.25),
p50=X.drought_index.quantile(0.50),
p75=X.drought_index.quantile(0.75),
max=X.drought_index.max(),
)
return aquarius
def load_countries() -> pd.DataFrame:
countries_file = '../../data/station/ghcnd-countries-continent.txt'
cdf_list = []
with open(countries_file, 'r') as file:
for line in file:
country_code = line[:2]
continent_code = line[3:5]
country_name = line[6:].rstrip()
cdf_row = (country_code, continent_code, country_name)
cdf_list.append(cdf_row)
logging.debug(f"{len(cdf_list)} countries parsed")
cdf = pd.DataFrame(cdf_list, columns=['country_code', 'continent_code', 'country_name'])
continent = {
'EU': 'Europe',
'AS': 'Asia',
'AF': 'Africa',
'NA': 'North America',
'SA': 'South America',
'OC': 'Oceania',
'AN': 'Antarctica',
}
cdf['continent_name'] = cdf['continent_code'].apply(lambda x: continent[x])
return cdf
def load_stations() -> pd.DataFrame:
stations_file = '../../data/station/ghcnd-stations.txt'
stations_list = []
with open(stations_file, 'r') as file:
for line in file:
country_code = line[:2]
station = line[:11]
latitude = float(line[12:20])
longitude = float(line[21:30])
elevation = float(line[31:37])
station_name = line[41:71].rstrip().lower()
stations_row = (station, country_code, latitude, longitude, elevation, station_name)
stations_list.append(stations_row)
logging.debug(f"{len(stations_list)} stations parsed")
colnames = ['station', 'country_code', 'latitude', 'longitude', 'elevation', 'station_name']
sdfbase = pd.DataFrame(stations_list, columns=colnames)
cdf = load_countries()
sdf = sdfbase.merge(cdf, how='left', on='country_code').set_index('station')
return sdf
def load_country_continent() -> pd.DataFrame:
cc_file = '../../data/station/country-and-continent-codes-list-csv_csv.txt'
ccdf = pd.read_csv(cc_file, sep=",")
return ccdf
def chunker(seq, size):
# from http://stackoverflow.com/a/434328
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
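# e.g. list(chunker(list(range(7)), 3)) -> [[0, 1, 2], [3, 4, 5], [6]]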
def insert_with_progress(df, engine, table_name: str, chunksize=None, reset_index=True):
if reset_index:
dfi = df.reset_index()
else:
dfi = df
if chunksize is None:
chunksize = int(len(dfi) / 10) # 10%
with tqdm(total=len(dfi)) as pbar:
for i, cdf in enumerate(chunker(dfi, chunksize)):
cdf.to_sql(con=engine, name=table_name, if_exists="append", index=False)
pbar.update(chunksize)
def extract_one_prcp_to_sql(filename: str, by_year_path: str, engine, table_name: str):
keys = ['station', 'dateto']
df = df_file(filename, by_year_path)
logging.debug(f"dateframe {df.shape} loaded")
df_sel = df >> mask(X.element == 'PRCP') >> transmute(station=X.station, dateto=X.dateto, prcp_mm=X.value / 10)
logging.debug(f"prcp data {df_sel.shape} extracted")
dmin, dmax = (df_sel['dateto'].min(), df_sel['dateto'].max())
df_sorted = df_sel.set_index(keys)
sql_mirror = (
"select station, dateto\n"
f"from {table_name}\n"
f"where dateto between '{dmin}' and '{dmax}'\n"
"order by station, dateto"
)
df_mirror = pd.DataFrame(engine.execute(sql_mirror).fetchall(), columns=keys).set_index(keys)
df_mirror['indb'] = True
logging.debug(f"mirror data {df_mirror.shape} extracted")
if df_mirror.shape[0] == 0:
df_joined = df_sorted
df_joined['indb'] = False
else:
df_joined = df_sorted.join(df_mirror, on=keys, how='left', sort=True)
df_joined['indb'] = df_joined['indb'].fillna(False)
if (~df_joined['indb']).sum() > 0:
df_filtered = df_joined >> mask(~X.indb)
df_increment = df_filtered >> select(X.prcp_mm)
logging.debug(f"sql insert to {table_name} in progress")
insert_with_progress(df_increment, engine, table_name, chunksize=100)
logging.debug(f"insert to {table_name} completed")
else:
logging.debug("increment is empty")
def extract_all_prcp_to_sql(by_year_path: str, engine, table_name: str):
files = sorted(os.listdir(by_year_path))
nfiles = len(files)
for i, filename in enumerate(files):
logging.debug(f"{i + 1}/{nfiles} {filename}")
extract_one_prcp_to_sql(filename, by_year_path, engine, table_name)
logging.debug("extract completed")
def find_topk_nearest(k: int, station, index, x1, sortindex1, x2, sortindex2) -> List[dict]:
nst = len(index) # total number of stations
point = (station.latitude, station.longitude)
i1 = np.where(x1[sortindex1] == point[0])[0][0]
i2 = np.where(x2[sortindex2] == point[1])[0][0]
    n1 = 100  # initial perimeter (in sorted-index steps), expert guess, works on ruzyne
    n2 = 100  # initial perimeter (in sorted-index steps), expert guess, works on ruzyne
inperim = np.zeros(nst, dtype=bool)
ninp = 1
while ninp < k + 1:
i1lb = max(i1 - n1, 0)
i1ub = min(i1 + n1, nst - 1)
x1lb, x1ub = (x1[sortindex1][i1lb], x1[sortindex1][i1ub])
i2lb = max(i2 - n2, 0)
i2ub = min(i2 + n2, nst - 1)
x2lb, x2ub = (x2[sortindex2][i2lb], x2[sortindex2][i2ub])
inperim = (x1 >= x1lb) & (x1 <= x1ub) & (x2 >= x2lb) & (x2 <= x2ub)
ninp = np.sum(inperim)
n1 *= 2
n2 *= 2
distvec = np.array([geodesic(point, station_point).km for station_point in zip(x1[inperim], x2[inperim])])
    indout = np.argsort(distvec)[1:k + 1]  # skip position 0, which is the station itself
    stations_in_perimeter = index[inperim]
    result = [{'station': stid, 'dist_km': disti} for stid, disti in zip(stations_in_perimeter[indout], distvec[indout])]
return result
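# The candidate search above widens the latitude/longitude window (doubling n1, n2)
# until at least k+1 stations fall inside it, then ranks the candidates by geodesic
# distance; each returned entry looks like {'station': 'XX000000001', 'dist_km': 12.3}
# (made-up values).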
def find_nearest_stations(stations: pd.DataFrame) -> Dict[str, List]:
topk = 3
x1 = stations['latitude'].values
x2 = stations['longitude'].values
sortindex1 = np.argsort(x1)
sortindex2 = np.argsort(x2)
result = {}
for station in tqdm(stations.itertuples(), total=len(stations)):
topn_list = find_topk_nearest(
k=topk,
station=station,
index=stations.index,
x1=x1,
sortindex1=sortindex1,
x2=x2,
sortindex2=sortindex2)
result[station.Index] = topn_list
return result
def get_nearest_stations() -> Dict[str, list]:
with open('../../data/station/nearest_stations.json', 'r') as file:
nearest = json.load(file)
return nearest
def df_station(station: str, engine=None) -> pd.DataFrame:
if engine is None:
engine = create_engine('postgresql://postgres:@localhost/ghcn')
q = engine.execute(f"select * from prcp where station='{station}' order by dateto").fetchall()
df = pd.DataFrame(q, columns=['station', 'dateto', 'prcp_mm'])
return df.set_index(['station', 'dateto'])
def make_day_index(year: int) -> pd.DataFrame:
"""
    Make a daily calendar with a day index where 0 = the last day of the previous
    year and 1 = the first day of the given year. It spans the given year and the
    two preceding years, i.e. roughly -730 to +365 (about 1095 rows per station
    and year, depending on leap years).
"""
start_date = date(year-2, 1, 1)
end_date = date(year, 12, 31)
zero_date = datetime(year-1, 12, 31)
date_axis = pd.date_range(start=start_date, end=end_date, freq='D')
day_index = (date_axis - zero_date).days
calendar = pd.DataFrame({
'year': year,
'dateto': [date(d.year, d.month, d.day) for d in date_axis],
'day_index': day_index,
}, columns=['year', 'dateto', 'day_index'])
# logging.debug(f"calendar with {len(date_axis)} days from {date_axis[0]} to {date_axis[-1]}")
return calendar
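# Illustrative sketch: for year=2019 the index runs from -729 (2017-01-01) through
# 0 (2018-12-31) to +365 (2019-12-31), one row per day.
def _example_make_day_index() -> None:
    cal = make_day_index(2019)
    print(cal['day_index'].min(), cal['day_index'].max(), len(cal))  # -729 365 1095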
def calc_reference_station_year(prcp: pd.DataFrame, year: int) -> pd.DataFrame:
keys = ['station', 'dateto']
day_index = make_day_index(year)
day_index['station'] = prcp.index[0][0]
day_index = day_index.set_index(keys)
ref = day_index.join(prcp)
ref['cum_prcp'] = np.nancumsum(ref['prcp_mm'].astype(float))
day_observed = ref['prcp_mm'].notnull()
cum_days_observed = np.cumsum(day_observed)
cum_days_available = np.arange(1, len(ref)+1)
ref['cum_fillrate'] = cum_days_observed / cum_days_available
ref['reference_prcp'] = ref['cum_prcp'] / ref['cum_fillrate']
# ref.at[ref['cum_fillrate'] < 0.8, 'reference_prcp'] = np.nan
return ref
def calc_reference_station(prcp: pd.DataFrame) -> pd.DataFrame:
years = np.arange(1981, 2010+1)
ref_list = []
for year in years:
ref_year = calc_reference_station_year(prcp, year)
ref_list.append(ref_year)
ref = pd.concat(ref_list, axis=0)
return ref
def reference_quantiles(reference: pd.DataFrame) -> pd.DataFrame:
qq = np.array([0.00, 0.25, 0.50, 0.75, 1.00])
cdf_prcp = ECDF()
cdf_fill = ECDF()
qlist = []
keys = ['station', 'day_index']
for gkeys, gref in reference.groupby(keys):
if gref.empty or gref['reference_prcp'].notnull().sum() == 0:
qprcp = np.full(5, np.nan)
qfill = np.full(5, np.nan)
else:
cdf_prcp.fit(gref['reference_prcp'], weights=gref['cum_fillrate'])
qprcp = cdf_prcp.quantile(qq)
cdf_fill.fit(gref['cum_fillrate'])
qfill = cdf_fill.quantile(qq)
row = (*gkeys, *qprcp, *qfill)
qlist.append(row)
cols = [
*keys,
'prcp_min',
'prcp_p25',
'prcp_p50',
'prcp_p75',
'prcp_max',
'fill_min',
'fill_p25',
'fill_p50',
'fill_p75',
'fill_max',
]
qdf = pd.DataFrame(qlist, columns=cols).set_index(keys)
return qdf
def calc_reference_quantiles(prcp: pd.DataFrame) -> pd.DataFrame:
"""Composition of calc_reference_station and reference_quantiles."""
# This makes sure that we do not use the reference dataset directly, just the quantiles
ref = calc_reference_station(prcp)
q = reference_quantiles(ref)
return q
def load_reference_quantiles(station: str, engine) -> pd.DataFrame:
"""Load reference quantiles from database."""
q = engine.execute(f"select * from reference where station='{station}'").fetchall()
cols = [
'station',
'day_index',
'prcp_min',
'prcp_p25',
'prcp_p50',
'prcp_p75',
'prcp_max',
'fill_min',
'fill_p25',
'fill_p50',
'fill_p75',
'fill_max',
]
df = pd.DataFrame(q, columns=cols).set_index(keys=['station', 'day_index'])
return df
def calc_cumprcp(prcp: pd.DataFrame, year: int) -> pd.DataFrame:
data_end_date = prcp.index.get_level_values('dateto')[-1]
cprcp = calc_reference_station_year(prcp, year) # reuse the same code as for the reference
cprcp.columns = ['year', 'day_index', 'prcp_mm', 'cum_prcp', 'cum_fillrate', 'ytd_prcp']
idx = pd.IndexSlice
return cprcp.loc[idx[:, :data_end_date], :]
def drought_index(cum_prcp: float, reference_cum_prcp: np.ndarray) -> float:
"""Calculate drought index from the cumulative precipitation and the reference values."""
cdf = ECDF()
cdf.fit(reference_cum_prcp)
curr_cdf = cdf.eval(np.array(cum_prcp))
curr_drought_index = 2 * (0.5 - curr_cdf)
return curr_drought_index
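# Illustrative sketch: a cumulative total at the reference median gives index 0;
# totals below the median give a positive index (drier than the reference), totals
# above give a negative index. The reference values below are made up.
def _example_drought_index() -> None:
    ref = np.array([100.0, 220.0, 300.0, 380.0, 500.0])
    print(drought_index(300.0, ref))  # 0.0 at the median
    print(drought_index(150.0, ref))  # > 0, drier than the reference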
def current_drought_rate(refq: pd.DataFrame, curr_cprcp: pd.Series) -> float:
if refq.empty:
curr_drought_rate = np.nan
else:
curr_station = refq.index[0][0]
curr_day_index = curr_cprcp['day_index']
curr_ytd_prcp = curr_cprcp['ytd_prcp']
refq_columns = ['prcp_min', 'prcp_p25', 'prcp_p50', 'prcp_p75', 'prcp_max']
refq_prcp = refq.loc[(curr_station, curr_day_index), refq_columns].values
if len(refq_prcp) > 0:
curr_drought_rate = drought_index(curr_ytd_prcp, refq_prcp.flatten())
else:
curr_drought_rate = np.nan
return curr_drought_rate
def current_fillrate_cdf(refq: pd.DataFrame, curr_cprcp: pd.Series) -> float:
curr_station = refq.index[0][0]
curr_day_index = curr_cprcp['day_index']
curr_fillrate = curr_cprcp['cum_fillrate']
refq_columns = ['fill_min', 'fill_p25', 'fill_p50', 'fill_p75', 'fill_max']
ref_fillrate = refq.loc[(curr_station, curr_day_index), refq_columns].values
if len(ref_fillrate) > 0:
cdf = ECDF()
cdf.fit(ref_fillrate.flatten())
curr_fillrate_cdf = cdf.eval(curr_fillrate)
else:
curr_fillrate_cdf = np.nan
return curr_fillrate_cdf
def station_label(station: pd.Series) -> str:
coords = f"{station.latitude:.3f}, {station.longitude:.3f}, {station.elevation:.0f}"
stlabel = f"{station.continent_name}/{station.country_name}/{station.station_name} ({coords})"
return stlabel
def nice_ylim(y: float) -> float:
"""Guess the ylim which is proportional to the value."""
step = 10.0 ** np.round(np.log10(0.1*y))
ub = step * np.ceil(y / step)
return ub
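# e.g. nice_ylim(742.3) -> 800.0 and nice_ylim(95.0) -> 100.0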
def cum_prcp_plot_matplotlib(
stlabel: str,
rdf: pd.DataFrame,
cprcp: pd.DataFrame,
curr_drought_rate: float
):
"""
Plot cumulative precipitation with Matplotlib. Deprecated in favor of cum_prcp_plot.
:param stlabel:
:param rdf:
:param cprcp:
:param curr_drought_rate:
:return:
"""
f = plt.figure(figsize=(12, 12))
if not rdf.empty and rdf['prcp_min'].notnull().sum() > 0:
prcp_ub = nice_ylim(rdf['prcp_max'].iloc[-1])
xx = rdf['dateto']
plt.fill_between(x=xx, y1=0, y2=rdf['prcp_min'], color='red', linewidth=0.0, alpha=0.5)
plt.fill_between(x=xx, y1=rdf['prcp_min'], y2=rdf['prcp_p25'], color='orange', linewidth=0.0, alpha=0.5)
plt.fill_between(x=xx, y1=rdf['prcp_p25'], y2=rdf['prcp_p75'], color='green', linewidth=0.0, alpha=0.5)
plt.fill_between(x=xx, y1=rdf['prcp_p75'], y2=rdf['prcp_max'], color='cyan', linewidth=0.0, alpha=0.5)
plt.fill_between(x=xx, y1=rdf['prcp_max'], y2=prcp_ub, color='blue', linewidth=0.0, alpha=0.5)
plt.plot(xx, rdf['prcp_p50'], c='grey')
if not cprcp.empty:
plt.plot(cprcp.index.get_level_values('dateto'), cprcp['ytd_prcp'], c='red', linewidth=3)
ax = plt.gca()
ax.set_title(f"{stlabel}: current drought rate is {100 * curr_drought_rate:.0f}%")
ax.set_ylabel('3rd year cumulative precipitation in mm')
ax.grid(True)
return f
def cum_prcp_plot(
stlabel: str,
rdf: pd.DataFrame,
cprcp: pd.DataFrame,
curr_drought_rate: float
):
"""
Plot cumulative precipitation with Bokeh.
:param stlabel:
:param rdf:
:param cprcp:
:param curr_drought_rate:
:return:
"""
src_ref = ColumnDataSource(rdf)
src_cur = ColumnDataSource(cprcp.reset_index())
p = figure(
plot_width=800,
plot_height=800,
title=f"{stlabel}: current drought index is {100 * curr_drought_rate:.0f}%",
y_axis_label="3rd year cumulative precipitation in mm",
x_axis_type='datetime',
)
if not rdf.empty and rdf['prcp_min'].notnull().sum() > 0:
prcp_ub = nice_ylim(rdf['prcp_max'].iloc[-1])
amin = VArea(x="dateto", y1=0, y2="prcp_min", fill_color="red", fill_alpha=0.5)
ap25 = VArea(x="dateto", y1="prcp_min", y2="prcp_p25", fill_color="orange", fill_alpha=0.5)
ap50 = VArea(x="dateto", y1="prcp_p25", y2="prcp_p75", fill_color="green", fill_alpha=0.5)
ap75 = VArea(x="dateto", y1="prcp_p75", y2="prcp_max", fill_color="cyan", fill_alpha=0.5)
amax = VArea(x="dateto", y1="prcp_max", y2=prcp_ub, fill_color="blue", fill_alpha=0.5)
lp50 = Line(x="dateto", y="prcp_p50", line_color='grey', line_width=3)
p.add_glyph(src_ref, amin)
p.add_glyph(src_ref, ap25)
p.add_glyph(src_ref, ap50)
p.add_glyph(src_ref, ap75)
p.add_glyph(src_ref, amax)
rref = p.add_glyph(src_ref, lp50)
ttp_ref = [
("Date", "@dateto{%F}"),
("Day Index", "@day_index"),
("Precipitation min", "@prcp_min{0.}"),
("Precipitation p25", "@prcp_p25{0.}"),
("Precipitation p50", "@prcp_p50{0.}"),
("Precipitation p75", "@prcp_p75{0.}"),
("Precipitation max", "@prcp_max{0.}"),
]
hover_ref = HoverTool(renderers=[rref], tooltips=ttp_ref, formatters={"@dateto": "datetime"})
p.add_tools(hover_ref)
if not cprcp.empty:
lcur = Line(x='dateto', y='ytd_prcp', line_color='red', line_width=3)
rcur = p.add_glyph(src_cur, lcur)
ttp_cur = [
("Date", "@dateto{%F}"),
("Day Index", "@day_index"),
("Precipitation that day (mm)", "@prcp_mm{0.}"),
("Precipitation 3rd year cumulative observed (mm)", "@cum_prcp{0.}"),
("Fill rate 3rd year cumulative", "@cum_fillrate{0.000}"),
("Precipitation 3rd year cumulative predicted (mm)", "@ytd_prcp{0.}"),
]
hover_cur = HoverTool(renderers=[rcur], tooltips=ttp_cur, formatters={"@dateto": "datetime"})
p.add_tools(hover_cur)
return p
def cum_fillrate_plot(
stlabel: str,
rdf: pd.DataFrame,
cprcp: pd.DataFrame,
curr_fillrate: float,
curr_fillrate_cdf: float,
):
f = plt.figure(figsize=(16, 9))
if not cprcp.empty:
plt.plot(cprcp.index.get_level_values('dateto'), cprcp['cum_fillrate'], c='red', linewidth=3)
if not rdf.empty:
plt.fill_between(rdf['dateto'], y1=rdf['fill_min'], y2=rdf['fill_max'], color='lightgray', alpha=0.5)
plt.fill_between(rdf['dateto'], y1=rdf['fill_p25'], y2=rdf['fill_p75'], color='darkgray', alpha=0.5)
plt.plot(rdf['dateto'], rdf['fill_p50'], color='gray')
ax = plt.gca()
ax.set_ylim(0, 1)
title = f"{stlabel}: current fill rate is {curr_fillrate:.2f} which is {100 * curr_fillrate_cdf:.0f} percentile"
ax.set_title(title)
ax.set_ylabel('fill rate')
ax.grid(True)
return f
def totals_barchart_matplotlib(dfy: pd.DataFrame):
"""Deprecated in favor of totals_barchart."""
f = plt.figure(figsize=(12, 12))
ax = plt.gca()
ax.set_ylabel("annual precipitation in mm")
ax.set_title(f"Yearly precipitation totals")
if not dfy.empty:
xx = dfy['year'].values
yy = dfy['prcp_mm'].values / 10
dd = dfy['observed_days']
x1 = np.min(xx)
x2 = np.max(xx)
mx = np.mean(xx)
my = np.mean(yy)
plt.bar(xx, yy, width=0.8)
plt.step(xx, dd, c='red')
plt.plot([x1, x2], [my, my], color='blue')
ax.annotate(
f"{my:.0f}",
xy=(mx, my),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center',
va='bottom',
fontsize='x-large',
color='blue',
)
return f
def totals_barchart(df: pd.DataFrame):
dfy = df.copy()
dfy['prcp_mm'] = df.prcp_mm / 10
dfy['available_days'] = 365 + (dfy.year % 4 == 0)
dfy['fill_rate'] = dfy.observed_days / dfy.available_days
dfy['prcp_pred'] = dfy.prcp_mm / dfy.fill_rate
prcp_pred_mean = dfy.prcp_pred.mean()
dfy['prcp_pred_mean'] = prcp_pred_mean
f = figure(
plot_width=800,
plot_height=800,
title=f"Yearly precipitation totals, mean={prcp_pred_mean:.0f}mm",
y_axis_label="annual precipitation in mm",
)
if not dfy.empty:
src = ColumnDataSource(dfy)
bobs = VBar(x='year', top='prcp_mm', fill_color='blue', line_color='blue', width=0.8)
bpre = VBar(x='year', bottom='prcp_mm', top='prcp_pred', fill_color='lightblue', line_color='blue', width=0.8)
lpre = Line(x='year', y='prcp_pred_mean', line_color='darkblue', line_width=3)
f.add_glyph(src, bobs)
f.add_glyph(src, bpre)
f.add_glyph(src, lpre)
ttp = [
("Year", "@year"),
("Precipitation observed (mm)", "@prcp_mm{0.}"),
("Observed days", "@observed_days"),
("Available days", "@available_days"),
("Fill rate", "@fill_rate{0.000}"),
("Precipitation predicted (mm)", "@prcp_pred{0.}"),
]
hover_tool = HoverTool(tooltips=ttp)
f.add_tools(hover_tool)
return f
def drought_rate_data(stid: str, year: int, engine=None) -> tuple:
prcp = df_station(stid, engine)
if not prcp.empty:
if engine is None:
refq = calc_reference_quantiles(prcp)
else:
refq = load_reference_quantiles(stid, engine)
data_end_date = prcp.index.get_level_values('dateto')[-1]
day_index = make_day_index(year)
rdf = day_index.merge(refq, on='day_index', how='left') >> mask(X.dateto <= data_end_date)
if engine is None:
cprcp = calc_cumprcp(prcp, year)
else:
cprcp = calc_cumprcp(prcp, year) # TODO load from db
if not cprcp.empty:
curr_cprcp = cprcp.iloc[-1, :]
curr_fillrate = curr_cprcp['cum_fillrate']
if refq.empty:
curr_drought_rate = np.nan
curr_fillrate_cdf = np.nan
else:
curr_drought_rate = current_drought_rate(refq, curr_cprcp)
curr_fillrate_cdf = current_fillrate_cdf(refq, curr_cprcp)
else:
curr_drought_rate = np.nan
curr_fillrate = np.nan
curr_fillrate_cdf = np.nan
else:
rdf = pd.DataFrame()
cprcp = pd.DataFrame()
curr_drought_rate = np.nan
curr_fillrate = np.nan
curr_fillrate_cdf = np.nan
return rdf, cprcp, curr_drought_rate, curr_fillrate, curr_fillrate_cdf
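# A minimal usage sketch of the pieces above (the station id and year are
# hypothetical; any GHCN station id present in the database would do):
#
#   engine = sql_engine()
#   rdf, cprcp, rate, fill, fill_cdf = drought_rate_data('USW00094728', 2020, engine)
#   p = cum_prcp_plot('USW00094728', rdf, cprcp, rate)                 # Bokeh plot
#   f = cum_fillrate_plot('USW00094728', rdf, cprcp, fill, fill_cdf)   # matplotlib plot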
def sql_engine():
engine = create_engine('postgres://postgres:@localhost/ghcn')
return engine
def get_stations_noref(engine, stations: pd.DataFrame) -> pd.DataFrame:
cols = ['station', 'dispatched_at', 'completed_at']
sql_noref = (
f"select {', '.join(cols)}\n"
"from reference_job\n"
"where completed_at is null"
)
station_noref = pd.DataFrame(engine.execute(sql_noref).fetchall(), columns=cols).set_index('station')
station_coord = station_noref.join(stations)
lat = station_coord.latitude
lon = station_coord.longitude
center_point = (49.9629345, 14.0600897) # x=14.0600897&y=49.9629345 = <NAME> 1005
station_coord['perimeter_km'] = np.array([geodesic(center_point, station_point).km for station_point in zip(lat, lon)])
return station_coord.sort_values(by='perimeter_km')
def ded_prcp(engine) -> date:
sql_query = (
"select max(dateto) as ded\n"
"from prcp"
)
df = pd.DataFrame(engine.execute(sql_query).fetchall(), columns=['ded'])
data_end_dt = df['ded'].iat[0]
data_end_date = date(data_end_dt.year, data_end_dt.month, data_end_dt.day)
logging.debug(f"data_end_date={data_end_date}")
return data_end_date
def ded_cump(engine, year: int) -> tuple:
sql_query = (
"select max(dateto) as ded, max(day_index) as dei\n"
"from cumprcp\n"
f"where year={year}"
)
df = pd.DataFrame(engine.execute(sql_query).fetchall(), columns=['ded', 'dei'])
data_end_dt = df['ded'].iat[0]
data_end_index = df['dei'].iat[0]
if data_end_dt is None:
data_end_date = None
else:
data_end_date = date(data_end_dt.year, data_end_dt.month, data_end_dt.day)
logging.debug(f"cump_end_date={data_end_date}")
return data_end_date, data_end_index
def prcp_dateto(engine, dateto: date) -> pd.DataFrame:
"""Select all rows from prcp table as of dateto."""
sql_query = f"select station, cast(prcp_mm as float) as prcp_mm from prcp where dateto=date'{dateto.isoformat()}'"
logging.debug(sql_query)
df = pd.DataFrame(engine.execute(sql_query).fetchall(), columns=['station', 'prcp_mm'])
return df
def increment_cumprcp(engine, year: int, day_index: int, dateto: date, cum_days_available: int):
"""Insert new records to cumprcp for the spacified day assuming that the previous day is there."""
cols_previous = ['station', 'year', 'day_index', 'dateto', 'cum_days_observed', 'cum_prcp']
sql_previous = f"select {', '.join(cols_previous)} from cumprcp where year={year} and day_index={day_index - 1}"
cumprcp_previous = pd.DataFrame(engine.execute(sql_previous).fetchall(), columns=cols_previous)
assert not cumprcp_previous.empty
prcp = prcp_dateto(engine, dateto)
cols_both = ['station', 'year']
cumprcp = cumprcp_previous[cols_both].merge(prcp, how='left', on='station')
cumprcp['day_index'] = day_index
cumprcp['dateto'] = dateto
cumprcp['flag_observed'] = cumprcp.prcp_mm.notnull()
cumprcp['cum_days_observed'] = cumprcp_previous.cum_days_observed + cumprcp.flag_observed
cumprcp['cum_fillrate'] = cumprcp.cum_days_observed / cum_days_available
cumprcp['cum_prcp'] = cumprcp_previous.cum_prcp + cumprcp.prcp_mm.fillna(0)
cumprcp['cum_prcp_pred'] = cumprcp.cum_prcp / cumprcp.cum_fillrate
cols_out = [
'station',
'year',
'day_index',
'dateto',
'flag_observed',
'cum_days_observed',
'cum_fillrate',
'cum_prcp',
'cum_prcp_pred',
]
insert_with_progress(cumprcp[cols_out], engine, table_name='cumprcp', reset_index=False)
def update_cumprcp(engine):
"""Update table cumprcp if new data is available in prcp table."""
stations = load_stations()
prcp_end_date = ded_prcp(engine)
year = prcp_end_date.year
day_index = make_day_index(year)
first_day_index = day_index['day_index'].iat[0]
cump_end_date, cump_end_index = ded_cump(engine, year)
if cump_end_date is None: # create new year skeleton
dateto0 = day_index['dateto'].iat[0]
logging.debug(dateto0)
prcp0 = prcp_dateto(engine, dateto0)
cump0 = stations >> left_join(prcp0, by='station')
flag_observed = cump0.prcp_mm.notnull()
skeleton = pd.DataFrame({
'station': stations.index,
'year': year,
'day_index': first_day_index,
'dateto': dateto0,
'flag_observed': flag_observed,
'cum_days_observed': flag_observed.astype(int),
'cum_fillrate': flag_observed.astype(float),
'cum_prcp': cump0.prcp_mm.fillna(0),
'cum_prcp_pred': cump0.prcp_mm,
})
insert_with_progress(skeleton, engine, table_name='cumprcp', reset_index=False)
cump_end_date = dateto0
day_index_todo = day_index.loc[(day_index.dateto > cump_end_date) & (day_index.dateto <= prcp_end_date), :]
for x in day_index_todo.itertuples():
logging.debug(x)
cum_days_available = x.day_index - first_day_index + 1
increment_cumprcp(engine, x.year, x.day_index, x.dateto, cum_days_available)
logging.debug("completed")
def do_worker_job(engine, station_id: str):
if station_id:
prcp = df_station(station_id)
if not prcp.empty:
refq = calc_reference_quantiles(prcp)
if not refq.empty:
insert_with_progress(refq, engine, table_name='reference', chunksize=2000)
return
def make_station_tree(stations: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""All nodes of the stations tree are searcheable in the autocomplete by node_name."""
def node_name_station(station_record: NamedTuple) -> str:
name = f"{station_record.station_name} (station in {station_record.country_name})"
return name
def node_name_country(station_record: NamedTuple) -> str:
name = f"{station_record.country_name} (country in {station_record.continent_name})"
return name
station_name = pd.Series([node_name_station(x) for x in stations.itertuples()])
if station_name.is_unique:
logging.debug("tree nodes are unique")
else:
logging.error("tree nodes are not unique")
freq = station_name.value_counts()
ndup = np.sum(freq > 1)
logging.debug(f"{ndup} duplicated names")
for index, value in tqdm(freq[:ndup].iteritems(), total=ndup):
# logging.debug(f"{index}: {value}x")
# deduplication - add i/n at the end of each name
dupidx = np.flatnonzero(station_name == index)
for i, (idx, ndname) in enumerate(station_name.iloc[dupidx].iteritems()):
# logging.debug(f"{idx}: {ndname} {i+1}/{value}")
station_name.at[idx] = f"{ndname} {i+1}/{value}"
country_name = pd.Series([node_name_country(x) for x in stations.itertuples()])
continent_name = stations['continent_name']
node_name = pd.concat([station_name, country_name, continent_name], axis=0)
stidx = stations.index.values
station = np.concatenate((stidx, stidx, stidx), axis=0)
node_station = pd.DataFrame({
'node_name': node_name,
'station': station,
})
nvc = node_station['node_name'].value_counts()
tree = pd.DataFrame({
'node_name': nvc.index,
'num_stations': nvc.values,
})
return tree, node_station
def refresh_drought(engine):
"""Refresh table drought from cumprcp and reference for the last day_index available."""
sql_year = "select max(year) from cumprcp"
max_year = engine.execute(sql_year).fetchone()[0]
logging.debug(f"max_year={max_year}")
sql_index = f"select max(day_index) from cumprcp where year = {max_year}"
max_day_index = engine.execute(sql_index).fetchone()[0]
logging.debug(f"max_day_index={max_day_index}")
last_cumprcp_cols = ["station", "cum_prcp_pred", "cum_fillrate"]
sql_last_cumprcp = (
f"select {', '.join(last_cumprcp_cols)} "
f"from cumprcp where year = {max_year} and day_index = {max_day_index} "
f"and {' and '.join([c + ' is not null' for c in last_cumprcp_cols])}"
)
logging.debug(sql_last_cumprcp)
last_cumprcp = pd.DataFrame(
engine.execute(sql_last_cumprcp).fetchall(), columns=last_cumprcp_cols
).set_index('station')
logging.debug(f"last_cumprcp={last_cumprcp.shape}")
reference_prcp_cols = ["prcp_min", "prcp_p25", "prcp_p50", "prcp_p75", "prcp_max"]
last_reference_cols = ["station"] + reference_prcp_cols
sql_last_reference = (
f"select {', '.join(last_reference_cols)} "
f"from reference where day_index = {max_day_index} "
f"and {' and '.join([c + ' is not null' for c in reference_prcp_cols])}"
)
logging.debug(sql_last_reference)
last_reference = pd.DataFrame(
engine.execute(sql_last_reference).fetchall(), columns=last_reference_cols
).set_index('station')
logging.debug(f"last_reference={last_reference.shape}")
drought = last_reference.join(last_cumprcp, how='inner')
logging.debug(f"drought={drought.shape}")
drought['drought_index'] = [drought_index(x.cum_prcp_pred, x[reference_prcp_cols]) for s, x in drought.iterrows()]
engine.execute("truncate table drought")
out_df = drought.reset_index() >> select(X.station, X.drought_index, X.cum_prcp_pred, X.cum_fillrate)
logging.debug(f"out_df={out_df.shape}")
insert_with_progress(out_df, engine, table_name='drought', reset_index=False)
def get_drought(engine) -> pd.DataFrame:
cols = ["station", "drought_index", "cum_prcp_pred", "cum_fillrate"]
df = pd.DataFrame(engine.execute("select * from drought").fetchall(), columns=cols).set_index("station")
return df
def get_drought_stats(drought: pd.DataFrame) -> tuple:
"""Calculate stats from the not aggregated drought data."""
flag_dry = (drought.drought_index >= 0.5)
num_dry = np.sum(flag_dry)
num_stations = len(drought)
if num_stations > 0:
drought_rate = num_dry / num_stations
else:
drought_rate = np.nan
return num_stations, num_dry, drought_rate
def aggregate_drought(drought: pd.DataFrame, agg_stations=50) -> pd.DataFrame:
"""Return aggd, num_dry, num_stations."""
drought = drought.sort_values(by='drought_index')
num_stations = len(drought)
bucket_size = np.ceil(num_stations / agg_stations)
drought['bucket_mark'] = (np.arange(0, num_stations) // bucket_size + 1) * bucket_size
aggd = drought >> group_by(X.bucket_mark) >> summarize(
drought_index=X.drought_index.mean(),
cum_fillrate=X.cum_fillrate.mean(),
bucket_size=n(X.drought_index),
)
aggd = aggd.set_index('bucket_mark')
aggd.index = aggd.index.set_names('index')
return aggd
def drought_add_facecolor(drought: pd.DataFrame) -> pd.DataFrame:
"""Add facecolor column before drought plot."""
bar_colors = np.array(['blue', 'cyan', 'green', 'orange', 'red'])
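    # pd.cut below buckets the drought index (higher = closer to drought) into five
    # left-closed bands: [-1, -0.99) blue, [-0.99, -0.5) cyan, [-0.5, 0.5) green,
    # [0.5, 0.99) orange, [0.99, 1.001) red.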
drought_index_band = pd.cut(drought.drought_index, [-1.0, -0.99, -0.5, +0.5, +0.99, +1.001], right=False)
drought['facecolor'] = bar_colors[drought_index_band.cat.codes.values]
if "bucket_size" in drought.columns:
drought['barwidth'] = drought['bucket_size'] * 0.8
else:
drought['barwidth'] = 0.8
return drought
def drought_rate_plot_core(drought: pd.DataFrame, drought_stats: tuple, tooltips: list):
num_stations, num_dry, drought_rate = drought_stats
src = ColumnDataSource(drought)
p = figure(
plot_width=1600,
plot_height=600,
title=f"Drought rate is {100 * drought_rate:.0f}%={num_dry}/{num_stations}.",
y_axis_label="Drought Index",
x_axis_label="Station Rank by Drought Index",
)
p.vbar(
x="index",
top="drought_index",
width="barwidth",
alpha="cum_fillrate",
fill_color="facecolor",
line_color=None,
source=src)
p.y_range.start = -1.0
p.y_range.end = +1.0
p.add_tools(HoverTool(tooltips=tooltips))
p.xgrid.grid_line_color = None
return p
def drought_rate_plot(drought: pd.DataFrame):
drought_stats = get_drought_stats(drought)
ttp = [
("cum_prcp_predicted", "@cum_prcp_pred{00.}"),
("cum_fillrate", "@cum_fillrate{0.00}"),
("id", "@station"),
("station", "@station_name"),
("country", "@country_name"),
("elevation", "@elevation{0.}"),
]
plotdf = drought.sort_values(by='drought_index').reset_index()
p = drought_rate_plot_core(plotdf, drought_stats, ttp)
return p
def drought_rate_plot_agg(drought: pd.DataFrame, agg_stations=50):
drought_stats = get_drought_stats(drought)
aggd = aggregate_drought(drought, agg_stations=agg_stations)
ttp = [
("Mean drought index", "@drought_index{0.000}"),
("Bucket size", "@bucket_size"),
("Mean fill rate", "@cum_fillrate{0.000}"),
]
aggd = drought_add_facecolor(aggd)
p = drought_rate_plot_core(aggd, drought_stats, ttp)
return p
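# Sketch of the daily refresh pipeline built from the functions above
# (assumes the local 'ghcn' Postgres database used by sql_engine()):
#
#   engine = sql_engine()
#   update_cumprcp(engine)               # extend cumprcp with newly loaded days
#   refresh_drought(engine)              # recompute per-station drought indices
#   drought = get_drought(engine)
#   p = drought_rate_plot_agg(drought)   # aggregated Bokeh bar chart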
def update_yearly_totals(year: int):
prcp_path = '../../data/prcp'
out_file = '../../data/yearly_totals/prcp_totals.csv'
logging.debug(f"current_year={year}")
prcp = df_prcp(year, prcp_path)
totals = prcp >> group_by(X.station) >> summarize(
prcp_mm=X.prcp.sum(),
observed_days=n(X.prcp),
) >> mutate(year=year)
logging.debug(f"{len(totals)} totals calculated")
df1 = | pd.read_csv(out_file) | pandas.read_csv |
# coding=utf-8
# Author: <NAME>
# Date: Sept 02, 2019
#
# Description: Reads a MultiLayer network (HS, MM & DM) and extracts subgraphs based on parameters for the networkbrowser.
#
#
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
import networkx as nx
from matplotlib import colors
from utils import get_network_layer, get_network_largest_connected_component, ensurePathExists
import argparse
#
from data_spermatocyte_pca_modules_dm import spermatocyte_pca_modules_dm
from data_spermatocyte_pca_modules_mm import spermatocyte_pca_modules_mm
from data_spermatocyte_pca_modules_hs import spermatocyte_pca_modules_hs
#
from data_enterocyte_pca_modules_dm import enterocyte_pca_modules_dm
from data_enterocyte_pca_modules_mm import enterocyte_pca_modules_mm
from data_enterocyte_pca_modules_hs import enterocyte_pca_modules_hs
cmap_meanfertrate = colors.LinearSegmentedColormap.from_list(name='cmap-mean-fert-rate', colors=['#d62728', '#1f77b4'], N=256)
def fert_rate_color(x):
if pd.isnull(x):
return '#FFFFFF' # white
else:
return colors.to_hex(cmap_meanfertrate(x)) # color
if __name__ == '__main__':
#
# Args
#
parser = argparse.ArgumentParser()
parser.add_argument("--celltype", default='spermatocyte', type=str, choices=['spermatocyte', 'enterocyte'], help="Cell type. Must be either 'spermatocyte' or 'enterocyte'. Defaults to spermatocyte")
parser.add_argument("--network", default='thr', type=str, help="Network to use. Defaults to 'thr'.")
parser.add_argument("--threshold", default=0.5, type=float, help="Threshold value. Defaults to 0.5.")
#
parser.add_argument("--add_modules", default=True, type=bool, help="Add PCA module information to the network.")
parser.add_argument("--add_conserved", default=True, type=bool, help="Add gene conservation information to the network.")
parser.add_argument("--add_core", default=True, type=bool, help="Add core gene information to the network.")
parser.add_argument("--add_backbone", default=True, type=bool, help="Add edge backbone to the network.")
parser.add_argument("--add_ortho_backbone", default=True, type=bool, help="Add edge ortho-backbone to the network.")
#
parser.add_argument("--add_mdlc_dge_results", default=True, type=bool, help="Add gene mdlc DGE results to the DM network.")
parser.add_argument("--add_splicing_defects", default=True, type=bool, help="Add gene mdlc splicing defects results to the DM network.")
#
parser.add_argument("--remove_isolates", default=True, type=bool, help="Remove isolate nodes from layers.")
parser.add_argument("--only_largest_component", default=True, type=bool, help="Only output the largest connected component.")
# parser.add_argument("--layer", default='DM', type=str, choices=['DM', 'MM', 'HS'], help="Network layer to compute SVD. Defaults to 'DM'.")
args = parser.parse_args()
#
celltype = args.celltype # spermatocyte or enterocyte
network = args.network
threshold = args.threshold
threshold_str = str(threshold).replace('.', 'p')
#
add_modules = args.add_modules
add_conserved = args.add_conserved
add_core = args.add_core
add_backbone = args.add_backbone
add_ortho_backbone = args.add_ortho_backbone
#
add_mdlc_dge_results = args.add_mdlc_dge_results
add_splicing_defects = args.add_splicing_defects
#
remove_isolates = args.remove_isolates
only_largest_component = args.only_largest_component
#
placeholder = {'HS': None, 'MM': None, 'DM': None}
data = {
'spermatocyte': {
'PCA': dict(placeholder),
'distance-angle': dict(placeholder),
'entropy': dict(placeholder),
'modules': {
'HS': spermatocyte_pca_modules_hs,
'MM': spermatocyte_pca_modules_mm,
'DM': spermatocyte_pca_modules_dm,
},
},
'enterocyte': {
'PCA': dict(placeholder),
'distance-angle': dict(placeholder),
'entropy': dict(placeholder),
'modules': {
'HS': enterocyte_pca_modules_hs,
'MM': enterocyte_pca_modules_mm,
'DM': enterocyte_pca_modules_dm,
}
}
}
#
print('Reading Network')
path_net = '../../04-network/results/network/{celltype:}/'.format(celltype=celltype)
if network == 'thr':
rGfile_gpickle = path_net + 'net-{celltype:s}-{network:s}-{threshold:s}.gpickle'.format(celltype=celltype, network=network, threshold=threshold_str)
G = nx.read_gpickle(rGfile_gpickle)
if add_conserved:
path_fpkm = '../../02-core_genes/results/FPKM/'
df_HS = pd.read_csv(path_fpkm + 'HS/HS-FPKM-{celltype:s}.csv.gz'.format(celltype=celltype), index_col='id_string')
df_MM = pd.read_csv(path_fpkm + 'MM/MM-FPKM-{celltype:s}.csv.gz'.format(celltype=celltype), index_col='id_string')
df_DM = pd.read_csv(path_fpkm + 'DM/DM-FPKM-{celltype:s}.csv.gz'.format(celltype=celltype), index_col='id_string')
dict_string_gene_HS = df_HS['id_gene'].to_dict()
dict_string_gene_MM = df_MM['id_gene'].to_dict()
dict_string_gene_DM = df_DM['id_gene'].to_dict()
print('Loading {celltype:s} meta genes'.format(celltype=celltype))
path = '../../02-core_genes/results/'
dfM = pd.read_csv(path + 'meta-genes/meta-{celltype:s}-genes.csv.gz'.format(celltype=celltype), index_col='id_eggnog', usecols=['id_eggnog', 'id_string_HS', 'id_string_MM', 'id_string_DM'])
dfM['id_string_HS'] = dfM['id_string_HS'].apply(lambda x: x.split(',') if not pd.isnull(x) else [])
dfM['id_string_MM'] = dfM['id_string_MM'].apply(lambda x: x.split(',') if not pd.isnull(x) else [])
dfM['id_string_DM'] = dfM['id_string_DM'].apply(lambda x: x.split(',') if not pd.isnull(x) else [])
dfM['id_gene_HS'] = dfM['id_string_HS'].apply(lambda x: [dict_string_gene_HS[i] for i in x])
dfM['id_gene_MM'] = dfM['id_string_MM'].apply(lambda x: [dict_string_gene_MM[i] for i in x])
dfM['id_gene_DM'] = dfM['id_string_DM'].apply(lambda x: [dict_string_gene_DM[i] for i in x])
dfM = dfM[['id_gene_HS', 'id_gene_MM', 'id_gene_DM']]
# Only keep meta genes with homologs in all three species
dfM = dfM.loc[dfM.applymap(len).applymap(bool).sum(axis='columns') == 3]
for layer in ['HS', 'MM', 'DM']:
if layer != 'DM':
continue
#
print('Isolate {layer:s} Layer'.format(layer=layer))
Gt = get_network_layer(G, layer=layer)
# Add Module Information
if add_modules:
print('Load PCA Results ({layer:s})'.format(layer=layer))
rPCAFile = '../../04-network/results/pca/{celltype:s}/{layer:s}/pca-{celltype:s}-{network:s}-{threshold:s}-{layer:s}-dim.csv.gz'.format(celltype=celltype, network=network, threshold=threshold_str, layer=layer)
rDiAnFile = '../../04-network/results/pca/{celltype:s}/{layer:s}/pca-{celltype:s}-{network:s}-{threshold:s}-{layer:s}-dian.csv.gz'.format(celltype=celltype, network=network, threshold=threshold_str, layer=layer)
rEntFile = '../../04-network/results/pca/{celltype:s}/{layer:s}/pca-{celltype:s}-{network:s}-{threshold:s}-{layer:s}-entropy.csv.gz'.format(celltype=celltype, network=network, threshold=threshold_str, layer=layer)
df_pca = pd.read_csv(rPCAFile, index_col=0)
df_dian = pd.read_csv(rDiAnFile, index_col=0)
df_ent = pd.read_csv(rEntFile, index_col=0)
modules = data[celltype]['modules'][layer]
#
rPCAFile = '../../04-network/results/pca/{celltype:s}/{layer:s}/pca-{celltype:s}-{network:s}-{threshold:s}-{layer:s}-dim.csv.gz'.format(celltype=celltype, network=network, threshold=threshold_str, layer=layer)
dfPCA = pd.read_csv(rPCAFile, index_col=0, encoding='utf-8')
# Extract Component Modules
for module in modules:
mid = module['id']
mname = module['name']
print("Identifying module: M{mid:d} {mname:s} ".format(mid=mid, mname=mname))
xc = module['dim-coords']['xdim']
yc = module['dim-coords']['ydim']
cx = "{xc:d}c".format(xc=xc)
cy = "{yc:d}c".format(yc=yc)
cxy = '{xc:d}c-{yc:d}c-dist'.format(xc=xc, yc=yc) # label-1c-2c-dist
cxl, cxh = module['dim-coords']['xvals']
cyl, cyh = module['dim-coords']['yvals']
cutrank = module['dim-coords']['radius-rank']
cutradius = df_ent.loc[((df_ent['dim'] == xc) & (df_ent['cut-rank'] == cutrank)), 'radius-start'].squeeze()
df_pca_tmp = df_pca.loc[
(
(df_pca[cx] >= cxl) & (df_pca[cx] <= cxh) & (df_pca[cy] >= cyl) & (df_pca[cy] <= cyh) & (df_dian[cxy] >= cutradius)
), ['gene', cx, cy]].copy()
component_ids = {g: True for g in df_pca_tmp.index.tolist()}
net_attribute_name = 'module-pca-{layer:s}-{mid:d}'.format(layer=layer, mid=mid)
nx.set_node_attributes(Gt, values=component_ids, name=net_attribute_name)
# Add Conserved Information
if add_conserved:
dict_conserved = {gene: True for gene in dfM['id_gene_' + layer].explode().tolist()}
#
nx.set_node_attributes(Gt, values=dict_conserved, name='conserved')
if add_core:
rCOREFile = '../../02-core_genes/results/pipeline-core/{layer:s}_meiotic_genes.csv'.format(layer=layer)
dfC = pd.read_csv(rCOREFile, index_col=0)
dict_core = {gene: True for gene in dfC.index.tolist()}
#
nx.set_node_attributes(Gt, values=dict_core, name='core')
# Remove Isolates
if remove_isolates:
isolates = list(nx.isolates(Gt))
print('Removing {n:d} isolated nodes'.format(n=len(isolates)))
Gt.remove_nodes_from(isolates)
if add_backbone:
print('Adding backbone data')
path_backbone = "../../04-network/results/network-closure/{celltype:s}/".format(celltype=celltype)
rBfile = path_backbone + "net-closure-{celltype:s}-{network:s}-{threshold:s}-{layer:s}.gpickle".format(celltype=celltype, network=network, threshold=threshold_str, layer=layer)
B = nx.read_gpickle(rBfile)
#
is_metric = nx.get_edge_attributes(B, name='is_metric')
is_ultrametric = nx.get_edge_attributes(B, name='is_ultrametric')
#
nx.set_edge_attributes(Gt, name='is_metric', values=is_metric)
nx.set_edge_attributes(Gt, name='is_ultrametric', values=is_ultrametric)
if add_ortho_backbone and (celltype == 'spermatocyte'):
print('Adding ortho-backbone data')
path_ortho_backbone = "../../04-network/results/network-closure-ortho/{celltype:s}/".format(celltype=celltype)
rOBfile = path_ortho_backbone + "net-closure-ortho-{celltype:s}-{network:s}-{threshold:s}-{layer:s}.gpickle".format(celltype=celltype, network=network, threshold=threshold_str, layer=layer)
OB = nx.read_gpickle(rOBfile)
#
is_metric_ortho = nx.get_edge_attributes(OB, name='is_metric_ortho')
#
nx.set_edge_attributes(Gt, name='is_metric_ortho', values=is_metric_ortho)
if add_mdlc_dge_results and (celltype == 'spermatocyte') and (layer == 'DM'):
print('Adding mdlc DGE results')
rMDLCFile = '../../01-diff-gene-exp/results/mdlc/{layer:s}-DGE-mdlc_vs_control.csv'.format(layer=layer)
dfM = | pd.read_csv(rMDLCFile, index_col=0, usecols=['id', 'gene', 'logFC', 'logCPM', 'F', 'PValue', 'FDR']) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
import _pickle as cPickle
import argparse
from copy import deepcopy
import japanize_matplotlib
import lightgbm as lgb
import matplotlib.pyplot as plt
import pickle
from sklearn.metrics import mean_squared_error
import time
from tqdm import tqdm
import os
code_path = os.path.dirname(os.path.abspath(__file__))
parser = argparse.ArgumentParser()
arg = parser.add_argument
arg('seed', type=int)
arg('iteration_mul', type=float)
arg('train_file', type=str)
arg('test_file', type=str)
arg('--learning_rate', type=float, default=0.05)
arg('--num_leaves', type=int, default=31)
arg('--n_estimators', type=int, default=500)
args = parser.parse_args()  # args=['1', '0.5', 'train_fe.ftr', 'test_fe.ftr'] when run interactively
# print(args)
train_fe = | pd.read_feather(f'{code_path}/../prepare_data/{args.train_file}') | pandas.read_feather |
import json
import os
import pandas as pd
import scraper
class full_version:
def __init__(self):
self.data={}
self.name=""
self.email=""
self.user_data = os.path.join(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__))),
"json",
"user_data.json"
)
self.user_list = os.path.join(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__))),
"csvs",
"user_list.csv"
)
self.df= | pd.DataFrame() | pandas.DataFrame |
# coding: utf-8
# ## Lending Club - classification of loans
#
# This project aims to analyze data for loans through 2007-2015 from Lending Club, available on Kaggle. The dataset contains over 887 thousand observations and 74 variables, one of which describes the loan status. The goal is to create a machine learning model to categorize the loans as good or bad.
#
# Contents:
#
# 1. Preparing dataset for preprocessing
# 2. Reviewing variables - drop and edit
# 3. Missing values
# 4. Preparing dataset for modeling
# 5. Undersampling approach
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import datetime
import warnings
warnings.filterwarnings('ignore')
import seaborn as sns
sns.set(font_scale=1.6)
from sklearn.preprocessing import StandardScaler
# ### 1. Preparing dataset for preprocessing
#
# In this part I will load data, briefly review the variables and prepare the 'y' value that will describe each loan as good or bad.
# In[2]:
data=pd.read_csv('../input/loan.csv',parse_dates=True)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 20)
# In[3]:
data.shape
# In[4]:
data.head()
# In[5]:
pd.value_counts(data.loan_status).to_frame().reset_index()
# There are 9 unique loan statuses. I will drop ones that are fully paid as these are historical entries. The next step will be to assign 0 (good) to Current loans and 1 (bad) to the rest, including defaulted and late loans, ones that were charged off, and ones in a grace period.
#
# The first two are self-explanatory. A charged-off loan is a debt that is deemed unlikely to be collected by the creditor, but the debt is not necessarily forgiven or written off entirely. A grace period is a provision in most loan contracts which allows payment to be received for a certain period of time after the actual due date.
# In[6]:
data = data[data.loan_status != 'Fully Paid']
data = data[data.loan_status != 'Does not meet the credit policy. Status:Fully Paid']
# In[7]:
data['rating'] = np.where((data.loan_status != 'Current'), 1, 0)
# In[8]:
pd.value_counts(data.rating).to_frame()
# In[9]:
print ('Bad Loan Ratio: %.2f%%' % (data.rating.sum()/len(data)*100))
# The data is strongly imbalanced; however, there are over 75 thousand bad loans, which should suffice for a model to learn.
# In[10]:
data.info()
# ### 2. Reviewing variables - drop and edit
#
# In this part I will review each non-numerical variable to either edit or drop it.
# There are two columns that describe a reason for the loan - title and purpose. As shown below, title has many more categories, which makes it too granular to be helpful for the model, so it will be dropped.
# In[11]:
pd.value_counts(data.title).to_frame()
# In[12]:
pd.value_counts(data.purpose).to_frame()
# The application type variable shows whether the loan is individual or joint - the small number of joint loans explains the huge number of NaN values in the variables dedicated to joint loans.
#
# Will change this variable to binary.
# In[13]:
pd.value_counts(data.application_type).to_frame()
# In[14]:
app_type={'INDIVIDUAL':0,'JOINT':1}
data.application_type.replace(app_type,inplace=True)
# In[15]:
pd.value_counts(data.term).to_frame()
# Term variable will be changed to numerical.
# In[16]:
term={' 36 months':36,' 60 months':60}
data.term.replace(term,inplace=True)
# The following two variables are dedicated to the credit rating of each individual. Will change them to numerical while making sure that the hierarchy is taken into account. The lowest number will mean the best grade/subgrade.
# In[17]:
pd.value_counts(data.grade).to_frame()
# In[18]:
grade=data.grade.unique()
grade.sort()
grade
# In[19]:
for x,e in enumerate(grade):
data.grade.replace(to_replace=e,value=x,inplace=True)
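# The alphabetically sorted grades A-G are mapped to 0-6, so a lower number means a better grade.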
# In[20]:
data.grade.unique()
# In[21]:
pd.value_counts(data.sub_grade).to_frame()
# In[22]:
sub_grade=data.sub_grade.unique()
sub_grade.sort()
sub_grade
# In[23]:
for x,e in enumerate(sub_grade):
data.sub_grade.replace(to_replace=e,value=x,inplace=True)
data.sub_grade.unique()
# The following two variables describe the employment title and length of employment. Title has 212 thousand categories, so it will be dropped. Length of employment should be sufficient to show whether an individual has a stable job.
# In[24]:
pd.value_counts(data.emp_title).to_frame()
# In[25]:
pd.value_counts(data.emp_length).to_frame()
# In[26]:
emp_len={'n/a':0,'< 1 year':1,'1 year':2,'2 years':3,'3 years':4,'4 years':5,'5 years':6,'6 years':7,'7 years':8,'8 years':9,'9 years':10,'10+ years':11}
data.emp_length.replace(emp_len,inplace=True)
data.emp_length=data.emp_length.replace(np.nan,0)
data.emp_length.unique()
# The home ownership variable should be informative for the model, as individuals who own their home should be much safer clients than ones who only rent it.
# In[27]:
pd.value_counts(data.home_ownership).to_frame()
# Verification status variable indicated whether the source of income of a client was verified.
# In[28]:
pd.value_counts(data.verification_status).to_frame()
# Payment plan variable will be dropped as it has only 3 'y' values.
# In[29]:
pd.value_counts(data.pymnt_plan).to_frame()
# Zip code information is too specific (there are 930 individual values), and there is no sense in making it more general by cutting it to two digits, as this would only describe the state, which the next variable already does. Zip code will be dropped.
# In[30]:
pd.value_counts(data.zip_code).to_frame()
# In[31]:
pd.value_counts(data.addr_state).to_frame()
# The next variable is the initial listing status of the loan. Possible values are W and F; it will be changed to binary.
# In[32]:
pd.value_counts(data.initial_list_status).to_frame()
# In[33]:
int_status={'w':0,'f':1}
data.initial_list_status.replace(int_status,inplace=True)
# Policy code has only 1 value so will be dropped.
# In[34]:
pd.value_counts(data.policy_code).to_frame()
# The recoveries variable informs about the post-charge-off gross recovery. Will transform this to a binary flag showing whether the loan was recovered. Will drop the recovery fee as it duplicates similar information.
# In[35]:
pd.value_counts(data.recoveries).to_frame()
# In[36]:
data['recovery'] = np.where((data.recoveries != 0.00), 1, 0)
# In[37]:
pd.value_counts(data.collection_recovery_fee).to_frame()
# There are couple variables that can be transformed to date time.
# In[38]:
data.issue_d=pd.to_datetime(data.issue_d)
# In[39]:
earliest_cr_line=pd.to_datetime(data.earliest_cr_line)
data.earliest_cr_line=earliest_cr_line.dt.year
# In[40]:
data.last_pymnt_d=pd.to_datetime(data.last_pymnt_d)
data.next_pymnt_d=pd.to_datetime(data.next_pymnt_d)
data.last_credit_pull_d=pd.to_datetime(data.last_credit_pull_d)
# Dropping all variables mentioned above.
# In[41]:
data.drop(['id','member_id','desc','loan_status','url', 'title','collection_recovery_fee','recoveries','policy_code','zip_code','emp_title','pymnt_plan'],axis=1,inplace=True)
# In[42]:
data.head(10)
# ### 3. Missing values
#
# There are observations that contain missing values; I will review and transform them variable by variable.
# Starting with defining a function that creates a data frame of metadata containing the count of null values and the data type of each variable.
# In[43]:
def meta (dataframe):
metadata = []
for f in data.columns:
# Counting null values
null = data[f].isnull().sum()
# Defining the data type
dtype = data[f].dtype
# Creating a Dict that contains all the metadata for the variable
f_dict = {
'varname': f,
'nulls':null,
'dtype': dtype
}
metadata.append(f_dict)
meta = pd.DataFrame(metadata, columns=['varname','nulls', 'dtype'])
meta.set_index('varname', inplace=True)
meta=meta.sort_values(by=['nulls'],ascending=False)
return meta
# In[44]:
meta(data)
# Variables dti_joint, annual_inc_joint and verification_status_joint have so many null values because there are only 510 joint loans. Will replace NaN with 0, and with 'None' for the verification status.
# In[45]:
data.dti_joint=data.dti_joint.replace(np.nan,0)
data.annual_inc_joint=data.annual_inc_joint.replace(np.nan,0)
data.verification_status_joint=data.verification_status_joint.replace(np.nan,'None')
# Investigating variables connected to open_acc_6m, which shows the number of open trades in the last 6 months. Variables open_il_6m, open_il_12m, open_il_24m, mths_since_rcnt_il, total_bal_il, il_util, open_rv_12m, open_rv_24m, max_bal_bc, all_util, inq_fi, total_cu_tl, inq_last_12m, collections_12_mths_ex_med have null values for the same rows - I will change them all to 0 as missing values indicate a lack of open trades.
# In[46]:
data.loc[(data.open_acc_6m.isnull())].info()
# In[47]:
variables1=['open_acc_6m', 'open_il_6m', 'open_il_12m', 'open_il_24m', 'mths_since_rcnt_il', 'total_bal_il', 'il_util', 'open_rv_12m', 'open_rv_24m', 'max_bal_bc', 'all_util', 'inq_fi', 'total_cu_tl', 'inq_last_12m','collections_12_mths_ex_med']
for e in variables1:
data[e]=data[e].replace(np.nan,0)
meta(data)
# Variables containing the months since the last occurrence of a specific action have plenty of null values, which I understand as a lack of that occurrence.
# In[48]:
pd.value_counts(data.mths_since_last_record).unique()
# In[49]:
pd.value_counts(data.mths_since_last_major_derog).unique()
# In[50]:
pd.value_counts(data.mths_since_last_delinq).unique()
# Null values in these columns can't be replaced with 0 as it would mean that the last occurrence was very recent. My understanding of these variables is that the key information is whether the specific action took place (delinquency, public record, worse rating), so I will turn these into binary categories of Yes (1), No (0).
# In[51]:
data.loc[(data.mths_since_last_delinq.notnull()),'delinq']=1
data.loc[(data.mths_since_last_delinq.isnull()),'delinq']=0
data.loc[(data.mths_since_last_major_derog.notnull()),'derog']=1
data.loc[(data.mths_since_last_major_derog.isnull()),'derog']=0
data.loc[(data.mths_since_last_record.notnull()),'public_record']=1
data.loc[(data.mths_since_last_record.isnull()),'public_record']=0
data.drop(['mths_since_last_delinq','mths_since_last_major_derog','mths_since_last_record'],axis=1,inplace=True)
meta(data)
# Investigating tot_coll_amt, tot_cur_bal, total_rev_hi_lim - these are three totals that have missing values for the same observations. I will change them to 0 as they should mean that the total is 0.
# In[52]:
data.loc[(data.tot_coll_amt.isnull())].info()
# In[53]:
variables2=['tot_coll_amt', 'tot_cur_bal', 'total_rev_hi_lim']
for e in variables2:
data[e]=data[e].replace(np.nan,0)
meta(data)
# Variable revol_util is revolving line utilization rate, or the amount of credit the borrower is using relative to all available revolving credit.
# In[54]:
data.loc[(data.revol_util.isnull())].head(10)
# In[55]:
| pd.value_counts(data.revol_util) | pandas.value_counts |
'''
Created on 19 May 2020
@author: spasz
@brief: Trend indicator. Rising/Falling, based on the data given as an argument.
'''
from scipy import signal
import numpy
import datetime
import pandas as pd
import matplotlib.pyplot as plt
from core.indicator import indicator
class trend(indicator):
def __init__(self, data, ttype='rising'):
indicator.__init__(self, 'Trend', 'trend', data.index)
self.type = ttype
self.trends = self.Init(data)
def Init(self, data):
'''Init trend based on given data'''
if (self.type == 'rising'):
return self.FindUptrends(data)
return self.FindDowntrends(data)
@staticmethod
def FindMaxPeaks(data, n=7):
'''Return series of max points from given data'''
maxs = data.iloc[signal.argrelextrema(
data.values, numpy.greater_equal, order=n)[0]]
return maxs
@staticmethod
def FindMinPeaks(data, n=7):
'''Return series of min points from given data'''
mins = data.iloc[signal.argrelextrema(
data.values, numpy.less_equal, order=n)[0]]
return mins
@staticmethod
def GetTrendDaysLength(trend):
''' Returns trend days length '''
delta = trend.index[-1]-trend.index[0]
return delta.days
def FindUptrends(self, data, days=6, n=2):
        ''' Uptrend calculation is based on mins (higher lows) '''
uptrends = []
trend = | pd.Series() | pandas.Series |
# -*- coding: utf-8 -*-
import pandas as pd
def mne_channel_extract(raw, name):
"""Channel array extraction from MNE.
Select one or several channels by name and returns them in a dataframe.
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
name : str or list
Channel's name(s).
Returns
----------
DataFrame
A DataFrame or Series containing the channel(s).
Example
----------
>>> import neurokit2 as nk
>>> import mne
>>>
>>> raw = mne.io.read_raw_fif(mne.datasets.sample.data_path() +
... '/MEG/sample/sample_audvis_raw.fif', preload=True) #doctest: +SKIP
>>>
>>> raw_channel = nk.mne_channel_extract(raw, "EEG 055") # doctest: +SKIP
"""
if isinstance(name, list) is False:
name = [name]
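    # Slicing an mne Raw object returns a (data, times) tuple; only the data array is kept.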
channels, __ = raw.copy().pick_channels(name)[:]
if len(name) > 1:
channels = pd.DataFrame(channels.T, columns=name)
else:
channels = | pd.Series(channels[0]) | pandas.Series |
import numpy as nmp
import pandas as pnd
import theano.tensor as tns
import pymc3 as pmc
##
def bin_lpdf(r, R, theta):
return tns.gammaln(R + 1.0) - tns.gammaln(r + 1.0) - tns.gammaln(R - r + 1.0)\
+ r*tns.log(theta) + (R - r)*tns.log1p(-theta)
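# bin_lpdf is the Binomial log-pmf: log C(R, r) + r*log(theta) + (R - r)*log(1 - theta),
# with the binomial coefficient expressed through gammaln for numerical stability.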
##
def binmix_logp_fcn(R, theta, lw):
def logp(r):
lp = lw + bin_lpdf(r, R, theta).sum(1)
return pmc.logsumexp(lp, 0)#.sum()
return logp
##
def betabin_lpdf(r, R, a, b):
return tns.gammaln(R + 1.0) - tns.gammaln(r + 1.0) - tns.gammaln(R - r + 1.0)\
+ tns.gammaln(r + a) + tns.gammaln(R - r + b) - tns.gammaln(R + a + b)\
+ tns.gammaln(a + b) - tns.gammaln(a) - tns.gammaln(b)
##
def betabinmix_logp_fcn(R, u, theta, lw):
a = u * theta
b = u * (1.0 - theta)
def logp(r):
lp = lw + betabin_lpdf(r, R, a, b).sum(1)
return pmc.logsumexp(lp, 0)#.sum()
return logp
##
def cov_expquad(x1, x2, tau):
return tns.exp(-0.5 * tau * (x1 - x2)**2)
##
def cov_exp(x1, x2, tau):
return tns.exp(-tns.sqrt(tau) * tns.abs_(x1 - x2))
##
def cov_mat32(x1, x2, tau):
r = tns.abs_(x1 - x2)
c = tns.sqrt(3.0) * r * tns.sqrt(tau)
return (1.0 + c) * tns.exp(-c)
##
def cov_mat52(x1, x2, tau):
r = tns.abs_(x1 - x2)
c = tns.sqrt(5.0) * r * tns.sqrt(tau)
return (1.0 + c + 5.0/3.0 * r**2 * tau) * tns.exp(-c)
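# The four kernels above are the squared-exponential, exponential (Matern-1/2),
# Matern-3/2 and Matern-5/2 covariance functions, all parameterised by an inverse
# squared lengthscale tau (tau = 1/lengthscale**2).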
##
def stick_breaking_log(u):
"""Return log of weights from stick-breaking process."""
lu = tns.concatenate((tns.log(u), [0.0]))
cs = tns.concatenate(([0.0], tns.cumsum(tns.log1p(-u))))
lw = lu + cs
return lw
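# In log space the stick-breaking construction is log w_k = log u_k + sum_{j<k} log(1 - u_j),
# with the final component taking the remainder of the stick (the appended 0.0 acts as log u_K = 0, i.e. u_K = 1).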
##
COV_FCNS = {
'ExpQ': cov_expquad,
'Exp': cov_exp,
'Mat32': cov_mat32,
'Mat52': cov_mat52
}
##
def calculate_cluster_weights(trace, threshold, alpha):
w_samples = nmp.exp(trace['lw'])
# re-weight cluster weights
w = nmp.median(w_samples, 0)
wids = w < threshold
w_samples[:, wids] = 0
w_samples = w_samples / nmp.sum(w_samples, 1, keepdims=True)
# median, credible interval
w_lo, w, w_hi = nmp.quantile(w_samples, [0.5*alpha, 0.5, 1 - 0.5*alpha], axis=0)
#
return pnd.DataFrame({
'CLUSTERID': nmp.arange(w.size) + 1,
'W': w,
'W_LO': w_lo,
'W_HI': w_hi
})
##
def calculate_cluster_centres(data, trace, alpha):
phi_samples = trace['phi']
phi_lo, phi, phi_hi = nmp.quantile(phi_samples, [0.5*alpha, 0.5, 1 - 0.5*alpha], axis=0)
sid = data['samples'].SAMPLEID
cid = nmp.arange(phi_samples.shape[1]) + 1
centres = pnd.concat({
'PHI': pnd.DataFrame(phi, index=cid, columns=sid),
'PHI_LO': pnd.DataFrame(phi_lo, index=cid, columns=sid),
'PHI_HI': pnd.DataFrame(phi_hi, index=cid, columns=sid)
}, axis=1).stack().reset_index().rename(columns={'level_0': 'CLUSTERID'})
if 'TIME2' in data['samples']:
centres = pnd.merge(centres, data['samples'][['SAMPLEID', 'TIME2']], how='left', on = 'SAMPLEID')
centres = centres[['CLUSTERID', 'SAMPLEID', 'TIME2', 'PHI', 'PHI_LO', 'PHI_HI']]
#
return centres
##
def calculate_ccf_and_hard_clusters(data, trace, threshold, alpha):
r, R, VAF0 = data['r'].values.T, data['R'].values.T, data['VAF0'].values.T
r, R, VAF0 = r[None, None, :, :], R[None, None, :, :], VAF0[None, None, :, :]
phi, lw = trace.phi, trace.lw
theta = VAF0 * phi[:, :, :, None]
# re-weight cluster weights
w_samples = nmp.exp(lw)
w = nmp.median(w_samples, 0)
wids = w < threshold
w_samples[:, wids] = 0
w_samples = w_samples / nmp.sum(w_samples, 1, keepdims=True)
lw = nmp.log(w_samples)
# calculate logliks
if 'u' in trace.varnames: # implies BetaBinomial model
u = trace.u[:, None, :, None]
a = u * theta
b = u * (1.0 - theta)
lp = betabin_lpdf(r, R, a, b).eval()
else: # implies Binomial model
lp = bin_lpdf(r, R, theta).eval()
# ppd
w = nmp.exp(lp + lw[:, :, None, None])
ppd_ = nmp.sum(w * R, axis=1)
ppd_lo, ppd, ppd_hi = nmp.quantile(ppd_, [alpha * 0.5, 0.5, 1 - alpha * 0.5], axis=0)
ppd = pnd.concat({
'PPD': pnd.DataFrame(ppd.T, index=data['r'].index, columns=data['r'].columns),
'PPD_LO': pnd.DataFrame(ppd_lo.T, index=data['r'].index, columns=data['r'].columns),
'PPD_HI': pnd.DataFrame(ppd_hi.T, index=data['r'].index, columns=data['r'].columns)
}, axis=1)
lppd = nmp.ma.masked_invalid(nmp.log(ppd_)).sum(axis=(1,2))
lppd_lo, lppd, lppd_hi = nmp.quantile(lppd, [alpha * 0.5, 0.5, 1 - alpha * 0.5])
lppd = | pnd.DataFrame({'PPD': lppd, 'PPD_LO': lppd_lo, 'PPD_HI': lppd_hi}, index=[0]) | pandas.DataFrame |
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from textwrap import dedent
from parameterized import parameterized
import numpy as np
from numpy import nan
import pandas as pd
from zipline._protocol import handle_non_market_minutes, BarData
from zipline.assets import Asset, Equity
from zipline.errors import (
HistoryInInitialize,
HistoryWindowStartsBeforeData,
)
from zipline.finance.asset_restrictions import NoRestrictions
from zipline.testing import (
create_minute_df_for_asset,
str_to_seconds,
MockDailyBarReader,
)
import zipline.testing.fixtures as zf
OHLC = ['open', 'high', 'low', 'close']
OHLCP = OHLC + ['price']
ALL_FIELDS = OHLCP + ['volume']
class WithHistory(zf.WithCreateBarData, zf.WithDataPortal):
TRADING_START_DT = TRADING_ENV_MIN_DATE = START_DATE = pd.Timestamp(
'2014-01-03',
tz='UTC',
)
TRADING_END_DT = END_DATE = pd.Timestamp('2016-01-29', tz='UTC')
SPLIT_ASSET_SID = 4
DIVIDEND_ASSET_SID = 5
MERGER_ASSET_SID = 6
HALF_DAY_TEST_ASSET_SID = 7
SHORT_ASSET_SID = 8
# asset1:
# - 2014-03-01 (rounds up to TRADING_START_DT) to 2016-01-29.
# - every minute/day.
# asset2:
# - 2015-01-05 to 2015-12-31
# - every minute/day.
# asset3:
# - 2015-01-05 to 2015-12-31
# - trades every 10 minutes
# SPLIT_ASSET:
# - 2015-01-04 to 2015-12-31
# - trades every minute
# - splits on 2015-01-05 and 2015-01-06
# DIVIDEND_ASSET:
# - 2015-01-04 to 2015-12-31
# - trades every minute
# - dividends on 2015-01-05 and 2015-01-06
# MERGER_ASSET
# - 2015-01-04 to 2015-12-31
# - trades every minute
# - merger on 2015-01-05 and 2015-01-06
@classmethod
def init_class_fixtures(cls):
super().init_class_fixtures()
cls.trading_days = cls.trading_calendar.sessions_in_range(
cls.TRADING_START_DT,
cls.TRADING_END_DT
)
cls.ASSET1 = cls.asset_finder.retrieve_asset(1)
cls.ASSET2 = cls.asset_finder.retrieve_asset(2)
cls.ASSET3 = cls.asset_finder.retrieve_asset(3)
cls.SPLIT_ASSET = cls.asset_finder.retrieve_asset(
cls.SPLIT_ASSET_SID,
)
cls.DIVIDEND_ASSET = cls.asset_finder.retrieve_asset(
cls.DIVIDEND_ASSET_SID,
)
cls.MERGER_ASSET = cls.asset_finder.retrieve_asset(
cls.MERGER_ASSET_SID,
)
cls.HALF_DAY_TEST_ASSET = cls.asset_finder.retrieve_asset(
cls.HALF_DAY_TEST_ASSET_SID,
)
cls.SHORT_ASSET = cls.asset_finder.retrieve_asset(
cls.SHORT_ASSET_SID,
)
@classmethod
def make_equity_info(cls):
jan_5_2015 = pd.Timestamp('2015-01-05', tz='UTC')
day_after_12312015 = pd.Timestamp('2016-01-04', tz='UTC')
return pd.DataFrame.from_dict(
{
1: {
'start_date': pd.Timestamp('2014-01-03', tz='UTC'),
'end_date': cls.TRADING_END_DT,
'symbol': 'ASSET1',
'exchange': "TEST",
},
2: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'ASSET2',
'exchange': "TEST",
},
3: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'ASSET3',
'exchange': "TEST",
},
cls.SPLIT_ASSET_SID: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'SPLIT_ASSET',
'exchange': "TEST",
},
cls.DIVIDEND_ASSET_SID: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'DIVIDEND_ASSET',
'exchange': "TEST",
},
cls.MERGER_ASSET_SID: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'MERGER_ASSET',
'exchange': "TEST",
},
cls.HALF_DAY_TEST_ASSET_SID: {
'start_date': pd.Timestamp('2014-07-02', tz='UTC'),
'end_date': day_after_12312015,
'symbol': 'HALF_DAY_TEST_ASSET',
'exchange': "TEST",
},
cls.SHORT_ASSET_SID: {
'start_date': pd.Timestamp('2015-01-05', tz='UTC'),
'end_date': pd.Timestamp('2015-01-06', tz='UTC'),
'symbol': 'SHORT_ASSET',
'exchange': "TEST",
}
},
orient='index',
)
@classmethod
def make_splits_data(cls):
return pd.DataFrame([
{
'effective_date': str_to_seconds('2015-01-06'),
'ratio': 0.25,
'sid': cls.SPLIT_ASSET_SID,
},
{
'effective_date': str_to_seconds('2015-01-07'),
'ratio': 0.5,
'sid': cls.SPLIT_ASSET_SID,
},
])
@classmethod
def make_mergers_data(cls):
return pd.DataFrame([
{
'effective_date': str_to_seconds('2015-01-06'),
'ratio': 0.25,
'sid': cls.MERGER_ASSET_SID,
},
{
'effective_date': str_to_seconds('2015-01-07'),
'ratio': 0.5,
'sid': cls.MERGER_ASSET_SID,
}
])
@classmethod
def make_dividends_data(cls):
return pd.DataFrame([
{
# only care about ex date, the other dates don't matter here
'ex_date':
pd.Timestamp('2015-01-06', tz='UTC').to_datetime64(),
'record_date':
pd.Timestamp('2015-01-06', tz='UTC').to_datetime64(),
'declared_date':
pd.Timestamp('2015-01-06', tz='UTC').to_datetime64(),
'pay_date':
pd.Timestamp('2015-01-06', tz='UTC').to_datetime64(),
'amount': 2.0,
'sid': cls.DIVIDEND_ASSET_SID,
},
{
'ex_date':
pd.Timestamp('2015-01-07', tz='UTC').to_datetime64(),
'record_date':
pd.Timestamp('2015-01-07', tz='UTC').to_datetime64(),
'declared_date':
pd.Timestamp('2015-01-07', tz='UTC').to_datetime64(),
'pay_date':
pd.Timestamp('2015-01-07', tz='UTC').to_datetime64(),
'amount': 4.0,
'sid': cls.DIVIDEND_ASSET_SID,
}],
columns=[
'ex_date',
'record_date',
'declared_date',
'pay_date',
'amount',
'sid'],
)
@classmethod
def make_adjustment_writer_equity_daily_bar_reader(cls):
return MockDailyBarReader(
dates=cls.trading_calendar.sessions_in_range(
cls.TRADING_START_DT,
cls.TRADING_END_DT,
),
)
def verify_regular_dt(self, idx, dt, mode, fields=None, assets=None):
if mode == 'daily':
freq = '1d'
else:
freq = '1m'
cal = self.trading_calendar
equity_cal = self.trading_calendars[Equity]
def reindex_to_primary_calendar(a, field):
"""
Reindex an array of prices from a window on the NYSE
calendar by the window on the primary calendar with the same
dt and window size.
"""
if mode == 'daily':
dts = cal.sessions_window(dt, -9)
# `dt` may not be a session on the equity calendar, so
# find the next valid session.
equity_sess = equity_cal.minute_to_session_label(dt)
equity_dts = equity_cal.sessions_window(equity_sess, -9)
elif mode == 'minute':
dts = cal.minutes_window(dt, -10)
equity_dts = equity_cal.minutes_window(dt, -10)
output = pd.Series(
index=equity_dts,
data=a,
).reindex(dts)
# Fill after reindexing, to ensure we don't forward fill
# with values that are being dropped.
if field == 'volume':
return output.fillna(0)
elif field == 'price':
return output.fillna(method='ffill')
else:
return output
fields = fields if fields is not None else ALL_FIELDS
assets = assets if assets is not None else [self.ASSET2, self.ASSET3]
bar_data = self.create_bardata(
simulation_dt_func=lambda: dt,
)
check_internal_consistency(
bar_data, assets, fields, 10, freq
)
for field in fields:
for asset in assets:
asset_series = bar_data.history(asset, field, 10, freq)
base = MINUTE_FIELD_INFO[field] + 2
if idx < 9:
missing_count = 9 - idx
present_count = 9 - missing_count
if field in OHLCP:
if asset == self.ASSET2:
# asset2 should have some leading nans
np.testing.assert_array_equal(
np.full(missing_count, np.nan),
asset_series[0:missing_count]
)
# asset2 should also have some real values
np.testing.assert_array_equal(
np.array(range(base,
base + present_count + 1)),
asset_series[(9 - present_count):]
)
if asset == self.ASSET3:
# asset3 should be NaN the entire time
np.testing.assert_array_equal(
np.full(10, np.nan),
asset_series
)
elif field == 'volume':
if asset == self.ASSET2:
# asset2 should have some zeros (instead of nans)
np.testing.assert_array_equal(
np.zeros(missing_count),
asset_series[0:missing_count]
)
# and some real values
np.testing.assert_array_equal(
np.array(
range(base, base + present_count + 1)
) * 100,
asset_series[(9 - present_count):]
)
if asset == self.ASSET3:
# asset3 is all zeros, no volume yet
np.testing.assert_array_equal(
np.zeros(10),
asset_series
)
else:
# asset3 should have data every 10 minutes
# construct an array full of nans, put something in the
# right slot, and test for comparison
position_from_end = ((idx + 1) % 10) + 1
# asset3's baseline data is 9 NaNs, then 11, then 9 NaNs,
# then 21, etc. for idx 9 to 19, value_for_asset3 should
# be a baseline of 11 (then adjusted for the individual
# field), thus the rounding down to the nearest 10.
value_for_asset3 = (((idx + 1) // 10) * 10) + \
MINUTE_FIELD_INFO[field] + 1
if field in OHLC:
asset3_answer_key = np.full(10, np.nan)
asset3_answer_key[-position_from_end] = \
value_for_asset3
asset3_answer_key = reindex_to_primary_calendar(
asset3_answer_key,
field,
)
if asset == self.ASSET2:
np.testing.assert_array_equal(
reindex_to_primary_calendar(
np.array(
range(base + idx - 9, base + idx + 1)
),
field,
),
asset_series
)
if asset == self.ASSET3:
np.testing.assert_array_equal(
asset3_answer_key,
asset_series
)
elif field == 'volume':
asset3_answer_key = np.zeros(10)
asset3_answer_key[-position_from_end] = \
value_for_asset3 * 100
asset3_answer_key = reindex_to_primary_calendar(
asset3_answer_key,
field,
)
if asset == self.ASSET2:
np.testing.assert_array_equal(
reindex_to_primary_calendar(
np.array(
range(base + idx - 9, base + idx + 1)
) * 100,
field,
),
asset_series
)
if asset == self.ASSET3:
np.testing.assert_array_equal(
asset3_answer_key,
asset_series
)
elif field == 'price':
# price is always forward filled
# asset2 has prices every minute, so it's easy
if asset == self.ASSET2:
# at idx 9, the data is 2 to 11
np.testing.assert_array_equal(
reindex_to_primary_calendar(
range(idx - 7, idx + 3),
field=field,
),
asset_series
)
if asset == self.ASSET3:
# Second part begins on the session after
# `position_from_end` on the NYSE calendar.
second_begin = (
dt - equity_cal.day * (position_from_end - 1)
)
# First part goes up until the start of the
# second part, because we forward-fill.
first_end = second_begin - cal.day
first_part = asset_series[:first_end]
second_part = asset_series[second_begin:]
decile_count = ((idx + 1) // 10)
# in our test data, asset3 prices will be nine
# NaNs, then ten 11s, ten 21s, ten 31s...
if len(second_part) >= 10:
np.testing.assert_array_equal(
np.full(len(first_part), np.nan),
first_part
)
elif decile_count == 1:
np.testing.assert_array_equal(
np.full(len(first_part), np.nan),
first_part
)
np.testing.assert_array_equal(
np.array([11] * len(second_part)),
second_part
)
else:
np.testing.assert_array_equal(
np.array([decile_count * 10 - 9] *
len(first_part)),
first_part
)
np.testing.assert_array_equal(
np.array([decile_count * 10 + 1] *
len(second_part)),
second_part
)
def check_internal_consistency(bar_data, assets, fields, bar_count, freq):
if isinstance(assets, Asset):
asset_list = [assets]
else:
asset_list = assets
if isinstance(fields, str):
field_list = [fields]
else:
field_list = fields
multi_field_dict = {
asset: bar_data.history(asset, field_list, bar_count, freq)
for asset in asset_list
}
multi_asset_dict = {
field: bar_data.history(asset_list, field, bar_count, freq)
for field in fields
}
panel = bar_data.history(asset_list, field_list, bar_count, freq)
for field in field_list:
# make sure all the different query forms are internally
# consistent
for asset in asset_list:
series = bar_data.history(asset, field, bar_count, freq)
np.testing.assert_array_equal(
series,
multi_asset_dict[field][asset]
)
np.testing.assert_array_equal(
series,
multi_field_dict[asset][field]
)
np.testing.assert_array_equal(
series,
panel[field][asset]
)
# each minute's OHLCV data has a consistent offset for each field.
# for example, the open is always 1 higher than the close, the high
# is always 2 higher than the close, etc.
MINUTE_FIELD_INFO = {
'open': 1,
'high': 2,
'low': -1,
'close': 0,
'price': 0,
'volume': 0, # unused, later we'll multiply by 100
}
class MinuteEquityHistoryTestCase(WithHistory,
zf.WithMakeAlgo,
zf.ZiplineTestCase):
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = True
DATA_PORTAL_FIRST_TRADING_DAY = zf.alias('TRADING_START_DT')
@classmethod
def make_equity_minute_bar_data(cls):
equities_cal = cls.trading_calendars[Equity]
data = {}
sids = {2, 5, cls.SHORT_ASSET_SID, cls.HALF_DAY_TEST_ASSET_SID}
for sid in sids:
asset = cls.asset_finder.retrieve_asset(sid)
data[sid] = create_minute_df_for_asset(
equities_cal,
asset.start_date,
asset.end_date,
start_val=2,
)
data[1] = create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2014-01-03', tz='utc'),
pd.Timestamp('2016-01-29', tz='utc'),
start_val=2,
)
asset2 = cls.asset_finder.retrieve_asset(2)
data[asset2.sid] = create_minute_df_for_asset(
equities_cal,
asset2.start_date,
equities_cal.previous_session_label(asset2.end_date),
start_val=2,
minute_blacklist=[
pd.Timestamp('2015-01-08 14:31', tz='UTC'),
pd.Timestamp('2015-01-08 21:00', tz='UTC'),
],
)
# Start values are crafted so that the thousands place are equal when
# adjustments are applied correctly.
# The splits and mergers are defined as 4:1 then 2:1 ratios, so the
# prices approximate that adjustment by quartering and then halving
# the thousands place.
data[cls.MERGER_ASSET_SID] = data[cls.SPLIT_ASSET_SID] = pd.concat((
create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2015-01-05', tz='UTC'),
pd.Timestamp('2015-01-05', tz='UTC'),
start_val=8000),
create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2015-01-06', tz='UTC'),
pd.Timestamp('2015-01-06', tz='UTC'),
start_val=2000),
create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2015-01-07', tz='UTC'),
pd.Timestamp('2015-01-07', tz='UTC'),
start_val=1000),
create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2015-01-08', tz='UTC'),
pd.Timestamp('2015-01-08', tz='UTC'),
start_val=1000)
))
asset3 = cls.asset_finder.retrieve_asset(3)
data[3] = create_minute_df_for_asset(
equities_cal,
asset3.start_date,
asset3.end_date,
start_val=2,
interval=10,
)
return data.items()
def test_history_in_initialize(self):
algo_text = dedent(
"""\
from zipline.api import history
def initialize(context):
history([1], 10, '1d', 'price')
def handle_data(context, data):
pass
"""
)
algo = self.make_algo(script=algo_text)
with self.assertRaises(HistoryInInitialize):
algo.run()
def test_negative_bar_count(self):
"""
Negative bar counts leak future information.
"""
with self.assertRaisesRegex(
ValueError,
"bar_count must be >= 1, but got -1"
):
self.data_portal.get_history_window(
[self.ASSET1],
pd.Timestamp('2015-01-07 14:35', tz='UTC'),
-1,
'1d',
'close',
'minute',
)
def test_daily_splits_and_mergers(self):
# self.SPLIT_ASSET and self.MERGER_ASSET had splits/mergers
# on 1/6 and 1/7
jan5 = pd.Timestamp('2015-01-05', tz='UTC')
for asset in [self.SPLIT_ASSET, self.MERGER_ASSET]:
# before any of the adjustments: the prior session (no data yet) and 1/5
window1 = self.data_portal.get_history_window(
[asset],
self.trading_calendar.open_and_close_for_session(jan5)[1],
2,
'1d',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(np.array([np.nan, 8389]), window1)
# straddling the first event
window2 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-06 14:35', tz='UTC'),
2,
'1d',
'close',
'minute',
)[asset]
# Value from 1/5 should be quartered
np.testing.assert_array_equal(
[2097.25,
# Split occurs. The value of the thousands place should
# match.
2004],
window2
)
# straddling both events!
window3 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-07 14:35', tz='UTC'),
3,
'1d',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(
[1048.625, 1194.50, 1004.0],
window3
)
# after last event
window4 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-08 14:40', tz='UTC'),
2,
'1d',
'close',
'minute',
)[asset]
# should not be adjusted
np.testing.assert_array_equal([1389, 1009], window4)
def test_daily_dividends(self):
# self.DIVIDEND_ASSET had dividends on 1/6 and 1/7
jan5 = pd.Timestamp('2015-01-05', tz='UTC')
asset = self.DIVIDEND_ASSET
# before any of the dividends
window1 = self.data_portal.get_history_window(
[asset],
self.trading_calendar.session_close(jan5),
2,
'1d',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(np.array([np.nan, 391]), window1)
# straddling the first event
window2 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-06 14:35', tz='UTC'),
2,
'1d',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(
[383.18, # 391 (last close) * 0.98 (first div)
# Dividend occurs prior.
396],
window2
)
# straddling both events!
window3 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-07 14:35', tz='UTC'),
3,
'1d',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(
[367.853, # 391 (last close) * 0.98 * 0.96 (both)
749.76, # 781 (last_close) * 0.96 (second div)
786], # no adjustment
window3
)
# after last event
window4 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-08 14:40', tz='UTC'),
2,
'1d',
'close',
'minute',
)[asset]
# should not be adjusted
np.testing.assert_array_equal([1171, 1181], window4)
def test_minute_before_assets_trading(self):
# since asset2 and asset3 both started trading on 1/5/2015, let's do
# some history windows that are completely before that
minutes = self.trading_calendar.minutes_for_session(
self.trading_calendar.previous_session_label(pd.Timestamp(
'2015-01-05', tz='UTC'
))
)[0:60]
for idx, minute in enumerate(minutes):
bar_data = self.create_bardata(
lambda: minute,
)
check_internal_consistency(
bar_data, [self.ASSET2, self.ASSET3], ALL_FIELDS, 10, '1m'
)
for field in ALL_FIELDS:
# OHLCP should be NaN
# Volume should be 0
asset2_series = bar_data.history(self.ASSET2, field, 10, '1m')
asset3_series = bar_data.history(self.ASSET3, field, 10, '1m')
if field == 'volume':
np.testing.assert_array_equal(np.zeros(10), asset2_series)
np.testing.assert_array_equal(np.zeros(10), asset3_series)
else:
np.testing.assert_array_equal(
np.full(10, np.nan),
asset2_series
)
np.testing.assert_array_equal(
np.full(10, np.nan),
asset3_series
)
@parameterized.expand([
('open_sid_2', 'open', 2),
('high_sid_2', 'high', 2),
('low_sid_2', 'low', 2),
('close_sid_2', 'close', 2),
('volume_sid_2', 'volume', 2),
('open_sid_3', 'open', 3),
('high_sid_3', 'high', 3),
('low_sid_3', 'low', 3),
('close_sid_3', 'close', 3),
('volume_sid_3', 'volume', 3),
])
def test_minute_regular(self, name, field, sid):
# asset2 and asset3 both started on 1/5/2015, but asset3 trades every
# 10 minutes
asset = self.asset_finder.retrieve_asset(sid)
# Check the first hour of equities trading.
minutes = self.trading_calendars[Equity].minutes_for_session(
pd.Timestamp('2015-01-05', tz='UTC')
)[0:60]
for idx, minute in enumerate(minutes):
self.verify_regular_dt(idx, minute, 'minute',
assets=[asset],
fields=[field])
def test_minute_sunday_midnight(self):
# Most trading calendars aren't open at midnight on Sunday.
sunday_midnight = pd.Timestamp('2015-01-09', tz='UTC')
# Find the closest prior minute when the trading calendar was
# open (note that if the calendar is open at `sunday_midnight`,
# this will be `sunday_midnight`).
trading_minutes = self.trading_calendar.all_minutes
last_minute = trading_minutes[trading_minutes <= sunday_midnight][-1]
sunday_midnight_bar_data = self.create_bardata(lambda: sunday_midnight)
last_minute_bar_data = self.create_bardata(lambda: last_minute)
# Ensure that we get the same results at midnight on Sunday as
# the last open minute.
with handle_non_market_minutes(sunday_midnight_bar_data):
for field in ALL_FIELDS:
np.testing.assert_array_equal(
sunday_midnight_bar_data.history(
self.ASSET2,
field,
30,
'1m',
),
last_minute_bar_data.history(self.ASSET2, field, 30, '1m')
)
def test_minute_after_asset_stopped(self):
# SHORT_ASSET's last day was 2015-01-06
# get some history windows that straddle the end
minutes = self.trading_calendars[Equity].minutes_for_session(
pd.Timestamp('2015-01-07', tz='UTC')
)[0:60]
for idx, minute in enumerate(minutes):
bar_data = self.create_bardata(
lambda: minute
)
check_internal_consistency(
bar_data, self.SHORT_ASSET, ALL_FIELDS, 30, '1m'
)
# Reset data portal because it has advanced past next test date.
data_portal = self.make_data_portal()
# close high low open price volume
# 2015-01-06 20:47:00+00:00 768 770 767 769 768 76800
# 2015-01-06 20:48:00+00:00 769 771 768 770 769 76900
# 2015-01-06 20:49:00+00:00 770 772 769 771 770 77000
# 2015-01-06 20:50:00+00:00 771 773 770 772 771 77100
# 2015-01-06 20:51:00+00:00 772 774 771 773 772 77200
# 2015-01-06 20:52:00+00:00 773 775 772 774 773 77300
# 2015-01-06 20:53:00+00:00 774 776 773 775 774 77400
# 2015-01-06 20:54:00+00:00 775 777 774 776 775 77500
# 2015-01-06 20:55:00+00:00 776 778 775 777 776 77600
# 2015-01-06 20:56:00+00:00 777 779 776 778 777 77700
# 2015-01-06 20:57:00+00:00 778 780 777 779 778 77800
# 2015-01-06 20:58:00+00:00 779 781 778 780 779 77900
# 2015-01-06 20:59:00+00:00 780 782 779 781 780 78000
# 2015-01-06 21:00:00+00:00 781 783 780 782 781 78100
# 2015-01-07 14:31:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:32:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:33:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:34:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:35:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:36:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:37:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:38:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:39:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:40:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:41:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:42:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:43:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:44:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:45:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:46:00+00:00 NaN NaN NaN NaN NaN 0
# choose a window that contains the last minute of the asset
window_start = pd.Timestamp('2015-01-06 20:47', tz='UTC')
window_end = pd.Timestamp('2015-01-07 14:46', tz='UTC')
bar_data = BarData(
data_portal=data_portal,
simulation_dt_func=lambda: minutes[15],
data_frequency='minute',
restrictions=NoRestrictions(),
trading_calendar=self.trading_calendar,
)
bar_count = len(
self.trading_calendar.minutes_in_range(window_start, window_end)
)
window = bar_data.history(
self.SHORT_ASSET,
ALL_FIELDS,
bar_count,
'1m',
)
# Window should start with 14 values and end with 16 NaNs/0s.
for field in ALL_FIELDS:
if field == 'volume':
np.testing.assert_array_equal(
range(76800, 78101, 100),
window['volume'][0:14]
)
np.testing.assert_array_equal(
np.zeros(16),
window['volume'][-16:]
)
else:
np.testing.assert_array_equal(
np.array(range(768, 782)) + MINUTE_FIELD_INFO[field],
window[field][0:14]
)
np.testing.assert_array_equal(
np.full(16, np.nan),
window[field][-16:]
)
# now do a smaller window that is entirely contained after the asset
# ends
window = bar_data.history(self.SHORT_ASSET, ALL_FIELDS, 5, '1m')
for field in ALL_FIELDS:
if field == 'volume':
np.testing.assert_array_equal(np.zeros(5), window['volume'])
else:
np.testing.assert_array_equal(np.full(5, np.nan),
window[field])
def test_minute_splits_and_mergers(self):
# self.SPLIT_ASSET and self.MERGER_ASSET had splits/mergers
# on 1/6 and 1/7
jan5 = pd.Timestamp('2015-01-05', tz='UTC')
# the assets' close column starts at 2 on the first minute of
# 1/5, then goes up one per minute forever
for asset in [self.SPLIT_ASSET, self.MERGER_ASSET]:
# before any of the adjustments, last 10 minutes of jan 5
equity_cal = self.trading_calendars[Equity]
window1 = self.data_portal.get_history_window(
[asset],
equity_cal.open_and_close_for_session(jan5)[1],
10,
'1m',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(
np.array(range(8380, 8390)), window1)
# straddling the first event - begins with the last 5 equity
# minutes on 2015-01-05, ends with the first 5 on
# 2015-01-06.
window2_start = pd.Timestamp('2015-01-05 20:56', tz='UTC')
window2_end = pd.Timestamp('2015-01-06 14:35', tz='UTC')
window2_count = len(self.trading_calendar.minutes_in_range(
window2_start,
window2_end,
))
window2 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-06 14:35', tz='UTC'),
window2_count,
'1m',
'close',
'minute',
)[asset]
# five minutes from 1/5 should be quartered
np.testing.assert_array_equal(
[2096.25,
2096.5,
2096.75,
2097,
2097.25],
window2[:5],
)
# Split occurs. The value of the thousands place should
# match.
np.testing.assert_array_equal(
[2000,
2001,
2002,
2003,
2004],
window2[-5:],
)
# straddling both events! on the equities calendar this is 5
# minutes of 1/7, 390 of 1/6, and 5 minutes of 1/5.
window3_start = pd.Timestamp('2015-01-05 20:56', tz='UTC')
window3_end = pd.Timestamp('2015-01-07 14:35', tz='UTC')
window3_minutes = self.trading_calendar.minutes_in_range(
window3_start,
window3_end,
)
window3_count = len(window3_minutes)
window3 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-07 14:35', tz='UTC'),
window3_count,
'1m',
'close',
'minute',
)[asset]
# first five minutes should be 8385-8389, but divided by 8 (both events applied)
np.testing.assert_array_equal(
[1048.125, 1048.25, 1048.375, 1048.5, 1048.625],
window3[0:5]
)
# next 390 minutes (the 2015-01-06 session) should be
# 2000-2390, but halved
middle_day_open_i = window3_minutes.searchsorted(
pd.Timestamp('2015-01-06 14:31', tz='UTC')
)
middle_day_close_i = window3_minutes.searchsorted(
pd.Timestamp('2015-01-06 21:00', tz='UTC')
)
np.testing.assert_array_equal(
np.array(range(2000, 2390), dtype='float64') / 2,
window3[middle_day_open_i:middle_day_close_i + 1]
)
# final 5 minutes should be 1000-1004
np.testing.assert_array_equal(range(1000, 1005), window3[-5:])
# after last event
window4 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-07 14:40', tz='UTC'),
5,
'1m',
'close',
'minute',
)[asset]
# should not be adjusted, should be 1005 to 1009
np.testing.assert_array_equal(range(1005, 1010), window4)
def test_minute_dividends(self):
# self.DIVIDEND_ASSET had dividends on 1/6 and 1/7
# before any of the dividends
window1 = self.data_portal.get_history_window(
[self.DIVIDEND_ASSET],
pd.Timestamp('2015-01-05 21:00', tz='UTC'),
10,
'1m',
'close',
'minute',
)[self.DIVIDEND_ASSET]
np.testing.assert_array_equal(np.array(range(382, 392)), window1)
# straddling the first dividend (10 active equity minutes)
window2_start = pd.Timestamp('2015-01-05 20:56', tz='UTC')
window2_end = pd.Timestamp('2015-01-06 14:35', tz='UTC')
window2_count = len(
self.trading_calendar.minutes_in_range(window2_start, window2_end)
)
window2 = self.data_portal.get_history_window(
[self.DIVIDEND_ASSET],
window2_end,
window2_count,
'1m',
'close',
'minute',
)[self.DIVIDEND_ASSET]
# first dividend is 2%, so the first five values should be 2% lower
# than before
np.testing.assert_array_almost_equal(
np.array(range(387, 392), dtype='float64') * 0.98,
window2[0:5]
)
# second half of window is unadjusted
np.testing.assert_array_equal(range(392, 397), window2[-5:])
# straddling both dividends (on the equities calendar, this is
# 5 minutes of 1/7, 390 of 1/6, and 5 minutes of 1/5).
window3_start = pd.Timestamp('2015-01-05 20:56', tz='UTC')
window3_end = pd.Timestamp('2015-01-07 14:35', tz='UTC')
window3_minutes = self.trading_calendar.minutes_in_range(
window3_start,
window3_end,
)
window3_count = len(window3_minutes)
window3 = self.data_portal.get_history_window(
[self.DIVIDEND_ASSET],
window3_end,
window3_count,
'1m',
'close',
'minute',
)[self.DIVIDEND_ASSET]
# first five minutes (from 1/5) should be hit by 0.9408 (= 0.98 * 0.96)
np.testing.assert_array_almost_equal(
np.around(np.array(range(387, 392), dtype='float64') * 0.9408, 3),
window3[0:5]
)
# next 390 minutes (the 2015-01-06 session) should be hit by 0.96
# (second dividend)
middle_day_open_i = window3_minutes.searchsorted(
pd.Timestamp('2015-01-06 14:31', tz='UTC')
)
middle_day_close_i = window3_minutes.searchsorted(
pd.Timestamp('2015-01-06 21:00', tz='UTC')
)
np.testing.assert_array_almost_equal(
np.array(range(392, 782), dtype='float64') * 0.96,
window3[middle_day_open_i:middle_day_close_i + 1]
)
# last 5 minutes should not be adjusted
np.testing.assert_array_equal(np.array(range(782, 787)), window3[-5:])
def test_passing_iterable_to_history_regular_hours(self):
# regular hours
current_dt = pd.Timestamp("2015-01-06 9:45", tz='US/Eastern')
bar_data = self.create_bardata(
lambda: current_dt,
)
bar_data.history(pd.Index([self.ASSET1, self.ASSET2]),
"high", 5, "1m")
def test_passing_iterable_to_history_bts(self):
# before market hours
current_dt = pd.Timestamp("2015-01-07 8:45", tz='US/Eastern')
bar_data = self.create_bardata(
lambda: current_dt,
)
with handle_non_market_minutes(bar_data):
bar_data.history(pd.Index([self.ASSET1, self.ASSET2]),
"high", 5, "1m")
def test_overnight_adjustments(self):
# Should incorporate adjustments on midnight 01/06
current_dt = pd.Timestamp('2015-01-06 8:45', tz='US/Eastern')
bar_data = self.create_bardata(
lambda: current_dt,
)
adj_expected = {
'open': np.arange(8381, 8391) / 4.0,
'high': np.arange(8382, 8392) / 4.0,
'low': np.arange(8379, 8389) / 4.0,
'close': np.arange(8380, 8390) / 4.0,
'volume': np.arange(8380, 8390) * 100 * 4.0,
'price': np.arange(8380, 8390) / 4.0,
}
expected = {
'open': np.arange(383, 393) / 2.0,
'high': np.arange(384, 394) / 2.0,
'low': np.arange(381, 391) / 2.0,
'close': np.arange(382, 392) / 2.0,
'volume': np.arange(382, 392) * 100 * 2.0,
'price': np.arange(382, 392) / 2.0,
}
# Use a window looking back to 3:51pm from 8:45am the following day.
# This contains the last ten minutes of the equity session for
# 2015-01-05.
window_start = pd.Timestamp('2015-01-05 20:51', tz='UTC')
window_end = pd.Timestamp('2015-01-06 13:44', tz='UTC')
window_length = len(
self.trading_calendar.minutes_in_range(window_start, window_end)
)
with handle_non_market_minutes(bar_data):
# Single field, single asset
for field in ALL_FIELDS:
values = bar_data.history(
self.SPLIT_ASSET,
field,
window_length,
'1m',
)
# The first 10 bars the `values` correspond to the last
# 10 minutes in the 2015-01-05 session.
np.testing.assert_array_equal(values.values[:10],
adj_expected[field],
err_msg=field)
# Multi field, single asset
values = bar_data.history(
self.SPLIT_ASSET, ['open', 'volume'], window_length, '1m'
)
np.testing.assert_array_equal(values.open.values[:10],
adj_expected['open'])
np.testing.assert_array_equal(values.volume.values[:10],
adj_expected['volume'])
# Single field, multi asset
values = bar_data.history(
[self.SPLIT_ASSET, self.ASSET2], 'open', window_length, '1m'
)
np.testing.assert_array_equal(values[self.SPLIT_ASSET].values[:10],
adj_expected['open'])
np.testing.assert_array_equal(values[self.ASSET2].values[:10],
expected['open'] * 2)
# Multi field, multi asset
values = bar_data.history(
[self.SPLIT_ASSET, self.ASSET2],
['open', 'volume'],
window_length,
'1m',
)
np.testing.assert_array_equal(
values.open[self.SPLIT_ASSET].values[:10],
adj_expected['open']
)
np.testing.assert_array_equal(
values.volume[self.SPLIT_ASSET].values[:10],
adj_expected['volume']
)
np.testing.assert_array_equal(
values.open[self.ASSET2].values[:10],
expected['open'] * 2
)
np.testing.assert_array_equal(
values.volume[self.ASSET2].values[:10],
expected['volume'] / 2
)
def test_minute_early_close(self):
# 2014-07-03 is an early close
# HALF_DAY_TEST_ASSET started trading on 2014-07-02, how convenient
#
# five minutes into the day after the early close, get 20 1m bars
cal = self.trading_calendar
window_start = pd.Timestamp('2014-07-03 16:46:00', tz='UTC')
window_end = | pd.Timestamp('2014-07-07 13:35:00', tz='UTC') | pandas.Timestamp |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
import os
import yaml
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
# from .utils import Boba_Utils as u
# from ._03_Modeling import Boba_Modeling as m
class Boba_Sys_Diagnostics():
def __init__(self):
pass
def run_sys_scoring(self, model, target,prod):
if prod == True:
pass
elif (self.position_group == 'hitters' and target in ['BABIP','BB%','K%']):
pass
elif (self.position_group == 'SP' and target in ['OBP','SLG','ShO_per_GS','CG_per_GS']):
pass
elif (self.position_group == 'RP' and target in ['OBP','SLG','HLD_per_G']):
pass
else:
master_df = pd.read_csv('data/processed/'+self.position_group+'/master_df.csv',index_col=0)
path = 'data/scoring/evaluation_'+self.position_group+'_'+str(self.year-1)+'.csv'
if os.path.exists(path):
print('does exist')
evaluation_df = pd.read_csv(path,index_col=0)
else:
print('does not exist')
data_group = 'hitters' if self.position_group == 'hitters' else 'pitchers'
zips_df = pd.read_csv('data/raw/'+data_group+'/projection_systems/zips/'+str(self.year-1)+'.csv')
atc_df = pd.read_csv('data/raw/'+data_group+'/projection_systems/atc/'+str(self.year-1)+'.csv')
bat_df = pd.read_csv('data/raw/'+data_group+'/projection_systems/thebat/'+str(self.year-1)+'.csv')
stmr_df = pd.read_csv('data/raw/'+data_group+'/projection_systems/steamer/'+str(self.year-1)+'.csv')
zips_df = zips_df.rename(columns={"K/9": "K_per_9", "BB/9": "BB_per_9"})
atc_df = atc_df.rename(columns={"K/9": "K_per_9", "BB/9": "BB_per_9"})
bat_df = bat_df.rename(columns={"K/9": "K_per_9", "BB/9": "BB_per_9"})
stmr_df = stmr_df.rename(columns={"K/9": "K_per_9", "BB/9": "BB_per_9"})
evaluation_df = master_df[master_df['Season']==(self.year-1)]
evaluation_df['playerID'] = evaluation_df['playerID'].astype('str')
if self.position_group == 'hitters':
evaluation_df = evaluation_df[self.information_cols+[self.pt_metric]+self.model_targets+self.counting_stats]
zips_df = zips_df[['playerid']+[self.pt_metric]+[x for x in zips_df.columns if x in self.model_targets]+[x for x in zips_df.columns if x in self.counting_stats]]
atc_df = atc_df[['playerid']+[self.pt_metric]+[x for x in atc_df.columns if x in self.model_targets]+[x for x in atc_df.columns if x in self.counting_stats]]
bat_df = bat_df[['playerid']+[self.pt_metric]+[x for x in bat_df.columns if x in self.model_targets]+[x for x in bat_df.columns if x in self.counting_stats]]
stmr_df = stmr_df[['playerid']+[self.pt_metric]+[x for x in stmr_df.columns if x in self.model_targets]+[x for x in stmr_df.columns if x in self.counting_stats]]
else:
evaluation_df = evaluation_df[self.information_cols+[self.pt_metric]+[self.per_metric]+self.model_targets+self.counting_stats]
zips_df = zips_df[['playerid']+[self.pt_metric]+[self.per_metric]+[x for x in zips_df.columns if x in self.model_targets]+[x for x in zips_df.columns if x in self.counting_stats]]
atc_df = atc_df[['playerid']+[self.pt_metric]+[self.per_metric]+[x for x in atc_df.columns if x in self.model_targets]+[x for x in atc_df.columns if x in self.counting_stats]]
bat_df = bat_df[['playerid']+[self.pt_metric]+[self.per_metric]+[x for x in bat_df.columns if x in self.model_targets]+[x for x in bat_df.columns if x in self.counting_stats]]
stmr_df = stmr_df[['playerid']+[self.pt_metric]+[self.per_metric]+[x for x in stmr_df.columns if x in self.model_targets]+[x for x in stmr_df.columns if x in self.counting_stats]]
evaluation_df = pd.merge(evaluation_df,zips_df,how='left',left_on='playerID', right_on='playerid',suffixes=('','_zips')).drop('playerid',axis=1)
evaluation_df = pd.merge(evaluation_df,atc_df,how='left',left_on='playerID', right_on='playerid',suffixes=('','_atc')).drop('playerid',axis=1)
evaluation_df = pd.merge(evaluation_df,bat_df,how='left',left_on='playerID', right_on='playerid',suffixes=('','_bat')).drop('playerid',axis=1)
evaluation_df = pd.merge(evaluation_df,stmr_df,how='left',left_on='playerID', right_on='playerid',suffixes=('','_stmr')).drop('playerid',axis=1)
evaluation_df.to_csv(path)
temp_df = master_df[master_df['Season']==(self.year-1)]
temp_df['Season'] = (self.year-2)
temp_df = self.isolate_relevant_columns(modeling_df = temp_df,target = target)
temp_df = temp_df.drop([target],axis=1)
pipeline = pickle.load(open('data/modeling/'+self.position_group+'/'+target+'/preprocessing_pipeline_eval.sav', 'rb'))
temp_df_2 = pipeline.transform(temp_df)
with open(r'data/modeling/'+self.position_group+'/'+target+'/model_features_eval.yaml') as file:
yaml_data = yaml.load(file, Loader=yaml.FullLoader)
model_features = yaml_data[target]
temp_df = pd.DataFrame(temp_df_2, columns = model_features,index=temp_df.index)
temp_df[target+'_Boba'] = model.predict(temp_df)
temp_df = temp_df[[target+'_Boba']]
evaluation_df = evaluation_df.drop([target+'_Boba'],axis=1,errors='ignore')
new_df = pd.merge(evaluation_df,temp_df,left_index=True,right_index=True)
rate_stats = [c+'_per_'+self.per_metric for c in self.counting_stats]
if target in rate_stats:
colname = target.replace('_per_'+self.per_metric, '')
new_df[colname+'_Boba'] = new_df[target+'_Boba']*new_df[self.per_metric+'_zips']
else:
colname = target
zips_metric = colname+'_zips'
atc_metric = colname+'_atc'
bat_metric = colname+'_bat'
stmr_metric = colname+'_stmr'
BOBA_metric = colname+'_Boba'
systems_list = [c for c in [BOBA_metric,zips_metric,stmr_metric,bat_metric,atc_metric] if c in list(new_df.columns)]
new_df[colname+'_averaged'] = new_df[systems_list].mean(axis=1)
new_df.to_csv(path)
return new_df
def run_sys_diagnostics(self, evaluation_df, target,prod):
rate_stats = [c+'_per_'+self.per_metric for c in self.counting_stats]
if target in rate_stats:
colname = target.replace('_per_'+self.per_metric, '')
else:
colname = target
if prod == True:
share_df = pd.DataFrame(columns = ['winning_sys'],index=['Boba','Zips','ATC','STMR','BAT','averaged']).fillna(0)
return share_df
elif (self.position_group == 'hitters' and target in ['BABIP','BB%','K%']):
data = {'system': ['BOBA', 'zips','steamer','atc','bat','averaged']}
compare_df = pd.DataFrame(data,columns = ['system','WinShare','R2','Corr','RMSE','MAE']).fillna(0)
return compare_df
elif (self.position_group == 'SP' and target in ['OBP','SLG','ShO_per_GS','CG_per_GS']):
data = {'system': ['BOBA', 'zips','steamer','atc','bat','averaged']}
compare_df = pd.DataFrame(data,columns = ['system','WinShare','R2','Corr','RMSE','MAE']).fillna(0)
return compare_df
elif (self.position_group == 'RP' and target in ['OBP','SLG','HLD_per_G']):
data = {'system': ['BOBA', 'zips','steamer','atc','bat','averaged']}
compare_df = pd.DataFrame(data,columns = ['system','WinShare','R2','Corr','RMSE','MAE']).fillna(0)
return compare_df
else:
zips_metric = colname+'_zips'
atc_metric = colname+'_atc'
bat_metric = colname+'_bat'
stmr_metric = colname+'_stmr'
BOBA_metric = colname+'_Boba'
averaged_metric = colname+'_averaged'
systems_list = [c for c in [colname,BOBA_metric,zips_metric,stmr_metric,bat_metric,atc_metric,averaged_metric] if c in list(evaluation_df.columns)]
eval_results_df = evaluation_df[['Name','playerID','Age']+systems_list].sort_values(averaged_metric,ascending=False)
eval_results_df['Boba'] = abs(eval_results_df[colname]-eval_results_df[BOBA_metric])*-1
if zips_metric in list(eval_results_df.columns):
eval_results_df['Zips'] = abs(eval_results_df[colname]-eval_results_df[zips_metric])*-1
if atc_metric in list(eval_results_df.columns):
eval_results_df['ATC'] = abs(eval_results_df[colname]-eval_results_df[atc_metric])*-1
if stmr_metric in list(eval_results_df.columns):
eval_results_df['STMR'] = abs(eval_results_df[colname]-eval_results_df[stmr_metric])*-1
if bat_metric in list(eval_results_df.columns):
eval_results_df['BAT'] = abs(eval_results_df[colname]-eval_results_df[bat_metric])*-1
eval_results_df['averaged'] = abs(eval_results_df[colname]-eval_results_df[colname+'_averaged'])*-1
systems_list_names = [c for c in ['Boba','Zips','ATC','STMR','BAT','averaged'] if c in list(eval_results_df.columns)]
eval_results_df['winning_val'] = eval_results_df[systems_list_names].max(axis=1)
eval_results_df['winning_sys'] = eval_results_df[systems_list_names].idxmax(axis=1)
remove_na_df = eval_results_df.dropna(subset=systems_list)
share_df = remove_na_df.groupby('winning_sys')['winning_sys'].count().sort_values(ascending=False)
sns.set(style="darkgrid")
pva = eval_results_df.groupby(pd.qcut(eval_results_df[averaged_metric], 10,duplicates='drop'))[systems_list].mean()
pva.index = list(np.arange(1,11,1))
pva = pva.reset_index()
df = pva.melt('index', var_name='cols', value_name='vals')
sns.factorplot(x="index", y= 'vals',hue='cols',data=df,legend_out=False)
plt.title("System Comparison vs Actuals for {}".format(colname))
plt.xlabel("Average Prediction Sorted Decile")
plt.ylabel("{}".format(colname))
plt.legend(loc='upper left')
plt.show()
try:
n = 3
ws_plot_df = remove_na_df.groupby([pd.qcut(remove_na_df[averaged_metric], n),'winning_sys'])['winning_sys'].count()
ws_plot_df = | pd.DataFrame(ws_plot_df) | pandas.DataFrame |
import pickle
import pandas as pd
import time as time
def merge_with_metatable(from_sp, to_sp, df_spectra, save=False):
"""
merge_with_metatable()
Parameters
----------
from_sp : string
The number from which to merge spectra with meta-data. String, because it
must match the filename in folder data/sdss/spectra/
to_sp : string
The number which specifies the upper limit to merge spectra with meta-data.
String, because it must match the filename in folder data/sdss/spectra/
df_spectra : pd.DataFrame
The DataFrame that comes from downloading the raw spectral data. None by
default, in which case it is loaded from disk.
save : boolean
When True, save the resulting merged table into a pickle
When False, don't save the resulting merged table
Returns
-------
df_merged : pandas.DataFrame
A merged table that contains the spectral data and all meta information from
data/sdss/meta_table.pkl:
columns: 'flux_list',
'wavelength',
'objid',
'bestObjID',
'fluxObjID',
'targetObjID',
'plate',
'class',
'subClass',
'zErr',
'petroMag_u',
'petroMag_g',
'petroMag_r',
'petroMag_i',
'petroMag_z',
'petroMagErr_u',
'petroMagErr_g',
'petroMagErr_r',
'petroMagErr_i',
'petroMagErr_z',
'dec',
'z',
'ra'
"""
df_meta_data = | pd.read_pickle('data/quasar_meta_table.pkl') | pandas.read_pickle |
# -*- coding: utf-8 -*-
"""
Created on Nov 04 2020
@author: <NAME>
@supervisor: <NAME>
Tools to work with Red Clumps
"""
import os
import sys
import copy
import math
import numpy as np
import pandas as pd
from astropy.coordinates import SkyCoord
from astropy import units as u
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from scipy.optimize import curve_fit
from scipy.optimize import leastsq
from scipy.signal import find_peaks
# Use LaTex fonts
from matplotlib import rc
rc('text', usetex=True)
plt.rcParams.update({'font.size': 12})
# set comma to dot - Brazilian decimal notation
import locale
locale.setlocale(locale.LC_ALL, 'pt_BR.UTF-8')
locale.setlocale(locale.LC_NUMERIC, 'pt_BR.UTF-8')
import matplotlib as mpl
mpl.rcParams['axes.formatter.use_locale'] = True
# my library
sys.path.append('/home/botan/OneDrive/Doutorado/VVV_DATA/my_modules/')
import math_functions
class RedClump(object):
def __init__(self,Rv):
self.gc_distance = 8178 # +- 13 pc https://www.aanda.org/articles/aa/full_html/2019/05/aa35656-19/aa35656-19.html
self.Rv = Rv
self.path = '/home/botan/OneDrive/Doutorado/VVV_DATA'
def cartezian_projections(self,d,gal_l,gal_b):
dx = d*np.cos(math.radians(gal_b))*np.cos(math.radians(gal_l))
rx = dx - self.gc_distance
ry = d*np.cos(math.radians(gal_b))*np.sin(math.radians(gal_l))
rz = d*np.sin(math.radians(gal_b))
return rx,ry,rz
def red_clump_distance(self,Ks_mag,Ks_err,c,c_err):
# Ruiz-Dern et al. (2018) https://ui.adsabs.harvard.edu/abs/2018A%26A...609A.116R/abstract
c_0 = 0.66
c_0_err = 0.02
MKs = -1.605
MKs_err = 0.009
# Minniti (2011) AJ Letters 73:L43
mu = Ks_mag - self.Rv * (c - c_0) - MKs
mu_err = np.sqrt(Ks_err**2 + c_err**2 + c_0_err**2 + MKs_err**2)
dist = 10**((mu + 5)/5)
dist_err = 2**((mu + 5)/5) * 5**(mu/5)*np.log(10) * mu_err
return dist,dist_err
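# Worked example (illustrative values, not survey measurements): with
# Rv = 0.689, a clump star at Ks = 13.00 +/- 0.05 and J-Ks = 0.96 +/- 0.05
# gives mu = 13.00 - 0.689*(0.96 - 0.66) + 1.605 ~ 14.40, i.e. d ~ 7.6 kpc.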
def find_RC_color_peak(self, color, color_mask, bins=50, show=False):
'''
Find RC peaks color and color sigma and
return fit parameters for color peak (gaussian)
'''
y = color[color_mask]
hist, bin_edges = np.histogram(y,bins=bins)
flor = hist > hist[0]
binSize = bin_edges[1]-bin_edges[0]
x = np.empty(len(bin_edges)-1)
x[0] = bin_edges[0] + binSize/2
i = 1
while i < len(bin_edges)-1:
x[i] = x[i-1] + binSize
i+=1
guess = [hist.max(),y.median(),0.5]
fit = leastsq(func=math_functions.single_gaussian_residuals,
x0=guess,
args=(x[flor],hist[flor]))
if show:
func = math_functions.single_gaussian(x,fit[0])
plt.hist(y,bins=bins)
plt.plot(x,func,'-')
plt.ylabel(r'\#\ stars')
plt.xlabel('J-Ks')
plt.show()
return fit[0]
def find_RC_mag_peak(self, mag, mag_mask, mu1, mu2, bins=100, show=False):
'''
find RC peaks in magnitudes
renturn fit parameters for peaks (two gaussians) and
Luminosity Function (exponential)
'''
hist, bin_edges = np.histogram(mag[mag_mask],bins=bins)
binSize = bin_edges[1]-bin_edges[0]
x = np.empty(len(bin_edges)-1)
x[0] = bin_edges[0] + binSize/2
i = 1
while i < len(bin_edges)-1:
x[i] = x[i-1] + binSize
i+=1
# exponential fit to Luminosity Function
mask2fit = ((x<12.2) | ((x>15.5) & (x<16))) # Mask mag around RC
guess = [-1e4,3e3,0.1]
lum_fit = leastsq( func = math_functions.exponential_residuals,
x0 = guess,
args=(x[mask2fit],hist[mask2fit]))
lum_func = math_functions.exponential(x,lum_fit[0])
# RC peaks
RC_peaks = hist - lum_func
mask2peaks = ((x>12)&(x<14.5))
x_RC_peaks = x[mask2peaks]
y_RC_peaks = RC_peaks[mask2peaks]
guess = [RC_peaks.max(),mu1,0.5,0.7*RC_peaks.max(),mu2,0.2]
peak_fit = leastsq( func=math_functions.double_gaussian_residuals,
x0=guess,
args=(x_RC_peaks,y_RC_peaks))
if show:
y = math_functions.double_gaussian(x,peak_fit[0])
plt.hist(mag[mag_mask],bins=bins)
plt.plot(x,y+lum_func,'-')
plt.plot(x,lum_func,'k--')
plt.ylabel(r'\#\ stars')
plt.xlabel('J-Ks')
plt.show()
return peak_fit[0],lum_fit[0]
def find_RC_dist_peak(self, distances, bins, show=False):
'''
find RC peaks in disttance
renturn fit parameters for peaks (two gaussians)
'''
hist, bin_edges = np.histogram(distances,bins=bins)
binSize = bin_edges[1]-bin_edges[0]
x = np.empty(len(bin_edges)-1)
x[0] = bin_edges[0] + binSize/2
i = 1
while i < len(bin_edges)-1:
x[i] = x[i-1] + binSize
i+=1
# gaussian
guess = [hist.max(), 8000 ,1000 , 0.5*hist.max(), 11000, 2000]
peak_fit = leastsq( func=math_functions.double_gaussian_residuals,
x0=guess,
args=(x,hist))
if show:
y1 = math_functions.single_gaussian(x,peak_fit[0][:3])
y2 = math_functions.single_gaussian(x,peak_fit[0][3:])
plt.hist(distances,bins=bins)
plt.plot(x,y1,'k--')
plt.plot(x,y2,'r--')
plt.ylabel(r'\#\ stars')
plt.xlabel('d [pc]')
plt.show()
return peak_fit[0]
def red_clump_inclination(self,method='2gaussian',plotHist=False):
'''
method = '1gaussian'
method = '2gaussian'
method = 'polynomial'
'''
# params dict [cmin,cmax,ymin,ymax,xmin,xmax]
params_JKs = { 'b293':[0.85,1.00,11.01,15.49,0.7,2.6],
'b294':[0.86,1.00,11.01,15.49,0.7,2.6],
'b295':[0.95,1.20,11.01,15.49,0.7,2.6],
'b296':[1.05,1.35,11.01,15.49,0.7,2.6],
'b307':[1.00,1.40,11.01,15.49,0.7,2.6],
'b308':[1.19,1.71,11.01,15.49,0.7,2.6],
'b309':[1.19,1.71,11.01,15.49,0.7,2.6],
'b310':[1.45,1.80,11.01,15.49,0.7,2.6]}
params_HKs = { 'b293':[0.19,0.32,11.01,15.49,0.1,0.9],
'b294':[0.19,0.32,11.01,15.49,0.1,0.9],
'b295':[0.23,0.36,11.01,15.49,0.1,0.9],
'b296':[0.29,0.45,11.01,15.49,0.1,0.9],
'b307':[0.22,0.45,11.01,15.49,0.1,0.9],
'b308':[0.30,0.59,11.01,15.49,0.1,0.9],
'b309':[0.32,0.62,11.01,15.49,0.1,0.9],
'b310':[0.45,0.70,11.01,15.49,0.1,0.9]}
params_band = { 'J-Ks':params_JKs,
'H-Ks':params_HKs}
# CMD axes dict
axes_dict = { 'b293':[1,3],
'b294':[1,2],
'b295':[1,1],
'b296':[1,0],
'b307':[0,3],
'b308':[0,2],
'b309':[0,1],
'b310':[0,0]}
for color_band in list(params_band.keys()):#[:1]:
params_dict = params_band[color_band]
plt.rcParams.update({'font.size': 14})
fig, axes = plt.subplots(2, 4, figsize=(16,8))
fig.subplots_adjust(wspace=0.1)
tiles = sorted(os.listdir(f'{self.path}/data/psf_ts/'))
for tile in tiles:#['b309','b310','b296']:#tiles:#[:1]:
tileData = []
chips = [_[:-3] for _ in os.listdir(f'{self.path}/data/psf_ts/{tile}/chips/') if _.endswith('.ts')]
for chip in chips:
chipData = pd.read_csv(f'{self.path}/data/psf_ts/{tile}/chips/{chip}.ts',index_col='ID')
tileData.append(chipData)
tileData = pd.concat(tileData)
magCols = [_ for _ in tileData.columns if _[:3] == 'MAG']
errCols = [_ for _ in tileData.columns if _[:3] == 'ERR']
err_msk = ( tileData[errCols] > 0.2).values
f = color_band.split('-')[0]
color = tileData[f'mag_{f}'] - tileData.mag_Ks
msk = ~color.isnull()
mag = tileData.mag_Ks
mag = mag[msk]
color = color[msk]
yRCpeak = []
xRCpeak = []
if method == '1gaussian':
# Single Gaussian fit
num_bins = 20
cmin = params_dict[tile][0]
cmax = params_dict[tile][1]
n = cmin
while n < cmax:
dc = abs(cmax-cmin)/10
cmsk = ((color > n) & (color <= n+dc) & (mag < 14))
hist, bin_edges = np.histogram(mag[cmsk],bins=num_bins)
binSize = bin_edges[1]-bin_edges[0]
x = [bin_edges[0] + binSize/2]
i = 1
while i < len(bin_edges)-1:
x.append(x[i-1] + binSize)
i+=1
guess = [500,13.2,0.5,]
fit = leastsq(math_functions.single_gaussian_residuals,guess,args=(x,hist))
params = fit[0]
yfit = math_functions.single_gaussian(x,params)
if plotHist:
fig,ax=plt.subplots()
ax.hist(mag[cmsk],num_bins)
ax.plot(x,yfit,'-')
plt.show()
yRCpeak.append(params[1])
xRCpeak.append(n)
n+=dc
if method == '2gaussian':
# DOuble Gaussian
num_bins = 80
cmin = params_dict[tile][0]
cmax = params_dict[tile][1]
n = cmin
while n < cmax:
dc = 0.05 #abs(cmax-cmin)/10
cmsk = ((color > n) & (color <= n+dc))# & (mag < 17.5))
hist, bin_edges = np.histogram(mag[cmsk],bins=num_bins)
binSize = bin_edges[1]-bin_edges[0]
x = [bin_edges[0] + binSize/2]
i = 1
while i < len(bin_edges)-1:
x.append(x[i-1] + binSize)
i+=1
mu1 = 13.0 #params_dict[tile][6] # initial guess for first peak mag
mu2 = 13.6 #params_dict[tile][7] # initial guess for second peak mag
peak_fit, lum_fit = self.find_RC_mag_peak(mag, cmsk, mu1, mu2, show=False)
#peak_fit, lum_fit = find_RC_mag_peak(1,mag, cmsk, mu1, mu2, bins=num_bins, show=False)
x = np.arange(11,18,(18-12)/1000)
lum_func = math_functions.exponential(x,lum_fit)
RC_fit = math_functions.double_gaussian(x,peak_fit)
fitted_curve = RC_fit + lum_func
crop = x < 14.5
mag_peak = x[crop][np.where(fitted_curve[crop] == fitted_curve[crop].max())[0][0]]
if plotHist:
yaxis_ref = np.histogram(mag[cmsk],bins=num_bins)[0].max()
fig,ax=plt.subplots(figsize=[6,4])
ax.hist(x=mag[cmsk],
bins=num_bins,
histtype='barstacked',
lw=0.5,
color='dodgerblue',
edgecolor='w',
alpha=0.6)
ax.plot(x,RC_fit+lum_func,'r-',lw=1)
ax.plot(x,lum_func,'k--',lw=1)
ptxt = '{:#.3n}'.format(mag_peak)
ax.axvline(mag_peak,lw=0.8,c='gray')
ax.text(s=ptxt,x=mag_peak+0.2,y=0.95*yaxis_ref,ha='left')
title = '{:#.3n}'.format(n) + ' < J-Ks < ' + '{:#.3n}'.format(n+dc)
ax.text(s=f'Tile: {tile} | {title}', x=0.5, y=1.02, ha='center', transform=ax.transAxes)
ax.set_ylabel('Número de estrelas')
ax.set_xlabel('Ks [mag]')
ax.set_ylim(-yaxis_ref*0.01,yaxis_ref+yaxis_ref*0.04)
plt.tight_layout()
plt.savefig(f'{self.path}/figuras_tese/RC_peaks_{tile}_{n}.png',dpi=300)
plt.show()
plt.close()
yRCpeak.append(mag_peak)
xRCpeak.append(n)
n+=dc
if method == 'polynomial':
# Polynomial fit
num_bins = 100
cmin = params_dict[tile][0]
cmax = params_dict[tile][1]
n = cmin
while n < cmax:
dc = (cmax-cmin)/8
cmsk = ((color > n) & (color <= n+dc) & (mag < 17.5))
hist, bin_edges = np.histogram(mag[cmsk],bins=num_bins)
binSize = bin_edges[1]-bin_edges[0]
x = [bin_edges[0] + binSize/2]
i = 1
while i < len(bin_edges)-1:
x.append(x[i-1] + binSize)
i+=1
x = np.array(x)
fit = np.polyfit(x, hist, 200)
yp = np.poly1d(fit)
x2 = np.arange(mag[cmsk].min(),mag[cmsk].max(),(mag[cmsk].max() - mag[cmsk].min())/1000)
msk = ((x2>12.5)&(x2<14))
peaks,_ = find_peaks(yp(x2[msk]))
if plotHist:
fig,ax=plt.subplots()
ax.hist(mag[cmsk],num_bins)
ax.plot(x,yp(x),'-')
ax.plot(x2[msk][peaks],yp(x2[msk][peaks]),"*")
ax.plot(x2[msk][peaks[0]],yp(x2[msk][peaks[0]]),"*")
plt.show()
yRCpeak.append(x2[msk][peaks[0]])
xRCpeak.append(n)
n+=dc
# CMD plot
y = np.array(yRCpeak)
x = np.array(xRCpeak)
xlim= params_dict[tile][4:6]
ylim= params_dict[tile][2:4]
xlabel= color_band
ylabel='Ks [mag]'
guess = [0.6,13]
c,cov = curve_fit( f = math_functions.linear,
xdata = x,
ydata = y,
p0 = guess,
sigma = y*.01,
absolute_sigma = False)
xfit = np.array(xlim)
yfit = math_functions.linear(xfit,c[0],c[1])
bins=(600,400)
cmap = copy.copy(mpl.cm.get_cmap("jet"))# plt.cm.jet
cmap.set_bad('w', 1.)
cmap_multicolor = copy.copy(mpl.cm.get_cmap("jet")) # plt.cm.jet
cmap_multicolor.set_bad('w', 1.)
clip = ~color.isnull()
N, xedges, yedges = np.histogram2d(color[clip],mag[clip],bins=bins)
ax1 = axes_dict[tile][0]
ax2 = axes_dict[tile][1]
img = axes[ax1,ax2].imshow(np.log10(N.T), origin='lower',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]],
aspect='auto', interpolation='nearest', cmap=cmap)
red_inc = '{:#.3n}'.format(c[0])
if ax1==0:
if ax2==1 or ax2==2:
axes[ax1,ax2].plot(x,y,'k.')
axes[ax1,ax2].plot(xfit,yfit,'k-')
axes[ax1,ax2].text( s=f'Tile: {tile} | AKs/E({color_band}) = {red_inc}',
x=0.5,
y=1.025,
ha='center',
transform=axes[ax1,ax2].transAxes)
else:
axes[ax1,ax2].text( s=f'Tile: {tile}',
x=0.025,
y=1.025,
ha='left',
transform=axes[ax1,ax2].transAxes)
else:
axes[ax1,ax2].text( s=f'Tile: {tile}',
x=0.025,
y=1.025,
ha='left',
transform=axes[ax1,ax2].transAxes)
axes[ax1,ax2].set_xlim(xlim)
axes[ax1,ax2].set_ylim(ylim)
axes[ax1,ax2].set_xlabel(xlabel)
axes[ax1,ax2].set_ylabel(ylabel)
axes[ax1,ax2].invert_yaxis()
for im in plt.gca().get_images():
im.set_clim(0, 3)
for ax in fig.get_axes():
ax.label_outer()
cbar_ax = plt.axes([0.92, 0.2, 0.01, 0.6])
cb = fig.colorbar(img,
ticks=[0, 1, 2, 3],
format=r'$10^{%i}$',
shrink=0.6 ,
cax=cbar_ax)
cb.set_label('Número por pixel',rotation=90)
#cb.set_label(r'$\mathrm{number\ in\ pixel}$',rotation=90)
#plt.tight_layout()
plt.savefig(f'{self.path}/figuras_tese/red_clump_reddening_{color_band}.png',dpi=200)
plt.show()
plt.rcParams.update({'font.size': 12})
plt.close()
def find_RC_peaks(self,plot=False,show=False):
# params dict [ymin,ymax,xmin,xmaxc,cmin,cmax,RC_peak1,RC_peak2]
params_dict = { 'b293':[11,17.9,0.0,1.4,0.65,1.10,13.0,13.8],
'b294':[11,17.9,0.0,1.5,0.70,1.20,13.0,13.8],
'b295':[11,17.9,0.2,1.5,0.75,1.30,13.0,13.9],
'b296':[11,17.9,0.2,1.7,0.85,1.64,13.0,14.1],
'b307':[11,17.9,0.1,2.0,0.85,1.50,13.1,13.8],
'b308':[11,17.9,0.1,2.3,1.00,1.60,13.2,14.0],
'b309':[11,17.9,0.1,2.3,1.00,2.00,13.2,14.2],
'b310':[11,17.9,0.3,2.6,1.20,2.00,13.2,14.3]}
tiles = sorted(os.listdir(f'{self.path}/data/psf_ts/'))
cols = ['RC_peak1_Ks_mag','RC_peak1_Ks_sigma',
'RC_peak1_color' ,'RC_peak1_color_sigma',
'RC_peak1_dist' ,'RC_peak1_dist_sigma',
'RC_peak2_Ks_mag','RC_peak2_Ks_sigma',
'RC_peak2_color' ,'RC_peak2_color_sigma',
'RC_peak2_dist' ,'RC_peak2_dist_sigma',
'tile_central_l' ,'tile_central_b']
RC_info = pd.DataFrame(index=tiles,columns=cols)
for tile in tiles:#[:1]:
tileData = []
chips = [_[:-3] for _ in os.listdir(f'{self.path}/data/psf_ts/{tile}/chips/') if _.endswith('.ts')]
for chip in chips:
chipData = pd.read_csv(f'{self.path}/data/psf_ts/{tile}/chips/{chip}.ts',index_col='ID')
tileData.append(chipData)
tileData = pd.concat(tileData)
ra = tileData.RA
dec = tileData.DEC
c_icrs = SkyCoord(ra=ra, dec=dec,unit=(u.deg, u.deg))
c_gal = c_icrs.galactic
tileData.loc[tileData.index,'gal_l'] = c_gal.l.deg
tileData.loc[tileData.index,'gal_b'] = c_gal.b.deg
color = tileData.mag_J - tileData.mag_Ks
msk = ~color.isnull()
color = color[msk]
mag = tileData.mag_Ks[msk]
color_min = params_dict[tile][4]
# get RC peaks magnitudes
mag_mask = ((color > color_min))
mu1 = params_dict[tile][6] # initial guess for first peak mag
mu2 = params_dict[tile][7] # initial guess for second peak mag
peak_fit, lum_fit = self.find_RC_mag_peak(mag, mag_mask, mu1, mu2, show=False)
# get RC peaks colors
color_masks = []
peak_colors = []
i = 1
while i < 6:
peak_mag, peak_sigma = peak_fit[i], peak_fit[i+1]
# RC peaks color and color sigma
color_mask = (((color > color_min) & (color < 2.6)) & ((mag > peak_mag - abs(peak_sigma)) & (mag < peak_mag + abs(peak_sigma))))
color_fit = self.find_RC_color_peak(color, color_mask, show=False)
peak_colors += [color_fit[1], abs(color_fit[2])]
color_masks.append(color_mask)
i+=3
# calculate distances
dist1,dist1_sigma = self.red_clump_distance(peak_fit[1],peak_fit[2],peak_colors[0],abs(peak_colors[1]))
dist2,dist2_sigma = self.red_clump_distance(peak_fit[4],peak_fit[5],peak_colors[2],abs(peak_colors[3]))
# tile central l and b
tile_l = (tileData.gal_l.max() - tileData.gal_l.min())/2 + tileData.gal_l.min()
tile_b = (tileData.gal_b.max() - tileData.gal_b.min())/2 + tileData.gal_b.min()
# save peaks info into a pandas DataFrame
info = list(peak_fit[1:3]) + peak_colors[:2] + [dist1,dist1_sigma] + list(peak_fit[4:6]) + peak_colors[2:] + [dist2,dist2_sigma,tile_l,tile_b]
RC_info.loc[tile,cols] = info
if plot:
# Plot CMD
xlim = params_dict[tile][2:4]
ylim = params_dict[tile][:2]
xlabel='J-Ks'
ylabel='Ks [mag]'
bins=(600,400)
cmap = copy.copy(mpl.cm.get_cmap("jet"))# plt.cm.jet
cmap.set_bad('w', 1.)
cmap_multicolor = copy.copy(mpl.cm.get_cmap("jet")) # plt.cm.jet
cmap_multicolor.set_bad('w', 1.)
clip = ~color.isnull()
N, xedges, yedges = np.histogram2d(color[clip],mag[clip],bins=bins)
fig, axes = plt.subplots(1, 2, figsize=(10,4))
img = axes[0].imshow( np.log10(N.T),
origin='lower',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]],
aspect='auto',
interpolation='nearest',
cmap=cmap)
axes[0].errorbar( x=info[2],
y=info[0],
xerr=info[3],
yerr=info[1],
marker="o",
mfc='k',
mec='k',
ecolor='k',
ms=3,
lw=.8,
capsize=3)
axes[0].errorbar( x=info[6],
y=info[4],
xerr=info[7],
yerr=info[5],
marker="o",
mfc='k',
mec='k',
ecolor='k',
ms=3,
lw=.8,
capsize=3)
axes[0].set_xlim(xlim)
axes[0].set_ylim(ylim)
axes[0].set_xlabel(xlabel)
axes[0].set_ylabel(ylabel)
axes[0].invert_yaxis()
axes[0].axvline(color_min,c='k',lw=1)
axes[0].text(s=f'Tile: {tile}',x=0.5,y=1.02,ha='center',transform=axes[0].transAxes)
cb = fig.colorbar( img,
ax=axes[0],
ticks=[0, 1, 2, 3],
format=r'$10^{%i}$',
shrink=0.6,
orientation='vertical')
cb.set_label(r'$\mathrm{Número\ por\ pixel}$',rotation=90)
# to plot luminosity ans peaks functions
x = np.arange(11,18,(18-12)/1000)
lum_func = math_functions.exponential(x,lum_fit)
RC_fit = math_functions.double_gaussian(x,peak_fit)
# mask test:
#axes[0].plot(color[color_masks[0]],mag[color_masks[0]],'b.',ms=.8,alpha=.01)
#axes[0].plot(color[color_masks[1]],mag[color_masks[1]],'b.',ms=.8,alpha=.01)
yaxis_ref = np.histogram(mag[mag_mask],bins=100)[0].max() # reference value
axes[1].hist( x=mag[mag_mask],
bins=100,
histtype='barstacked',
lw=.5,
color='dodgerblue',
edgecolor='w',
alpha=0.6)#,range=range)
axes[1].plot(x,RC_fit+lum_func,'r-',lw=1)
axes[1].plot(x,lum_func,'k--',lw=1)
axes[1].axvline(x=peak_fit[1],
ls='--',
c='gray',
lw=1)
m1 = '{:#.4n}'.format(peak_fit[1])
axes[1].text( s=f'{m1}',
x=peak_fit[1],
y=.9*yaxis_ref)
axes[1].axvline(x=peak_fit[4],
ls='--',
c='gray',
lw=1)
m2 = '{:#.4n}'.format(peak_fit[4])
axes[1].text( s=f'{m2}',
x=peak_fit[4],
y=.8*yaxis_ref)
axes[1].set_xlabel(ylabel)
axes[1].set_ylabel('Número de estrelas')
a = '{:#.2n}'.format(color_min)
axes[1].text(s=f'J-Ks > {a}',x=0.5,y=1.02,ha='center',transform=axes[1].transAxes)
axes[1].yaxis.set_label_position("right")
axes[1].yaxis.tick_right()
axes[1].set_ylim(-yaxis_ref*.01,yaxis_ref+yaxis_ref*.04)
plt.tight_layout()
plt.savefig(f'{self.path}/figuras_tese/{tile}_RC_bumps.png',dpi=200)
if show:
plt.show()
plt.close()
return RC_info
''' ======================= WORK IN PROGRESS ========================'''
def RC_peak_distance_distribution(self,plot=False,show=False):
path = '/home/botan/OneDrive/Doutorado/VVV_DATA'
params_dict = { 'b293':[11,17.9,0.0,1.4,0.65,1.10,13.0,13.8],
'b294':[11,17.9,0.0,1.5,0.70,1.20,13.0,13.8],
'b295':[11,17.9,0.2,1.5,0.75,1.30,13.0,13.9],
'b296':[11,17.9,0.2,1.7,0.85,1.64,13.0,14.1],
'b307':[11,17.9,0.1,2.0,0.85,1.50,13.1,13.8],
'b308':[11,17.9,0.1,2.3,1.00,1.60,13.2,14.0],
'b309':[11,17.9,0.1,2.3,1.00,2.00,13.2,14.2],
'b310':[11,17.9,0.3,2.6,1.20,2.00,13.2,14.3]}
tiles = sorted(os.listdir(f'{path}/data/psf_ts/'))
cols = ['mag_peak1','mag_err_peak1',
'color_peark1','color_err_peark1',
'distance1','distance_err1',
'x1','y1','z1',
'mag_peak2','err_peak2',
'color_peark2','color_err_peark2',
'distance2','distance_err2',
'x2','y2','z2',
'tile_l','tile_b']
RC_info = pd.DataFrame(index=tiles,columns=cols)
for tile in tiles:
tileData = []
chips = [_[:-3] for _ in os.listdir(f'{path}/data/psf_ts/{tile}/chips/') if _.endswith('.ts')]
for chip in chips:
chipData = pd.read_csv(f'{path}/data/psf_ts/{tile}/chips/{chip}.ts',index_col='ID')
tileData.append(chipData)
tileData = | pd.concat(tileData) | pandas.concat |
import matplotlib as mpl
# mpl.use('Agg')
import matplotlib.pyplot as plt
from shutil import copyfile
import fortranformat as ff
from itertools import zip_longest
from scipy.signal import argrelextrema, argrelmin
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
from ast import literal_eval as make_tuple
import pyshtools
from scipy.io import loadmat
from pathlib import Path
from scipy.special import lpmn
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import copy
import cartopy.feature as cfeature
# G, M_earth and R_earth are used by the orbit routines below; assuming they
# come from astropy.constants (this import was missing here).
from astropy.constants import G, M_earth, R_earth
"""
author: <NAME>
contact: <EMAIL>
description: A script containing tools to post-process the orbits, obtained from a forward simulation (and also recovery), from epos-oc.
"""
def create_element_lines(ffp, splitstring):
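    """Split an unformatted EPOS element dump into per-block ELEMENT files.
    Summary inferred from the code and the commented-out variant below: for
    every block in `ffp` whose header starts with `splitstring`, the 16 lines
    that follow are reformatted (Fortran E19.13 notation, GRACE-FO satellite
    IDs remapped) and written to '<header>_ELEMENTSnew.txt' with begin/end
    markers for GRACE-C and GRACE-D.
    """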
#get titles
with open(ffp) as f:
LINES = f.readlines()
starts = []
for i,line in enumerate(LINES):
if line.startswith(splitstring):
starts.append(i)
ends=[]
for i in range(len(starts)):
ends.append(starts[i]+16)
blocks = list(zip(starts,ends))
format_float = ff.FortranRecordWriter('(E19.13)')
for block in blocks:
with open(ffp) as fp:
for i, line in enumerate(fp):
if i in range(block[0],block[1]):
if i==block[0]:
outfile = open('%s_ELEMENTSnew.txt' %line.strip(),'w')
outfile.write('\n')
outfile.write(' --- Begin initial elements GRACE-C\n')
if i>block[0]+1:
if line.startswith('Sat'):
outfile.write(' --- End initial elements GRACE-C\n')
outfile.write('\n')
outfile.write(' --- Begin initial elements GRACE-D\n')
if line.startswith('ELEMENT'):
val = line.strip().split()
val[5] = str(format_float.write([np.float(val[5])])).replace('E','e')
val[6] = str(format_float.write([np.float(val[6])])).replace('E', 'e')
if val[7] == '0201201': val[7] = '1804701'
if val[7] == '0201202': val[7] = '1804702'
str_new2 = ('%7.7s' '%4.3s' '%2.1i' '%2.1i' '%2.1i' '%20.19s' '%20.19s' '%8.7s') \
% (val[0], val[1], int(val[2]), int(val[3]),
int(val[4]), val[5], val[6], val[7])
outfile.write('%s\n' %str_new2)
if i==block[1]-1:
outfile.write(' --- End initial elements GRACE-D')
break
#
#
# def create_element_lines(ffp, splitstring):
# #input: Unformatted file that contains orbit elements needed to start each of the runs for GRACE-FO simulation
# #output: Orbit elements that can be used as input for prepare_EPOSIN_4_orbit_integration.sh (located at
# #/GFZ/project/f_grace/NGGM_SIM/SIM_FORWARD )
# with open(ffp) as f:
# lines = f.read().splitlines()
# splits = [i for i in lines if i.startswith(splitstring)]
# print(splits)
# n = 2 # group size
# m = 1 # overlap size
# splits_grouped = [splits[i:i + n] for i in range(0, len(splits), n - m)]
# print(splits_grouped)
#
#
# # # print(lines)
# # split = [i for i in lines if i.startswith('PP')]
# for i in splits_grouped:
# if len(i) > 1:
# start = i[0]
# end = i[1]
# out = '%s_ELEMENT_lines.txt' % (start.strip())
# with open(ffp) as infile, open(out, 'w') as outfile:
# copy = False
# titlewritten0 = False
# titlewritten1 = False
# firsttime6 = False
# linesread = 0
# outfile.write("\n")
#
# for line in infile:
# if line.strip() == start.strip():
# copy = True
# continue
# elif line.strip() == end.strip():
# copy = False
# continue
# elif copy:
# linesread += 1
#
# if not titlewritten0:
# outfile.write(' --- Begin initial elements GRACE-C\n')
# titlewritten0 = True
# if line.startswith(
# 'ELEMENT') and titlewritten0: # if line starts with ELEMENT and the first title has been written
# val = list(filter(None, line.strip().split(' ')))[0:-3]
# format_float = ff.FortranRecordWriter('(E19.13)')
# val5 = str(format_float.write([np.float(val[5])]))
# val6 = str(format_float.write([np.float(val[6])]))
#
# val5 = val5.replace('E', 'e')
# val6 = val6.replace('E', 'e')
#
#
# if val[7] == '0201201': val[7] = '1804701'
# if val[7] == '0201202': val[7] = '1804702'
# str_new2 = ('%7.7s' '%4.3s' '%2.1i' '%2.1i' '%2.1i' '%20.19s' '%20.19s' '%8.7s') % (val[0], val[1], int(val[2]), int(val[3]), int(val[4]), val5, val6, val[7])
#
#
# # outfile.write("\n")
# if int(val[2]) < 6:
# outfile.write(str_new2)
# outfile.write("\n")
#
# if int(val[
# 2]) == 6 and not titlewritten1: # if element six has been reached and no 'end1' has been written yet:
# if not firsttime6:
# titlewritten1 = True
# # titlewritten2 = True
# outfile.write(str_new2)
# outfile.write("\n")
# outfile.write(' --- End initial elements GRACE-C\n\n')
# outfile.write(' --- Begin initial elements GRACE-D\n')
#
# if int(val[2]) == 6:
# print(linesread)
# if linesread > 7:
# outfile.write(str_new2)
# outfile.write("\n")
#
# outfile.write(' --- End initial elements GRACE-D')
# outfile.write("\n")
# outfile.write('\n')
# outfile.close()
# infile.close()
def files(path):
#input: path to a directory
#output: files within the directory (omitting nested directories)
for file in os.listdir(path):
if os.path.isfile(os.path.join(path, file)):
yield file
def create_case_directories(fp, fp_out):
#function to prepare the case directories for each of the simulations specified for the GRACE-FO project.
#It will create one directory per simulation case and copy the matching ELEMENT file into it as 'ELEMENT_lines'.
element_files = []
# current_dir = os.path.dirname(__file__)
for file in files(fp):
element_files.append(file)
IDs = ['PP.1', 'PP.2']
altitudes = [490, 490]
extens = [0, 0]
angles = [89, 89]
seperations = [200, 100]
repeats = [30, 30]
simdirs = ['FD', 'FD']
df = pd.DataFrame(columns=['id', 'altitude', 'extens', 'seperation', 'repeatvals', 'sim_direction'])
df['id'] = IDs
df['altitude'] = altitudes
df['angles'] = angles
df['extens'] = extens
df['seperation'] = seperations
df['repeatvals'] = repeats
df['sim_direction'] = simdirs
df.set_index('id', inplace=True)
for idx in df.index:
dirname = '%s_%i_%i_%i_%i_%id_%s' % (idx, df.loc[idx].altitude,
df.loc[idx].angles,
df.loc[idx].extens,
df.loc[idx].seperation,
df.loc[idx].repeatvals,
df.loc[idx].sim_direction
)
if not os.path.exists(dirname):
os.mkdir(dirname)
ef = [f for f in element_files if f.startswith(idx)][0]
dst = os.path.join(fp, dirname, 'ELEMENT_lines')
src = os.path.abspath(os.path.join(os.path.dirname(__file__), ef))
copyfile(src, dst)
def serial_date_to_string(srl_no):
new_date = datetime.datetime(2000,1,1) + datetime.timedelta(srl_no+1)
return new_date.strftime("%Y-%m-%d")
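# e.g. serial_date_to_string(0) -> '2000-01-02' (note the one-day offset applied above)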
def cart_2_kep_matrix(R, V, mu, Re):
# step1
h_bar = np.cross(R, V)
h = np.linalg.norm(h_bar, axis=1)
# step2
r = np.linalg.norm(R, axis=1)
v = np.linalg.norm(V, axis=1)
# step3
E = 0.5 * (v ** 2) - mu / r
# step4
a = -mu / (2 * E)
return (a-Re)/1000.0
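# Minimal sanity-check sketch (illustrative, not project data): a circular
# orbit at 490 km altitude should come back from cart_2_kep_matrix as ~[490.].
def _altitude_sanity_check(alt_km=490.0):
    from astropy.constants import G, M_earth, R_earth  # assumed source of these constants
    mu = G.value * M_earth.value
    Re = R_earth.value
    r = Re + alt_km * 1000.0
    v = np.sqrt(mu / r)  # circular orbital speed at radius r
    R = np.array([[r, 0.0, 0.0]])
    V = np.array([[0.0, v, 0.0]])
    return cart_2_kep_matrix(R, V, mu, Re)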
def cart_2_kep(r_vec, v_vec, t, mu, Re):
# step1
h_bar = np.cross(r_vec, v_vec)
h = np.linalg.norm(h_bar)
# step2
r = np.linalg.norm(r_vec)
v = np.linalg.norm(v_vec)
# step3
E = 0.5 * (v ** 2) - mu / r
# step4
a = -mu / (2 * E)
# step5
e = np.sqrt(1 - (h ** 2) / (a * mu))
# step6
i = np.arccos(h_bar[2] / h)
# step7
omega_LAN = np.arctan2(h_bar[0], -h_bar[1])
# step8
# beware of division by zero here
lat = np.arctan2(np.divide(r_vec[2], (np.sin(i))), \
(r_vec[0] * np.cos(omega_LAN) + r_vec[1] * np.sin(omega_LAN)))
# step9
p = a * (1 - e ** 2)
nu = np.arctan2(np.sqrt(p / mu) * np.dot(r_vec, v_vec), p - r)
# step10
omega_AP = lat - nu
# step11
EA = 2 * np.arctan(np.sqrt((1 - e) / (1 + e)) * np.tan(nu / 2))
# step12
n = np.sqrt(mu / (a ** 3))
T = t - (1 / n) * (EA - e * np.sin(EA))
return a, e, i, omega_AP, omega_LAN, T, EA
def orbit_altitude(satfile):
mu = G.value * M_earth.value
Re = R_earth.value
with open(satfile) as infile:
"""read all lines from CIS files"""
lines = infile.readlines()
"""set the start and end characters for splitting the lines into X,Y,Z, U,V,W coordinates"""
start0, start1, start2, start3, start4, start5, end = 23, 41, 59, 77, 95, 113, 131
X = np.array([np.float(i[start0:start1]) for i in lines])
Y = np.array([np.float(i[start1:start2]) for i in lines])
Z = np.array([np.float(i[start2:start3]) for i in lines])
X = X.reshape(X.shape[0], 1)
Y = Y.reshape(Y.shape[0], 1)
Z = Z.reshape(Z.shape[0], 1)
R = np.concatenate((X, Y, Z), axis=1)
U = np.array([np.float(i[start3:start4]) for i in lines])
V = np.array([np.float(i[start4:start5]) for i in lines])
W = np.array([np.float(i[start5:end]) for i in lines])
U = U.reshape(U.shape[0], 1)
V = V.reshape(V.shape[0], 1)
W = W.reshape(W.shape[0], 1)
V = np.concatenate((U, V, W), axis=1)
"""calculate orbit altitude and convert to km"""
ALTITUDE_sec = cart_2_kep_matrix(R, V, mu, Re)
"""read the days and seconds """
daysStart, daysEnd, secondsStart, secondsEnd = 4, 11, 11, 23
seconds = np.array([np.float(i[secondsStart:secondsEnd]) for i in lines])
days = np.array([np.float(i[daysStart:daysEnd]) for i in lines])
"""calculate the decimal format for the days and subtract 51.184 to convert to correct time format"""
decimalDays = days + (seconds-51.184)/(24.0*60.*60.)
"""convert decimal days to a date"""
YMDHMS = [datetime.datetime(2000, 1, 1, 12) + datetime.timedelta(day) for day in decimalDays]
"""create an empty Pandas dataframe, called df"""
df = pd.DataFrame()
"""add the dates, decimal days and separation to the dataframe"""
df['date'] = | pd.to_datetime(YMDHMS) | pandas.to_datetime |
import pandas as pd
import numpy as np
import pickle
import secrets
import datetime
import warnings
import time
from seeq import spy
from packaging import version
import IPython
from IPython.display import clear_output, Javascript
from ipywidgets import widgets
from ipywidgets import HTML
from ipywidgets import Layout, Text
from ipywidgets import VBox, HBox
from ipywidgets import Image, Checkbox
from .display import loading
from .ui import checksum, selectType, clusterUnsupervised, clusterSupervised, startSupervised
from .. import seeqInterface
from .. import historicalBenchmarking
from ..._external_calc_override import ext_calc_override
key = '<KEY>'
__all__ = ('GUI',)
def push_clusterer_definition_to_seeq_property(serialized_definition, unique_key):
"""Push a serialized definition of clusterer to a seeq propery. The name will be EKPPropertyStorage<unique_key>
args:
serialized_definition (str): Serialized string of binary blob defining the clusterer
unique_key (str): Identifier for EKPPropertyStorage in Seeq
returns:
(str) : ID of pushed capsule.
"""
data = pd.DataFrame({
'Name':['EKPPropertyStorage{}'.format(unique_key)],
'Capsule Start':[pd.Timestamp("10/31/1993")],
'Capsule End': [pd.Timestamp("11/1/1993")],
'clusterDefn': ['{}'.format(serialized_definition)],
'Type':['Condition'],
'Maximum Duration':['1day']
})
pushed_ID = seeqInterface.push_capsule(data)
return pushed_ID
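# Hedged round-trip sketch (illustrative, not in the original source): the
# clusterer is stored as a hex-encoded pickle string, so recovering it on the
# calculation side is the inverse of what App.push_clusterer() does below.
# The dict stands in for any picklable, fitted clusterer object.
# blob_hex = pickle.dumps({"mcs": 15}).hex()
# restored = pickle.loads(bytes.fromhex(blob_hex))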
class App():
def __init__(self, workbook_id, worksheet_id, api_url,
auth_token, quiet = True):
"""need docstring"""
self.workbook_id = workbook_id
self.worksheet_id = worksheet_id
self.api_url = api_url
self.auth_token = auth_token
self.quiet = quiet
workbook = seeqInterface.get_workbook(workbook_id, quiet = False) #quiet False for loading
self.workbook = workbook
worksheet = seeqInterface.get_worksheet_from_workbook(worksheet_id, workbook)
self.worksheet = worksheet
signals = seeqInterface.get_signals(worksheet)
self.signals = signals
conditions = seeqInterface.get_conditions(worksheet)
self.conditions = conditions
display_range = seeqInterface.get_display_range(worksheet)
self.display_range = display_range
grid = seeqInterface.get_minumum_maximum_interpolation_for_signals_df(signals, display_range)
self.grid = grid
worksheet_name = seeqInterface.get_worksheet_name(worksheet)
self.worksheet_name = worksheet_name
def cluster(self, signal_list, min_cluster_size, datadf = None, **kwargs):
"""
Cluster the data. If datadf is None, we will use hdbscan to cluster and predict. If datadf is passed, the final column must be 'clustern' and we will use contour definition.
args:
signal_list (array-like of str): Names of signals to cluster on
min_cluster_size (int): Minimum cluster size for hdbscan
datadf (pandas.DataFrame): DataFrame with column 'clustern' which already specifies cluster structure
"""
if type(datadf) == type(None):
#case for doing density based (hdbscan)
for i, sig in enumerate(signal_list):
if i == 0:
indexer = (self.signals['Name'] == sig).values
else:
indexer = indexer + ((self.signals['Name'] == sig).values)
to_pull = self.signals[indexer]
datadf = seeqInterface.get_signals_samples(
to_pull,
display_range = self.display_range,
grid = self.grid
)
clusteron = list(datadf.columns)
clusterer = historicalBenchmarking.cluster(datadf, conditioner_cols=clusteron, mcs = min_cluster_size, **kwargs)
else:
#case of visual selection and need to use contour
clusteron = list(datadf.columns)[:-1] #ignore the last one because it is already clustern
clusterer = historicalBenchmarking.Cluster_Contour(datadf, clusteron).random_walk
self.clusteron = clusteron
self.clusterer = clusterer
self.xname = signal_list[0]
self.yname = signal_list[1]
return
def push_clusterer(self,):
"""
Push clusterer to Seeq. Stored as binary blob on Seeq Property.
"""
pushed_ids = dict()
conditioners = self.clusteron
try:
scalar = self.extent_scalar
except AttributeError:
scalar = 1.25
#todo: update scalar to work with clusterer (i.e. allow for zooming in and out on the cluster definition region)
idlist = [self.signals.query("Name == '{}'".format(conditioner)).ID.values[0] for conditioner in conditioners]
byte_clusterer = pickle.dumps(self.clusterer)
byte_cluster_str = byte_clusterer.hex()
obj_id_of_cluster_str = push_clusterer_definition_to_seeq_property(byte_cluster_str, secrets.token_hex(10))
self.clusterer_seeq_id = obj_id_of_cluster_str
self.idlist = idlist
return
def push_cluster_formulas(self, checksum, basename, timeOfRun):
"""
Push cluster formulas to Seeq.
args:
checksum (str): unique checksum that matches externalCalc checksum.
basename (str): Basename for the clusters
timeOfRun (str): Datetime of run. This gives us a unique identifier.
"""
if version.parse(spy.__version__) >= version.parse('53.4') or ext_calc_override:
### R54 case
self.push_clusterer()
conditioners = self.clusteron
bodies = [] #initializing for spy.push
#determine if we are doing density based or visual:
try:
iterable = np.sort(list(set(self.clusterer.labels_)))
except AttributeError: #case when we are doing contours and visual selection
iterable = [0]
#need to account for alphanumeric sorting of clusters:
max_clustern = max(iterable)
#how long should each label be? i.e. if we have over 10 clusters, each label should be two digits. if over 100, it should be 3 digits
len_of_label = len(str(max_clustern))
for clustern in iterable:
if clustern == -1:
continue
##now generate the formula
alphabet = 'abcdefghijklmnopqrst'
seeq_dollarsign_ids = []
j = 0 # repetition count: letters are doubled each time the alphabet is exhausted
for i in range(len(conditioners)):
if np.mod(i,len(alphabet)) == 0:
j+=1
seeq_dollarsign_ids.append(alphabet[np.mod(i, len(alphabet))]*j)
insertion_into_formula = ""#an example would be .toSignal(), $a, $b) this is the $a, $b part
for dollarsign_id in seeq_dollarsign_ids:
insertion_into_formula += "$" + str(dollarsign_id) + ","
insertion_into_formula = insertion_into_formula[:-1]
formula_string = "ClusteringCalc_ndim('{}&&{}&&{}&&{}'.toSignal(),"+ insertion_into_formula +").setMaxInterpolation({}).toCondition().merge(0, true)"
formula = formula_string.format(self.api_url, self.auth_token, self.clusterer_seeq_id, clustern, self.grid)
#print(formula)
parametersdict = dict({seeq_dollarsign_ids[i]:self.idlist[i] for i in range(len(conditioners))})
label = (str('0'*len_of_label) + str(clustern))[-len_of_label:] #for alpha numeric sorting if needed.
name = basename + ' ' + label + ' ' + timeOfRun
body={'Name':name, 'Formula':formula,
'Formula Parameters':parametersdict, 'Type':'Condition'}
bodies.append(body)
metatag = | pd.DataFrame(bodies) | pandas.DataFrame |
import numpy as np
from typing import Tuple
import plotnine as gg
import pandas as pd
from IMLearn.metalearners.adaboost import AdaBoost
from IMLearn.learners.classifiers import DecisionStump
from IMLearn.metrics.loss_functions import accuracy
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import matplotlib as plt
def generate_data(n: int, noise_ratio: float) -> Tuple[np.ndarray, np.ndarray]:
"""
Generate a dataset in R^2 of specified size
Parameters
----------
n: int
Number of samples to generate
noise_ratio: float
Ratio of labels to invert
Returns
-------
X: np.ndarray of shape (n_samples,2)
Design matrix of samples
y: np.ndarray of shape (n_samples,)
Labels of samples
"""
X, y = np.random.rand(n, 2) * 2 - 1, np.ones(n)
y[np.sum(X ** 2, axis=1) < 0.5 ** 2] = -1
y[np.random.choice(n, int(noise_ratio * n))] *= -1
return X, y
def decision_surface(predict, xrange, yrange, T, density=120):
xrange, yrange = np.linspace(*xrange, density), np.linspace(*yrange, density)
xx, yy = np.meshgrid(xrange, yrange)
pred = predict(np.c_[xx.ravel(), yy.ravel()], T)
df = pd.DataFrame({"x": xx.ravel(), "y": yy.ravel(), "Prediction": pred.astype(str), "Iterations": T})
return df
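# Hedged usage sketch (not part of the exercise solution): decision_surface
# expects a callable of the form predict(X, T), e.g. an AdaBoost partial
# predictor; the lambda below is only a stand-in to show the expected shape.
# grid_df = decision_surface(lambda X, T: np.sign(X[:, 0] + X[:, 1] + 1e-9),
#                            xrange=(-1, 1), yrange=(-1, 1), T=50, density=10)
# grid_df has columns x, y, Prediction, Iterations, one row per grid point.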
def fit_and_evaluate_adaboost(noise, n_learners=250, train_size=5000, test_size=500):
(train_X, train_y), (test_X, test_y) = generate_data(train_size, noise), generate_data(test_size, noise)
# Question 1: Train- and test errors of AdaBoost in noiseless case
adaboost = AdaBoost(wl=lambda: DecisionStump(), iterations=n_learners)
adaboost.fit(train_X, train_y)
test = [adaboost.partial_loss(test_X, test_y, i) for i in range(1, n_learners)]
train = [adaboost.partial_loss(train_X, train_y, i) for i in range(1, n_learners)]
range_learner = range(1, n_learners)
df1 = pd.DataFrame({"x": range_learner, "y": train, "Data": "Train data"})
df2 = pd.DataFrame({"x": range_learner, "y": test, "Data": "Test data"})
df = | pd.concat([df1, df2]) | pandas.concat |
import datetime
import os
import time
import copy
from enum import Enum
import requests
import yaml
import pandas as pd
class ExceptionFogifySDK(Exception):
pass
class FogifySDK(object):
def __init__(self, url: str, docker_compose: str = None):
self.url = url
self.docker_compose = None
self.nodes = []
self.networks = []
self.topology = []
self.services = []
self.docker_swarm_rep = None
if docker_compose:
try:
file = open(docker_compose, "r")
self.docker_compose = file.read()
file.close()
self.parse_docker_swarm()
except FileNotFoundError:
raise ExceptionFogifySDK("No such file or directory: " + docker_compose)
class Action_type(Enum):
HORIZONTAL_SCALING = 'HORIZONTAL_SCALING'
VERTICAL_SCALING = 'VERTICAL_SCALING'
NETWORK = 'NETWORK'
STRESS = 'STRESS'
COMMAND = 'COMMAND'
UPDATE_LINKS = 'UPDATE_LINKS'
def get_url(self, path: str = ""):
if not self.url.startswith("http://"):
return "https://%s" % (self.url + path)
return self.url + path
def check_docker_swarm_existence(self):
if self.docker_compose is None:
raise ExceptionFogifySDK('You can not apply this functionality with fogify yaml')
def parse_docker_swarm(self):
self.check_docker_swarm_existence()
self.docker_swarm_rep = yaml.safe_load(self.docker_compose)
if 'services' not in self.docker_swarm_rep:
raise ExceptionFogifySDK("The docker-compose should have at least services")
if 'x-fogify' in self.docker_swarm_rep:
if self.docker_swarm_rep['x-fogify']:
self.networks = self.docker_swarm_rep['x-fogify']['networks'] if 'networks' in self.docker_swarm_rep[
'x-fogify'] else []
self.nodes = self.docker_swarm_rep['x-fogify']['nodes'] if 'nodes' in self.docker_swarm_rep[
'x-fogify'] else []
self.scenarios = self.docker_swarm_rep['x-fogify']['scenarios'] if 'scenarios' in self.docker_swarm_rep[
'x-fogify'] else []
self.topology = self.docker_swarm_rep['x-fogify']['topology'] if 'topology' in self.docker_swarm_rep[
'x-fogify'] else []
self.services = [i for i in self.docker_swarm_rep["services"]]
def upload_file(self, remove_file: bool = True):
if self.docker_compose:
self.docker_swarm_rep["x-fogify"] = {
"networks": self.networks if hasattr(self, 'networks') else [],
"topology": self.topology if hasattr(self, 'topology') else [],
"nodes": self.nodes if hasattr(self, 'nodes') else [],
"scenarios": self.scenarios if hasattr(self, 'scenarios') else []
}
f = open("fogified-docker-compose.yaml", "w")
f.write(yaml.dump(self.docker_swarm_rep))
f.close()
self.fogify_yaml = open("fogified-docker-compose.yaml", "rb")
if remove_file:
os.remove("fogified-docker-compose.yaml")
return self.fogify_yaml
def __del__(self):
if hasattr(self, 'fogify_yaml') and self.fogify_yaml:
self.fogify_yaml.close()
del self
def deploy(self, timeout: int = 120):
url = self.get_url("/topology/")
self.clean_metrics()
self.clean_annotations()
response = requests.post(url, files={"file": self.upload_file()}, headers={}).json()
if not ('message' in response and response['message'].upper() == "OK"):
raise ExceptionFogifySDK("The deployment is failed (%s)"%str(response))
service_count = {name: response['swarm']['services'][name]['deploy']['replicas'] for name in
response['swarm']['services']}
from tqdm import tqdm
total = sum([int(service_count[i]) for i in service_count])
pbar = tqdm(total=total, desc="Deploy process")
count = 0
current_iteration = 0
while (count < total and current_iteration < timeout):
time.sleep(5)
response = requests.get(url, headers={})
if response.status_code != 200:
raise ExceptionFogifySDK("The deployment is failed (%s)" % str(response.json()))
response = response.json()
new_count = 0
for i in response:
new_count += len(response[i])
dif = new_count - count
pbar.update(dif)
count = new_count
current_iteration += 5
pbar.close()
if current_iteration > timeout:
self.undeploy()
raise ExceptionFogifySDK("The deployment is failed")
return {
"message": "The services are deployed ( %s )" % str(service_count)
}
def undeploy(self, timeout: int = 120):
url = self.get_url("/topology/")
response = requests.delete(url)
if response.status_code != 200:
raise ExceptionFogifySDK("Server error ( %s )" % str(response.json()))
response = requests.get(url, headers={}).json()
total = 0
for i in response:
total += len(response[i])
from tqdm import tqdm
pbar = tqdm(total=total, desc="Undeploy process")
count = total
current_iteration = 0
while (count > 0 and current_iteration < timeout):
time.sleep(5)
response = requests.get(url, headers={}).json()
new_count = 0
for i in response:
new_count += len(response[i])
dif = count - new_count
pbar.update(dif)
count = new_count
current_iteration += 5
self.data = {}
pbar.close()
if current_iteration > timeout:
raise ExceptionFogifySDK("The undeployment is failed")
return {
"message": "The %s services are undeployed" % str(total)
}
def get_metrics(self, service: str = None, from_timestamp: str = None, to_timestamp: str = None):
query = ""
query += "from_timestamp=" + str(
int(datetime.datetime.timestamp(from_timestamp))) + "&" if from_timestamp else ""
query += "to_timestamp=" + str(int(datetime.datetime.timestamp(to_timestamp))) + "&" if to_timestamp else ""
query += "service=" + service if service else ""
if hasattr(self, 'data') and service in self.data:
resp = requests.get(self.get_url("/monitorings/") + "?" + query).json()
if service in resp:
resp[service].sort(key=lambda k: k['count'])
intervals = [i['count'] for i in self.data[service]]
for i in resp[service]:
if i['count'] not in intervals:
self.data[service].append(i)
else:
self.data = requests.get(self.get_url("/monitorings/") + "?" + query).json()
for i in self.data:
self.data[i].sort(key=lambda k: k['count'])
return self
def get_network_packets_from(self, service: str, from_timestamp: str = None, to_timestamp: str = None,
packet_type: str = None):
query = ""
query += "from_timestamp=" + str(
int(datetime.datetime.timestamp(from_timestamp))) + "&" if from_timestamp else ""
query += "to_timestamp=" + str(int(datetime.datetime.timestamp(to_timestamp))) + "&" if to_timestamp else ""
query += "packet_type=" + str(packet_type) + "&" if packet_type else ""
query += "service=" + service
data = requests.get(self.get_url("/packets/") + "?" + query).json()
if "res" not in data:
raise ExceptionFogifySDK("The API call for packets does not response readable object")
res = pd.DataFrame.from_records(data["res"])
return res
def get_metrics_from(self, service: str):
if hasattr(self, 'data') and service in self.data:
self.get_metrics(service=service,
from_timestamp=datetime.datetime.strptime(self.data[service][-1]['timestamp'],
"%a, %d %b %Y %H:%M:%S %Z") - datetime.timedelta(
milliseconds=100))
else:
self.get_metrics()
res = pd.DataFrame.from_records(self.data[service])
res.timestamp = pd.to_datetime(res['timestamp']).dt.tz_localize(None)
res.set_index('timestamp', inplace=True)
return res
def clean_metrics(self):
if hasattr(self, 'data'):
del self.data
return requests.delete(self.get_url("/monitorings/")).json()
def horizontal_scaling_up(self, instance_type: str, num_of_instances: int = 1):
return self.action(
FogifySDK.Action_type.HORIZONTAL_SCALING.value,
instance_type=instance_type,
instances=num_of_instances,
type="up"
)
def horizontal_scaling_down(self, instance_type: str, num_of_instances: int = 1):
return self.action(
FogifySDK.Action_type.HORIZONTAL_SCALING.value,
instance_type=instance_type,
instances=num_of_instances,
type="down"
)
def vertical_scaling(self, instance_type: str, num_of_instances: int = 1, cpu: str = None, memory: str = None):
if cpu and memory:
raise ExceptionFogifySDK("You can not scale-up both cpu and memory at once.")
if cpu is None and memory is None:
raise ExceptionFogifySDK("You did not select neither cpu nor memory for vertical scaling.")
if cpu:
if type(cpu) != str:
raise ExceptionFogifySDK("cpu parameter should be string")
if cpu[0] not in ['-', '+']:
raise ExceptionFogifySDK("Select +/- to increase or decrease the cpu processing power")
try:
int(cpu[1:])
except Exception:
raise ExceptionFogifySDK("The percent should be numeric")
params = {'action': 'cpu', 'value': cpu}
if memory:
params = {'action': 'memory', 'value': memory}
return self.action(
FogifySDK.Action_type.VERTICAL_SCALING.value,
instance_type=instance_type,
instances=num_of_instances,
**params
)
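# Hedged usage sketch (the instance-type name "edge-node" is a placeholder,
# not part of the SDK): raise the CPU share of up to two matching instances
# by 20 percent.
# fogify.vertical_scaling('edge-node', num_of_instances=2, cpu='+20')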
def stress(self, instance_type: str, duration: int = 60, num_of_instances: int = 1, cpu=None, io=None, vm=None,
vm_bytes=None):
if all(v is None for v in [cpu, io, vm, vm_bytes]):
raise ExceptionFogifySDK("You can not set all stress parameters as None")
res = {}
if cpu:
res['cpu'] = cpu
if io:
res['io'] = io
if vm:
res['vm'] = vm
if vm_bytes:
res['vm_bytes'] = vm_bytes
return self.action(
FogifySDK.Action_type.STRESS.value,
instance_type=instance_type,
instances=num_of_instances,
action=dict(
duration=duration,
**res
)
)
def command(self, instance_type: str, command: str, num_of_instances: int = 1):
res = {}
res['command'] = command
return self.action(
FogifySDK.Action_type.COMMAND.value,
instance_type=instance_type,
instances=num_of_instances,
action=dict(
**res
)
)
def action(self, action_type: Action_type, **kwargs):
headers = {'Content-Type': 'application/json; charset=utf-8'}
action_type_to_url = {
self.Action_type.HORIZONTAL_SCALING.value: "/actions/horizontal_scaling/",
self.Action_type.VERTICAL_SCALING.value: "/actions/vertical_scaling/",
self.Action_type.NETWORK.value: "/actions/network/",
self.Action_type.STRESS.value: "/actions/stress/",
self.Action_type.COMMAND.value: "/actions/command/",
self.Action_type.UPDATE_LINKS.value: "/actions/links/"
}
if action_type not in [e.value for e in FogifySDK.Action_type]:
raise ExceptionFogifySDK("The action type %s is not defined." % action_type)
res = requests.request("POST",
self.get_url(action_type_to_url[action_type]),
json={"params": kwargs}, headers=headers
).json()
if "message" in res and res["message"].upper() == "OK":
return res
else:
raise ExceptionFogifySDK("The API did not return proper response for that action (%S)" % str(res))
def scenario_execution(self,
name: str = None,
remove_previous_metrics: bool = True):
print("Scenario execution process: ")
from tqdm import tqdm
if remove_previous_metrics:
self.clean_metrics()
if len(self.scenarios) == 0:
raise ExceptionFogifySDK("There is no scenarios")
if name is None:
selected_scenarios = self.scenarios[0]
else:
for i in self.scenarios:
if i['name'] == name:
selected_scenarios = i
break
selected_scenarios['actions'] = sorted(selected_scenarios['actions'], key=lambda x: x['position'])
pbar = tqdm(total=sum([int(i['time']) for i in selected_scenarios['actions']]))
start = datetime.datetime.now()
for i in selected_scenarios['actions']:
for j in range(i['time']):
time.sleep(1)
pbar.update(1)
try:
action = i['action'] if 'action' in i else {}
type_action = action['type'] if 'type' in action else ""
params = action['parameters'] if 'parameters' in action else {}
params['instance_type'] = i['instance_type'] if 'instance_type' in i else ""
params['instances'] = i['instances'] if 'instances' in i else ""
if action != "NOOP":
self.action(type_action.upper(), **params)
print("The action %s is executed." % type_action)
except Exception as e:
print("There was a problem at the scenario execution process %s" % e)
print("The input data is %s" % action)
pbar.close()
print("Scenario is finished")
stop = datetime.datetime.now()
if remove_previous_metrics:
self.get_metrics()
return start, stop
def add_node(self, name: str, cpu_cores: int, cpu_freq: int, memory: str, disk=""):
self.check_docker_swarm_existence()
for i in self.nodes:
if i['name'] == name:
raise ExceptionFogifySDK("The device already exists")
self.nodes.append(
dict(
name=name,
capabilities=dict(
processor=dict(
cores=int(cpu_cores),
clock_speed=int(cpu_freq)),
memory=memory,
disk=disk
)
)
)
def add_network(self, name: str, uplink: dict, downlink: dict, capacity: int = None):
self.check_docker_swarm_existence()
for i in self.networks:
if i['name'] == name:
raise ExceptionFogifySDK("The network already exists")
self.networks.append(
dict(
name=name,
uplink=uplink,
downlink=downlink,
capacity=capacity
)
)
def add_bidirectional_network(self, name: str, bidirectional: dict, capacity: int = None):
self.check_docker_swarm_existence()
for i in self.networks:
if i['name'] == name:
raise ExceptionFogifySDK("The network already exists")
self.networks.append(
dict(
name=name,
bidirectional=bidirectional,
capacity=capacity
)
)
def update_network(self, instance_type: str, network: str, network_characteristics: dict = {},
num_of_instances: int = 1):
network_characteristics['network'] = network
return self.action(
FogifySDK.Action_type.NETWORK.value,
instance_type=instance_type,
instances=num_of_instances,
network=network_characteristics
)
def update_link(self,
network_name: str,
from_node: str,
to_node: str,
parameters: dict,
bidirectional: bool = True):
return self.update_links(network_name, [
{
"from_node": from_node,
"to_node": to_node,
"bidirectional": bidirectional,
"parameters": parameters
}
])
def update_links(self,
network_name: str,
links: list):
res = {}
for link in links:
if 'from_node' not in link:
raise ExceptionFogifySDK("A link should have the 'from_node' parameter")
if 'to_node' not in link:
raise ExceptionFogifySDK("A link should have the 'to_node' parameter")
if link['from_node'] not in res: res[link['from_node']] = []
res[link['from_node']].append(copy.deepcopy(link))
if 'bidirectional' in link and link['bidirectional']:
if link['to_node'] not in res: res[link['to_node']] = []
res[link['to_node']].append(copy.deepcopy(link))
responses = {}
for instance_type, instance_links in res.items():
try:
responses[instance_type] = self.action(FogifySDK.Action_type.UPDATE_LINKS.value,
network=network_name,
links=instance_links,
instance_type=instance_type,
instances=1000,
)
except Exception as ex:
responses[instance_type] = ex
return responses
def add_link(self, network_name: str, from_node: str, to_node: str, parameters: dict, bidirectional: bool = True):
self.check_docker_swarm_existence()
exists = False
for i in self.networks:
if network_name == i["name"]:
exists = True
break
if not exists:
raise ExceptionFogifySDK("The network does not exist")
links = i['links'] if 'links' in i else []
if 'properties' in parameters:
links.append({
"from_node": from_node,
"to_node": to_node,
"bidirectional": bidirectional,
"properties": parameters['properties']
})
elif 'uplink' in parameters and 'downlink' in parameters:
links.append({
"from_node": from_node,
"to_node": to_node,
"bidirectional": bidirectional,
"uplink": parameters['uplink'],
"downlink": parameters['downlink'],
})
else:
raise ExceptionFogifySDK("A link should either have 'properties' field or both 'uplink' and 'downlink' fields")
i['links'] = links
res = []
for j in self.networks:
if network_name == j["name"]:
res.append(i)
else:
res.append(j)
self.networks = res
def add_topology_node(self, label: str, service: str, device: str, networks: list = [], replicas: int = 1):
self.check_docker_swarm_existence()
if service not in self.services:
raise ExceptionFogifySDK('There is no service with name %s in swarm file.' % service)
if label in [i['label'] for i in self.topology]:
raise ExceptionFogifySDK('There is another topology node with %s in your model.' % label)
if device not in [i['name'] for i in self.nodes]:
raise ExceptionFogifySDK('There is no device with name %s in your model.' % device)
for network in networks:
if network not in [i['name'] for i in self.networks]:
raise ExceptionFogifySDK('There is no network with name %s in your model.' % network)
if label in [i['label'] for i in self.topology]:
raise ExceptionFogifySDK('There is another topology node with %s in your model.' % label)
self.topology.append(
dict(
service=service,
node=device,
networks=networks,
label=label,
replicas=replicas
)
)
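# Hedged usage sketch (all names are placeholders, and the uplink/downlink
# dictionaries follow whatever schema Fogify expects -- the keys shown are
# illustrative only): a minimal emulated topology built with the helpers above,
# running an existing docker-compose service called "worker".
# fogify.add_node('tiny-device', cpu_cores=2, cpu_freq=1400, memory='1G')
# fogify.add_network('edge-net', uplink={'bandwidth': '10Mbps'}, downlink={'bandwidth': '10Mbps'})
# fogify.add_topology_node('worker-1', service='worker', device='tiny-device', networks=['edge-net'], replicas=2)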
def plot(self, ax, service: str = None, metric: str = None, func=None, label: str = None, duration: dict = {},
style: dict = {}):
df = self.get_metrics_from(service)
df.timestamp = pd.to_datetime(df['timestamp'])
if 'from' in duration:
df = df[df.timestamp >= duration['from']]
if 'to' in duration:
df = df[df.timestamp <= duration['to']]
metric_line = df.set_index('timestamp')[metric]
if func == 'diff':
metric_line = metric_line.diff()
metric_line.plot(ax=ax, x='timestamp', label=label, **style)
return self
def plot_annotations(self, ax, start=None, stop=None, label: str = 'annotation', colors_gist: str = 'gist_yarg',
linestyle: str = '--'):
import matplotlib.pyplot as plt
ad = self.get_annotations().annotations
ad.timestamp = | pd.to_datetime(ad['timestamp']) | pandas.to_datetime |
import math
__author__ = 'r_milk01'
import os
import pandas as pd
from configparser import ConfigParser
import matplotlib.pyplot as plt
import matplotlib
import itertools
import logging
import difflib
import colors as color_util
TIMINGS = ['usr', 'sys', 'wall']
MEASURES = ['max', 'avg']
SPECIALS = ['run', 'threads', 'ranks', 'cores']
'''markers = {0: u'tickleft', 1: u'tickright', 2: u'tickup', 3: u'tickdown', 4: u'caretleft', u'D': u'diamond',
6: u'caretup', 7: u'caretdown', u's': u'square', u'|': u'vline', u'': u'nothing', u'None': u'nothing',
None: u'nothing', u'x': u'x', 5: u'caretright', u'_': u'hline', u'^': u'triangle_up', u' ': u'nothing',
u'd': u'thin_diamond', u'h': u'hexagon1', u'+': u'plus', u'*': u'star', u',': u'pixel', u'o': u'circle',
u'.': u'point', u'1': u'tri_down', u'p': u'pentagon', u'3': u'tri_left', u'2': u'tri_up', u'4': u'tri_right',
u'H': u'hexagon2', u'v': u'triangle_down', u'8': u'octagon', u'<': u'triangle_left', u'>': u'triangle_right'}
'''
MARKERS = ['s', 'o', 4, 5, 7, '|', '*', 1, 2, 3, 4, 6, 7]
FIGURE_OUTPUTS = ['png', 'pdf', 'pgf']
# pd.options.display.mpl_style = 'default'
# matplotlib.rc('font', family='sans-serif')
# matplotlib.rc('xtick', labelsize=20)
# matplotlib.rc('ytick', labelsize=20)
SMALL_SIZE = 11
MEDIUM_SIZE = 13
BIGGER_SIZE = 16
matplotlib.rc('font', size=MEDIUM_SIZE, family='sans-serif') # controls default text sizes
matplotlib.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title
matplotlib.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels
matplotlib.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
matplotlib.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
matplotlib.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
matplotlib.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
# http://nerdjusttyped.blogspot.de/2010/07/type-1-fonts-and-matplotlib-figures.html
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['pgf.texsystem'] = 'pdflatex'
def common_substring(strings, glue='_'):
first, last = strings[0], strings[-1]
seq = difflib.SequenceMatcher(None, first, last, autojunk=False)
mb = seq.get_matching_blocks()
return glue.join([first[m.a : m.a + m.size] for m in mb]).replace(os.path.sep, '')
def make_val(val, round_digits=3):
try:
return round(float(val), round_digits)
except ValueError:
return str(val)
def m_strip(s, timings=None, measures=None):
timings = timings or TIMINGS
measures = measures or MEASURES
for t, m in itertools.product(timings, measures):
s = s.replace('_{}_{}'.format(m, t), '')
return s
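# Hedged example (not in the original script): m_strip drops the
# "_<measure>_<timing>" suffixes built from MEASURES x TIMINGS above, e.g.
# m_strip('assemble_max_wall') -> 'assemble'
# m_strip('solve_avg_usr') -> 'solve'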
def read_files(dirnames, specials=None):
current = None
specials = specials or SPECIALS
header = {'memory': [], 'profiler': [], 'params': [], 'errors': []}
for fn in dirnames:
assert os.path.isdir(fn)
prof = os.path.join(fn, 'profiler.csv')
try:
new = pd.read_csv(prof)
except pd.parser.CParserError as e:
logging.error('Failed parsing {}'.format(prof))
raise e
header['profiler'] = list(new.columns.values)
params = ConfigParser()
param_fn = ['dsc_parameter.log', 'dxtc_parameter.log']
subdirs = ['', 'logs', 'logdata']
params.read([os.path.join(fn, sd, pfn) for sd, pfn in itertools.product(subdirs, param_fn)])
p = {}
for section in params.sections():
p.update({'{}.{}'.format(section, n): make_val(v) for n, v in params.items(section)})
p['grids.total_macro_cells'] = math.pow(p['grids.macro_cells_per_dim'], p['grids.dim'])
p['grids.total_fine_cells'] = p['grids.total_macro_cells'] * math.pow(
p['grids.micro_cells_per_macrocell_dim'], p['grids.dim']
)
param = | pd.DataFrame(p, index=[0]) | pandas.DataFrame |
import datetime as dt
import unittest
from unittest.mock import patch
import numpy as np
import numpy.testing as npt
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_series_equal, assert_index_equal
import seaice.timeseries.warp as warp
from seaice.timeseries.common import SeaIceTimeseriesInvalidArgument
class Test_filter_failed_qa(unittest.TestCase):
def test_failed_qa_set_to_na(self):
columns = ['Foo', 'Bar', 'failed_qa', 'filename']
actual = pd.DataFrame([[1, 2, True, '/foo'], [1, 2, False, '/foo'], [1, 2, True, '/foo']],
columns=columns)
expected = pd.DataFrame([[np.nan, np.nan, True, ''],
[1, 2, False, '/foo'],
[np.nan, np.nan, True, '']], columns=columns)
actual = warp.filter_failed_qa(actual)
assert_frame_equal(expected, actual)
class Test_climatologyMeans(unittest.TestCase):
def test_means(self):
index = pd.period_range(start='2000-05', end='2016-05', freq='12M')
values = np.array([10, 20, 30, 40, 50, 50, 50, 50, 90, 99,
100, 100, 100, 100, 100, 100, 10])
climatology_years = (2010, 2015)
series = pd.Series(values, index=index)
expected = pd.Series(100, index=[5])
actual = warp.climatology_means(series, climatology_years)
assert_series_equal(expected, actual)
def test_multiple_months_in_series(self):
anything = 3.14159
index = pd.PeriodIndex(['2000-05', '2000-11', '2001-05', '2001-11', '2002-05', '2002-11',
'2003-05', '2003-11', '2004-05', '2004-11', '2005-05'],
freq='6M')
climatology_years = (2000, 2001)
values = [15., 99., 15., 99., anything, anything,
anything, anything, anything, anything, anything]
series = pd.Series(values, index=index)
actual = warp.climatology_means(series, climatology_years)
expected = pd.Series([15., 99], index=[5, 11])
assert_series_equal(expected, actual)
class TestFilterHemisphere(unittest.TestCase):
def setUp(self):
datetimes = pd.to_datetime(['1990-01-01', '1995-01-01', '2000-01-01', '2010-01-01'])
daily_period_index = datetimes.to_period(freq='D')
monthly_period_index = datetimes.to_period(freq='M')
self.daily_df = pd.DataFrame({
'hemisphere': ['S', 'N', 'S', 'N'],
'total_extent_km2': [19900000.0, 1995000.0, 2000000.0, 2010000.0],
'total_area_km2': [1990.0, 1995.0, 2000.0, 2010.0]
}, index=daily_period_index)
self.monthly_df = pd.DataFrame({
'hemisphere': ['S', 'N', 'S', 'N'],
'total_extent_km2': [19900000.0, 1995000.0, 2000000.0, 2010000.0],
'total_area_km2': [1990.0, 1995.0, 2000.0, 2010.0]
}, index=monthly_period_index)
def test_daily_works_with_hemisphere(self):
expected = self.daily_df.copy().ix[[0, 2]]
actual = warp.filter_hemisphere(self.daily_df, 'S')
assert_frame_equal(expected, actual)
def test_daily_raises_error_with_none(self):
with self.assertRaises(SeaIceTimeseriesInvalidArgument):
warp.filter_hemisphere(self.daily_df, None)
def test_monthly_works_with_hemisphere(self):
expected = self.monthly_df.copy().ix[[1, 3]]
actual = warp.filter_hemisphere(self.monthly_df, 'N')
assert_frame_equal(expected, actual)
def test_monthly_works_with_none(self):
with self.assertRaises(SeaIceTimeseriesInvalidArgument):
warp.filter_hemisphere(self.monthly_df, None)
class TestCollapseHemisphereFilter(unittest.TestCase):
def test_frame_collapses(self):
frame_length = 10
index = pd.MultiIndex.from_tuples([('foo', 'N')]*frame_length, names=('date', 'hemisphere'))
df = pd.DataFrame({'data': [5]*frame_length}, index=index)
expected = df.reset_index(level='hemisphere', drop=False)
actual = warp.collapse_hemisphere_index(df)
assert_frame_equal(expected, actual)
class TestFilterBeforeAndFilterAfter(unittest.TestCase):
def setUp(self):
datetimes = pd.to_datetime(['1990-01-01', '1995-01-01', '2000-01-01', '2010-01-01'])
daily_period_index = datetimes.to_period(freq='D')
monthly_period_index = datetimes.to_period(freq='M')
self.daily_df = pd.DataFrame({
'total_extent_km2': [19900000.0, 1995000.0, 2000000.0, 2010000.0],
'total_area_km2': [1990.0, 1995.0, 2000.0, 2010.0]
}, index=daily_period_index)
self.daily_df_with_datetimeindex = pd.DataFrame({
'total_extent_km2': [19900000.0, 1995000.0, 2000000.0, 2010000.0],
'total_area_km2': [1990.0, 1995.0, 2000.0, 2010.0]
}, index=datetimes)
self.monthly_df = pd.DataFrame({
'total_extent_km2': [19900000.0, 1995000.0, 2000000.0, 2010000.0],
'total_area_km2': [1990.0, 1995.0, 2000.0, 2010.0]
}, index=monthly_period_index)
self.monthly_df_with_datetimeindex = pd.DataFrame({
'total_extent_km2': [19900000.0, 1995000.0, 2000000.0, 2010000.0],
'total_area_km2': [1990.0, 1995.0, 2000.0, 2010.0]
}, index=monthly_period_index.to_timestamp())
def test_filter_before_works_with_daily_df_and_none(self):
expected = self.daily_df.copy()
actual = warp.filter_before(self.daily_df, None)
assert_frame_equal(expected, actual)
def test_filter_before_works_with_daily_df_and_none_and_DateTimeIndex(self):
expected = self.daily_df_with_datetimeindex.copy()
actual = warp.filter_before(self.daily_df_with_datetimeindex, None)
assert_frame_equal(expected, actual)
def test_filter_before_works_with_daily_df(self):
expected = self.daily_df.copy().ix[1:]
actual = warp.filter_before(self.daily_df, dt.datetime(1990, 5, 21))
assert_frame_equal(expected, actual)
def test_filter_before_works_with_monthly_df_and_none(self):
expected = self.monthly_df.copy()
actual = warp.filter_before(self.monthly_df, None)
assert_frame_equal(expected, actual)
def test_filter_before_works_with_monthly_df_and_none_and_DateTimeIndex(self):
expected = self.monthly_df_with_datetimeindex.copy()
actual = warp.filter_before(self.monthly_df_with_datetimeindex, None)
assert_frame_equal(expected, actual)
def test_filter_before_works_with_monthly_df(self):
expected = self.monthly_df.copy().ix[1:]
actual = warp.filter_before(self.monthly_df, dt.datetime(1990, 5, 21))
assert_frame_equal(expected, actual)
def test_filter_after_works_with_daily_df_and_none(self):
expected = self.daily_df.copy()
actual = warp.filter_after(self.daily_df, None)
assert_frame_equal(expected, actual)
def test_filter_after_works_with_daily_df_and_none_and_DateTimeIndex(self):
expected = self.daily_df_with_datetimeindex.copy()
actual = warp.filter_after(self.daily_df_with_datetimeindex, None)
assert_frame_equal(expected, actual)
def test_filter_after_works_with_daily_df(self):
expected = self.daily_df.copy().ix[0:1]
actual = warp.filter_after(self.daily_df, dt.datetime(1990, 5, 21))
assert_frame_equal(expected, actual)
def test_filter_after_works_with_monthly_df_and_none(self):
expected = self.monthly_df.copy()
actual = warp.filter_after(self.monthly_df, None)
assert_frame_equal(expected, actual)
def test_filter_after_works_with_monthly_df_and_none_and_DateTimeIndex(self):
expected = self.monthly_df_with_datetimeindex.copy()
actual = warp.filter_after(self.monthly_df_with_datetimeindex, None)
| assert_frame_equal(expected, actual) | pandas.util.testing.assert_frame_equal |
import sys
from time import time
import click
import pyhecdss
from vtools.functions import filter
import pandas as pd
import numpy as np
from pydsm.ptm_animator import ptm_animate
from pydsm.hydro_slicer import slice_hydro
from pydsm.postpro import load_location_file, load_location_table
from pydsm.functions import tsmath
@click.group()
def main():
pass
def _build_column(columns, cpart_append, epart_replace=None):
'''
builds column name based on /A/B/C/D/E/F/ DSS pathname and
replacing the cpart with existing cpart + cpart_append value
'''
def append_cpart(name):
parts = name.split('/')
parts[3] = parts[3]+cpart_append
if epart_replace:
parts[5] = epart_replace
return '/'.join(parts)
return [append_cpart(name) for name in columns]
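# Hedged example (the pathname is illustrative): appending "-MEAN" to the C part
# and replacing the E part of a DSS pathname.
# _build_column(['/HYDRO/RSAC054/FLOW//15MIN/DWR/'], '-MEAN', '1DAY')
#   -> ['/HYDRO/RSAC054/FLOW-MEAN//1DAY/DWR/']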
def _restart_console_line():
sys.stdout.write('\r')
sys.stdout.flush()
def _extract_processing(df, godin_filter, daily_average, daily_max, daily_min, monthly_average):
results = df
results_monthly = None
if godin_filter:
results = filter.godin_filter(results)
if daily_average: # if godin filtered then replace that with daily averaged values
tdf = results.resample('1D', closed='right', label='right').mean()
tdf.columns = _build_column(df.columns, '-MEAN', '1DAY')
results = tdf
if daily_max:
tdf = df.resample('1D', closed='right', label='right').max()
tdf.columns = _build_column(df.columns, '-MAX', '1DAY')
results = results.join(tdf, how='outer')
if daily_min:
tdf = df.resample('1D', closed='right', label='right').min()
tdf.columns = _build_column(df.columns, '-MIN', '1DAY')
results = results.join(tdf, how='outer')
if monthly_average:
results_monthly = df.resample('M', closed='right', label='right').mean()
results_monthly.columns = _build_column(df.columns, '-MONTHLY-AVG', '1MON')
return results, results_monthly
def _write_to_dss(od, rtg_daily, rtg_monthly, units, ptype='PER-VAL'):
for i in range(len(rtg_daily.columns)):
r = rtg_daily.iloc[:, i].to_frame()
od.write_rts(r.columns[0], r, units, ptype)
try:
r = rtg_monthly.iloc[:, 0].to_frame()
od.write_rts(r.columns[0], r, units, ptype)
except Exception:
pass
@click.command()
@click.option("-o", "--outfile", default="out.gz", help="path to output file (ends in .zip, .gz, .bz2 for compression), (.h5 for hdf5), (.dss for dss)")
@click.option("--cpart", help="filter by cpart string match (e.g. EC for only loading EC)")
@click.option("-godin", "--godin-filter", is_flag=True, default=False, help="apply godin filter before writing out")
@click.option("-davg", "--daily-average", is_flag=True, default=False, help="average to daily values")
@click.option("-dmax", "--daily-max", is_flag=True, default=False, help="maximum daily value")
@click.option("-dmin", "--daily-min", is_flag=True, default=False, help="minimum daily value")
@click.option("-mavg", "--monthly-average", is_flag=True, default=False, help="monthly average value")
@click.argument("dssfile", type=click.Path(exists=True))
def extract_dss(dssfile, outfile, cpart, godin_filter, daily_average, daily_max, daily_min, monthly_average):
'''
Extract data from DSS file, optionally filtering it and writing to a pickle for quick future loads
'''
pyhecdss.set_message_level(0)
d = pyhecdss.DSSFile(dssfile)
od = None
if outfile.endswith('dss'):
od = pyhecdss.DSSFile(outfile)
catdf = d.read_catalog()
catec = catdf[catdf.C == cpart]
plist = d.get_pathnames(catec)
if len(plist) == 0:
print("No pathnames found in dssfile: %s for cpart=%s" %
(dssfile, cpart))
sys.stdout.write('@ %d / %d ==> Processing: %s' % (0, len(plist), plist[0]))
r, u, p = d.read_rts(plist[0])
results_daily, results_monthly = [], []
rtg_daily, rtg_monthly = _extract_processing(
r, godin_filter, daily_average, daily_max, daily_min, monthly_average)
if od:
_write_to_dss(od, rtg_daily, rtg_monthly, u)
else:
results_daily.append(rtg_daily)
results_monthly.append(rtg_monthly)
for index, p in enumerate(plist, start=1):
_restart_console_line()
sys.stdout.write('@ %d / %d ==> Processing: %s' % (index, len(plist), p))
r, u, p = d.read_rts(p)
rtg_daily, rtg_monthly = _extract_processing(
r, godin_filter, daily_average, daily_max, daily_min, monthly_average)
if od:
_write_to_dss(od, rtg_daily, rtg_monthly, u)
else:
results_daily.append(rtg_daily)
results_monthly.append(rtg_monthly)
if od:
print('Done writing to DSS: %s' % outfile)
od.close()
else:
all_daily = | pd.concat(results_daily, axis=1) | pandas.concat |
import requests
import re
import numpy as np
import pandas as pd
import shutil
'''
Author: <NAME>
Purpose: To extract the Biological Information and images for Pokemon from Bulbapedia.
This is done in four parts. The first part retrieves the bio and CDN directory links.
The second part of the script downloads and stores the Pokemon's image.
The third part creates a vector of booleans for each Pokemon, indicating which of
the 20 selected moves are learnt by that Pokemon.
The final part combines all this data into one comprehensive file.
'''
# Part 1 - biology and imageurl extraction
# Get list of Pokemon Names
df = pd.read_csv('D:/UIP/scraping/pokemonstats.csv', header=0)
pokemon_names = df['Name']
# Lists to store the biological information and bulbapedia image URL for each Pokemon
bio = []
imageurls = []
for i in range(802):
# Handling special cases of Pokemon names with different URL structure
if pokemon_names[i] == 'Nidoran-M':
URL = "https://bulbapedia.bulbagarden.net/wiki/{}_(Pok%C3%A9mon)".format('Nidoran%E2%99%82')
elif pokemon_names[i] == 'Nidoran-F':
URL = "https://bulbapedia.bulbagarden.net/wiki/{}_(Pok%C3%A9mon)".format('Nidoran%E2%99%80')
else:
URL = "https://bulbapedia.bulbagarden.net/wiki/{}_(Pok%C3%A9mon)".format(pokemon_names[i])
# Getting HTML data from bulbapedia page
r = requests.get(URL)
# Searching for html tags with CDN directory
imgloc = re.search(r'<img alt="(.*?) src="(.*?)" width="250"', r.text).group(2)
# Getting CDN sub-directory with Pokemon's image
details = re.search(r'thumb/(.*?).png', imgloc).group(1)
imageurls.append(details)
# Getting the text from the Biology section on Bulbapedia
content = re.search(
'<h2><span class="mw-headline" id="Biology">Biology</span></h2>(.*?)<h2><span class="mw-headline" id="In_the_anime">In the anime</span></h2>',
r.text,
re.DOTALL
).group(1)
# Removing HTML tags and cleaning text
content = re.sub(r'&#.{4};', '', content)
content = re.sub(r'<a href=(.*?)>', '', content)
content = re.sub(r'<(/)?(p|sup|a|b|span|I)>', '', content)
content = re.sub(r'\(Japanese:(.*?)\)', '', content)
content = re.sub(r'<(span) class(.*?)>', '', content)
content = re.sub(r'<img (.*)/>', '', content)
content = re.sub(r'<sup id(.*?)>', '', content)
content = re.sub(r'<div class(.*)>(.*)</div>', '', content)
content = re.sub(r'<br(.*?)/>', '', content)
content = re.sub(r'<(.*)>(.*?)</(.*?)>', '', content)
content = re.sub(r' \.', '.', content)
# Adding Pokemon's bio to the list and notifying user of success
bio.append(content)
print("Completed text retrieval for {}".format(pokemon_names[i]))
# Storing the biological information on a CSV file
bio_data = pd.DataFrame(bio)
bio_data.to_csv('D:/UIP/scraping/pokemonbio.csv')
# Storing image urls on a CSV file for image retrieval in part 2
url_data = pd.DataFrame(imageurls)
url_data.to_csv('D:/UIP/scraping/pokemonimgurls.csv')
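# Hedged illustration (the fragment below is a made-up placeholder, not scraped
# output): each stored entry is the path captured between "thumb/" and ".png"
# above, e.g. "2/21/001Bulbasaur", which Part 2 expands to
# "https://cdn.bulbagarden.net/upload/2/21/001Bulbasaur.png".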
# Part 2 - image extraction
# Get list of Pokemon Names
df = pd.read_csv('D:/UIP/scraping/pokemonstats.csv', header=0)
pokemon_names = df['Name']
# Get Pokemon URLs with CDN directory
dfI = pd.read_csv('D:/UIP/scraping/pokemonimgurls.csv')
pokemon_images = dfI['0']
for i in range(802):
# Define URL depending on Pokemon name and CDN folder structure
URL = 'https://cdn.bulbagarden.net/upload/{}.png'.format(pokemon_images[i])
# Stream image content from URL
resp = requests.get(URL, stream=True)
# Create a local file to store image contents
pname = '{}.jpg'.format(pokemon_names[i])
local_image = open(pname, 'wb')
# Decoding image content
resp.raw.decode_content = True
# Storing the stream data on local image file
shutil.copyfileobj(resp.raw, local_image)
# Remove the image url response object.
del resp
# Prints success message
print('Image retrieved for {}'.format(pname))
# Part 3 - Getting data for moves learnt by Pokemon
# Get list of Pokemon Names
df = pd.read_csv('D:/UIP/scraping/pokemonstats.csv', header=0)
pokemon_names = df['Name']
# List of moves to query for
# move_list = ['Bounce', 'Flamethrower', 'Ice_Beam', 'Thunderbolt', 'Sludge_Bomb', 'Iron_Head', 'Brick_Break', 'Dragon_Pulse', 'Absorb',
# 'Wing_Attack', 'Bite', 'Dazzling_Gleam', 'Confusion', 'Rock_Blast', 'Hypnosis', 'High_Jump_Kick', "Dark_Pulse", 'Mud_Shot', 'Scald', 'Bug_Bite']
move_list = ['Frost_Breath', 'Flame_Charge', 'Bug_Bite', 'Discharge', 'Metal_Claw', 'Psyshock', 'Draco_Meteor', 'Stealth_Rock', 'Magnitude', 'Foul_Play', 'Rock_Throw', 'Hex', 'Shadow_Sneak', 'Scald', 'Synthesis', 'Dazzling_Gleam', 'Wing_Attack', 'Close_Combat', 'High_Jump_Kick', 'Aurora_Veil', 'Shift_Gear']
# Array to store boolean values
move_data = np.zeros((len(pokemon_names), len(move_list)))
for j in range(len(move_list)):
# Get Bulbapedia URL of that move
URL = 'https://bulbapedia.bulbagarden.net/wiki/{}_(move)'.format(move_list[j])
r = requests.get(URL)
# Get a list of all Pokemon that learn that move
imgloc = re.findall(
r'<td style="text-align:center;" width="26px"> <a href="/wiki/(.*?)_', r.text)
# Encode the corresponding column in the move_data array as 0 or 1
for i in range(802):
if pokemon_names[i] in imgloc:
move_data[i, j] = 1
# Prints success message
print('Done for {}'.format(move_list[j]))
# Converts array to dataframe and stores as csv for future use
df = pd.DataFrame(move_data, columns=move_list)
df.to_csv('D:/UIP/scraping/pokemonmoves.csv')
# Part 4 - Creating the complete dataset
# Get list of Pokemon Names
df = | pd.read_csv('D:/UIP/scraping/pokemonstats.csv', header=0) | pandas.read_csv |
# apis for ndp_d2 betaman edition
import requests
from owslib.fes import *
from owslib.etree import etree
from owslib.wfs import WebFeatureService
from io import StringIO
import pandas as pd
import geopandas as gpd
import streamlit as st
mml_key = st.secrets['MML_MTK']
url_mt = 'https://avoin-paikkatieto.maanmittauslaitos.fi/maastotiedot/features/v1/collections/rakennus/items?'
@st.cache(allow_output_mutation=True)
def pno_data(kunta,vuosi=2021):
url = 'http://geo.stat.fi/geoserver/postialue/wfs' # vaestoruutu tai postialue
wfs = WebFeatureService(url=url, version="2.0.0")
layer = f'postialue:pno_tilasto_{vuosi}'
data_ = wfs.getfeature(typename=layer, outputFormat='json') # propertyname=['kunta'],
gdf_all = gpd.read_file(data_)
noneed = ['id', 'euref_x', 'euref_y', 'pinta_ala']
paavodata = gdf_all.drop(columns=noneed)
kuntakoodit = pd.read_csv('config/kunta_dict.csv', index_col=False, header=0).astype(str)
kuntakoodit['koodi'] = kuntakoodit['koodi'].str.zfill(3)
kunta_dict = pd.Series(kuntakoodit.kunta.values, index=kuntakoodit.koodi).to_dict()
paavodata['kunta'] = paavodata['kunta'].apply(lambda x: kunta_dict[x])
dict_feat = pd.read_csv('config/paavo2021_dict.csv', skipinitialspace=True, header=None, index_col=0,squeeze=True).to_dict()
selkopaavo = paavodata.rename(columns=dict_feat).sort_values('Kunta')
pno_valinta = selkopaavo[selkopaavo['Kunta'] == kunta].sort_values('Asukkaat yhteensä', ascending=False)
return pno_valinta
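# Hedged usage sketch (the municipality name is a placeholder and must match the
# "kunta" values in config/kunta_dict.csv): fetch the 2021 Paavo postal-code
# statistics for one municipality, sorted by total population.
# pno_valinta = pno_data('Helsinki', vuosi=2021)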
@st.cache(allow_output_mutation=True)
def pno_hist(kunta,pno):
url = 'http://geo.stat.fi/geoserver/postialue/wfs'
wfs11 = WebFeatureService(url=url, version='1.1.0')#, auth=auth)
kuntakoodit = | pd.read_csv('config/kunta_dict.csv', index_col=False, header=0) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Python Script related to:
Deep Neural Network model to predict the electrostatic parameters in the polarizable classical Drude oscillator force field
<NAME>, <NAME>, <NAME> and <NAME>.
Copyright (c) 2022, University of Maryland Baltimore
"""
import numpy as np
import pandas as pd
from tensorflow import keras
from collections import OrderedDict
def load_train_charge():
charge_fea_train=pd.read_pickle('dgenff_dataset.2021/train_charge_feature.pkl')
charge_target_train=pd.read_pickle('dgenff_dataset.2021/train_charge_target.pkl')
train_charge_dataset=charge_fea_train.iloc[:,1:].values
train_charge_target=charge_target_train.iloc[:,1].values
train_charge_molid=np.array(charge_fea_train.index)
train_charge_atomid=charge_fea_train.iloc[:,0].values
return train_charge_molid,train_charge_atomid,train_charge_dataset,train_charge_target
def load_test_charge():
charge_fea_test=pd.read_pickle('dgenff_dataset.2021/test_charge_feature.pkl')
charge_target_test=pd.read_pickle('dgenff_dataset.2021/test_charge_target.pkl')
test_charge_dataset=charge_fea_test.iloc[:,1:].values
test_charge_target=charge_target_test.iloc[:,1].values
test_charge_molid=np.array(charge_fea_test.index)
test_charge_atomid=charge_fea_test.iloc[:,0].values
return test_charge_molid,test_charge_atomid,test_charge_dataset,test_charge_target
def load_train_pol():
alphathole_fea_train=pd.read_pickle('dgenff_dataset.2021/train_alphathole_feature.pkl')
alphathole_target_train=pd.read_pickle('dgenff_dataset.2021/train_alphathole_target.pkl')
train_alphathole_dataset=alphathole_fea_train.iloc[:,1:].values
train_alpha_target=alphathole_target_train.iloc[:,1].values
train_thole_target=alphathole_target_train.iloc[:,2].values
train_alphathole_molid=np.array(alphathole_fea_train.index)
train_alphathole_atomid=alphathole_fea_train.iloc[:,0].values
return train_alphathole_molid,train_alphathole_atomid,train_alphathole_dataset,train_alpha_target,train_thole_target
def load_test_pol():
alphathole_fea_test=pd.read_pickle('dgenff_dataset.2021/test_alphathole_feature.pkl')
alphathole_target_test=pd.read_pickle('dgenff_dataset.2021/test_alphathole_target.pkl')
test_alphathole_dataset=alphathole_fea_test.iloc[:,1:].values
test_alpha_target=alphathole_target_test.iloc[:,1].values
test_thole_target=alphathole_target_test.iloc[:,2].values
test_alphathole_molid=np.array(alphathole_fea_test.index)
test_alphathole_atomid=alphathole_fea_test.iloc[:,0].values
return test_alphathole_molid,test_alphathole_atomid,test_alphathole_dataset,test_alpha_target,test_thole_target
def DNN_model(input_shape=[1]):
activation_func="relu"
optimizer=keras.optimizers.Adam(lr=0.0005)
model = keras.Sequential()
model.add(keras.layers.InputLayer(input_shape=input_shape))
model.add(keras.layers.Dense(1024, activation=activation_func,kernel_initializer='he_normal',kernel_constraint=keras.constraints.MaxNorm(3)))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.Dense(512, activation=activation_func,kernel_initializer='he_normal',kernel_constraint=keras.constraints.MaxNorm(3)))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.Dense(1))
model.compile(loss='mean_squared_error', optimizer=optimizer)
return model
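# Hedged usage sketch (training hyper-parameters below are placeholders, not
# the published settings): build the charge model with the feature width taken
# from the training set loaded above.
# _, _, train_charge_dataset, train_charge_target = load_train_charge()
# charge_model = DNN_model(input_shape=[train_charge_dataset.shape[1]])
# charge_model.fit(train_charge_dataset, train_charge_target, epochs=100, batch_size=64, verbose=0)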
if __name__ == "__main__":
pred_charge_dict=OrderedDict()
pred_alphathole_dict=OrderedDict()
pred_charge_dict['MOLID'],pred_charge_dict['ATOMID'],test_charge_features,test_charge=load_test_charge()
pred_alphathole_dict['MOLID'],pred_alphathole_dict['ATOMID'],test_alphathole_features,test_alpha,test_thole=load_test_pol()
pred_charges = pd.DataFrame(pred_charge_dict)
pred_charges['QM-CHARGE']=test_charge
pred_alphathole = | pd.DataFrame(pred_alphathole_dict) | pandas.DataFrame |
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
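# `expected` maps each group path to a tuple of
# (set of child group names, set of pandas-object leaf names).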
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
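# Round-trip the frame once per (complib, complevel) pair and verify that the
# compression metadata recorded by PyTables matches what was requested.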
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
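# Decode the raw byte examples to str so the Series holds latin-1 text;
# the empty string doubles as the nan_rep sentinel on the round trip.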
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (strings are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
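# axes=["columns"] appends along the column axis, so the two halves of
# the columns are stitched back together on select.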
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
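# Deleting and re-adding columns below reshuffles the internal block
# ordering while keeping the data identical, so appends should still work.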
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like invalid
# combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
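# Helper: assert the on-disk itemsize recorded for a column in the
# underlying PyTables table description.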
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
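# Helper: build a 12-row MultiIndex of (datetime, s, t) tuples, with
# level names supplied by the caller (or left unnamed).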
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully named
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
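# With data_columns=True the MultiIndex levels become queryable columns,
# so selecting on the "id" level should work directly.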
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
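# C = A - B is a negative timedelta64 column (about -i days per row);
# rows 3-5 are set to NaT below, which the later selections exercise.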
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failer on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
                # all columns as data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
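    # Note: the where-clause selections above work because the queried columns
    # were written as data_columns (or are part of the index); columns stored
    # only in the values block cannot be referenced in a where clause.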
def test_select_with_many_inputs(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(self, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '{end_dt}'".format(end_dt=end_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = "index > '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self, setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be 1, is 10
assert len(results) == 1
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print True` never prints
# True.
where = "index <= '{beg_dt}' & index >= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes(self, setup_path):
# GH 3499, losing frequency info on index recreation
df = DataFrame(
dict(A=Series(range(3), index=date_range("2000-1-1", periods=3, freq="H")))
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "data")
store.put("data", df, format="table")
result = store.get("data")
tm.assert_frame_equal(df, result)
for attr in ["freq", "tz", "name"]:
for idx in ["index", "columns"]:
assert getattr(getattr(df, idx), attr, None) == getattr(
getattr(result, idx), attr, None
)
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("data", df2)
assert store.get_storer("data").info["index"]["freq"] is None
# this is ok
_maybe_remove(store, "df2")
df2 = DataFrame(
dict(
A=Series(
range(3),
index=[
Timestamp("20010101"),
Timestamp("20010102"),
Timestamp("20020101"),
],
)
)
)
store.append("df2", df2)
df3 = DataFrame(
dict(
A=Series(
range(3), index= | date_range("2002-1-1", periods=3, freq="D") | pandas.date_range |
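# Illustrative, self-contained sketch (not part of the test suite above): the
# minimal HDFStore pattern those tests exercise -- write a frame in 'table'
# format with data_columns, then filter it server-side with a where clause.
# The file name used here is hypothetical.
def _example_hdfstore_where_query(path="example_store.h5"):
    import numpy as np
    import pandas as pd

    df = pd.DataFrame(
        {"A": np.random.randn(10), "B": np.random.randn(10)},
        index=pd.date_range("2020-01-01", periods=10),
    )
    with pd.HDFStore(path) as store:
        store.put("df", df, format="table", data_columns=["A"])
        subset = store.select("df", where="A > 0")  # rows where column A is positive
    return subset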
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.base import BaseEstimator, RegressorMixin
from statsmodels.tsa.statespace.sarimax import SARIMAX
from fbprophet import Prophet
class SetTempAsPower:
"""
    Baseline forecaster: min-max scales the chosen temperature column onto the
    observed power range, so hotter days map to higher forecast power.
"""
def __init__(self, col="temp_max"):
self.col = col
def fit(self, X, y):
self.y_fit = y
self.X_fit = X
self.max_power = y.max()
self.min_power = y.min()
return self
def predict(self, X):
self.X_predict = X
minmaxscaler = MinMaxScaler(feature_range=(self.min_power, self.max_power))
minmaxscaler.fit(self.X_fit[[self.col]])
scaled = minmaxscaler.transform(self.X_predict[[self.col]])
self.predict_yhat = pd.Series(
data=scaled.reshape(1, -1)[0], index=self.X_predict.index
)
return self.predict_yhat
def get_pred_values(self):
# All Data is only available after fit and predict
# Build a return DataFrame that looks similar to the prophet output
# date index | y | yhat| yhat_lower | yhat_upper | is_forecast
X_fit = self.X_fit.copy()
X_predict = self.X_predict.copy()
y = self.y_fit
if self.X_fit.equals(self.X_predict):
yhat = self.predict_yhat.copy(deep=True)
else:
self.fit(X_fit, y)
y.name = "y"
fit_yhat = self.predict(X_fit)
pred_yhat = self.predict(X_predict)
y_pred = pd.Series(np.NaN, index=X_predict.index)
y = pd.concat([y, y_pred], axis=0)
yhat = pd.concat([fit_yhat, pred_yhat], axis=0)
yhat.name = "yhat"
y.name = "y"
y = self.y_fit.copy()
full_suite = pd.concat([yhat, y], axis=1)
full_suite["is_forecast"] = 0
full_suite["is_forecast"] = full_suite["y"].isna().astype(int)
full_suite["yhat_upper"] = np.NaN
full_suite["yhat_lower"] = np.NaN
full_suite = full_suite[
["y", "yhat", "yhat_lower", "yhat_upper", "is_forecast"]
]
return full_suite
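# Illustrative usage sketch (hypothetical inputs): X is a feature frame with a
# 'temp_max' column and y is the aligned power series; neither is defined in
# this module.
def _example_set_temp_as_power(X, y):
    baseline = SetTempAsPower(col="temp_max")
    baseline.fit(X, y)
    yhat = baseline.predict(X)            # temperature rescaled onto the power range
    report = baseline.get_pred_values()   # y | yhat | yhat_lower | yhat_upper | is_forecast
    return yhat, report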
class SK_SARIMAX(BaseEstimator, RegressorMixin):
""" A universal sklearn-style wrapper for statsmodels regressors """
def __init__(self, order=(2, 0, 1), seasonal_order=(2, 0, 0, 96), trend="c"):
self.order = order
self.seasonal_order = seasonal_order
self.trend = trend
def fit(self, X, y):
self.fit_X = X
self.fit_y = y
self.model = SARIMAX(
self.fit_y,
order=self.order,
seasonal_order=self.seasonal_order,
trend=self.trend,
exog=self.fit_X,
)
self.results = self.model.fit()
return self.model
def predict(self, X, y=None):
self.predict_X = X
self.forecast_object = self.results.get_forecast(steps=len(X), exog=X)
self.conf_int = self.forecast_object.conf_int()
self.ser = pd.Series(
data=self.forecast_object.predicted_mean.values, index=self.predict_X.index
)
return self.ser
def get_pred_values(self):
# All Data is only available after fit and predict
# Build a return DataFrame that looks similar to the prophet output
# date index y | yhat| yhat_lower | yhat_upper | is_forecast
fitted = self.fit_y.copy(deep=True)
fitted.name = "y"
fitted = pd.DataFrame(fitted)
fitted["is_forecast"] = 0
fitted["yhat"] = self.results.predict()
ser = self.ser.copy(deep=True)
predict_y = pd.DataFrame(self.ser, columns=["yhat"])
predict_y["is_forecast"] = 1
conf_ints = pd.DataFrame(
self.forecast_object.conf_int().values,
index=predict_y.index,
columns=["yhat_lower", "yhat_upper"],
)
unknown = | pd.concat([predict_y, conf_ints], axis=1, sort=True) | pandas.concat |
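# Illustrative sketch of driving the SK_SARIMAX wrapper defined above
# (hypothetical X_train/X_test exogenous frames and y_train target series; the
# default seasonal period of 96 corresponds to a daily cycle of 15-minute data).
def _example_sk_sarimax(X_train, y_train, X_test):
    model = SK_SARIMAX(order=(2, 0, 1), seasonal_order=(2, 0, 0, 96), trend="c")
    model.fit(X_train, y_train)
    forecast = model.predict(X_test)  # Series of point forecasts indexed like X_test
    return forecast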
import ast
import datetime
import time
import math
import pypandoc
import os
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
import pandas as pd
import statsmodels.api as sm
from library.api import API_HOST, fetch_objects, fetch_objects_by_id, get_token
from library.settings import MIN_VIDEO_LENGTH
def get_unix_date(date):
if date:
timestamp = time.mktime(datetime.datetime.strptime(date.split('+')[0], "%Y-%m-%dT%H:%M:%SZ").timetuple())
return int(timestamp)
return None
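# Example (hypothetical value): get_unix_date("2021-03-01T12:00:00Z") parses the
# ISO timestamp (dropping any "+..." offset) and returns it as an int unix
# timestamp; a missing/empty date returns None.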
def html2latex(text):
output = pypandoc.convert(text, 'latex', format='html', extra_args=['-f', 'html+tex_math_dollars'])
return output
def process_step_url(row):
if ('max_step_variation' not in row.index) or (row.max_step_variation == 1):
# no step variations
return '{}/lesson/{}/step/{}'.format(API_HOST, row.lesson_id, row.step_position)
return '{}/lesson/{}/step/{}?alternative={}'.format(API_HOST,
row.lesson_id, row.step_position, row.step_variation)
# API functions
def get_course_structure(course_id, cached=True, token=None):
# use cache
course_structure_filename = 'cache/course-{}-structure.csv'.format(course_id)
if os.path.isfile(course_structure_filename) and cached:
course_structure = pd.read_csv(course_structure_filename)
return course_structure
if not token:
token = get_token()
course = fetch_objects_by_id('courses', course_id, token=token)[0]
sections = fetch_objects('sections', token=token, id=course['sections'])
unit_ids = [unit for section in sections for unit in section['units']]
units = fetch_objects('units', token=token, id=unit_ids)
lesson_ids = [unit['lesson'] for unit in units]
lessons = fetch_objects('lessons', token=token, id=lesson_ids)
step_ids = [step for lesson in lessons for step in lesson['steps']]
steps = fetch_objects('steps', token=token, id=step_ids)
step_id = [step['id'] for step in steps]
step_position = [step['position'] for step in steps]
step_type = [step['block']['name'] for step in steps]
step_lesson = [step['lesson'] for step in steps]
step_correct_ratio = [step['correct_ratio'] for step in steps]
course_structure = pd.DataFrame({'course_id': course_id,
'lesson_id': step_lesson,
'step_id': step_id,
'step_position': step_position,
'step_type': step_type,
'step_correct_ratio': step_correct_ratio})
module_position = [[section['position']]*len(section['units']) for section in sections]
module_position = [value for small_list in module_position for value in small_list]
module_id = [[section['id']]*len(section['units']) for section in sections]
module_id = [value for small_list in module_id for value in small_list]
module_hard_deadline = [[section['hard_deadline']]*len(section['units']) for section in sections]
module_hard_deadline = [value for small_list in module_hard_deadline for value in small_list]
module_begin_date = [[section['begin_date']]*len(section['units']) for section in sections]
module_begin_date = [value for small_list in module_begin_date for value in small_list]
lesson_position = [unit['position'] for unit in units]
module_structure = pd.DataFrame({'lesson_id': lesson_ids,
'lesson_position': lesson_position,
'module_id': module_id,
'module_position': module_position,
'hard_deadline': module_hard_deadline,
'begin_date': module_begin_date})
course_structure = course_structure.merge(module_structure)
course_structure = course_structure.sort_values(['module_position', 'lesson_position', 'step_position'])
course_structure.to_csv(course_structure_filename, index=False)
return course_structure
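# Illustrative call (hypothetical course id): the first run hits the API and
# writes cache/course-1234-structure.csv; later runs with cached=True reuse it.
#
#   structure = get_course_structure(1234)
#   structure[["module_position", "lesson_position", "step_position", "step_type"]]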
def get_course_submissions(course_id, course_structure=pd.DataFrame(), cached=True, token=None):
header = ['submission_id', 'step_id', 'user_id', 'attempt_time', 'submission_time', 'status']
# use cache
course_submissions_filename = 'cache/course-{}-submissions.csv'.format(course_id)
if os.path.isfile(course_submissions_filename) and cached:
course_submissions = pd.read_csv(course_submissions_filename)
course_submissions = course_submissions[header]
return course_submissions
if not token:
token = get_token()
if course_structure.empty:
        course_structure = get_course_structure(course_id, token=token)
course_submissions = pd.DataFrame()
for step in course_structure.step_id.unique().tolist():
step_submissions = pd.DataFrame(fetch_objects('submissions', token=token, step=step))
if step_submissions.empty:
continue
step_submissions = step_submissions.rename(columns={'id': 'submission_id',
'time': 'submission_time',
'attempt': 'attempt_id'})
attempt_ids = step_submissions['attempt_id'].unique().tolist()
step_attempts = pd.DataFrame(fetch_objects_by_id('attempts', attempt_ids, token=token))
step_attempts = step_attempts.rename(columns={'id': 'attempt_id',
'time': 'attempt_time',
'status': 'attempt_status'})
step_submissions = pd.merge(step_submissions, step_attempts, on='attempt_id')
step_submissions['step_id'] = step
course_submissions = course_submissions.append(step_submissions)
if course_submissions.empty:
return pd.DataFrame(columns=header)
course_submissions['submission_time'] = course_submissions['submission_time'].apply(get_unix_date)
course_submissions['attempt_time'] = course_submissions['attempt_time'].apply(get_unix_date)
course_submissions = course_submissions.rename(columns={'user': 'user_id'})
course_submissions = course_submissions[header]
course_submissions.to_csv(course_submissions_filename, index=False)
return course_submissions
def get_course_grades(course_id, cached=True, token=None):
header = ['user_id', 'step_id', 'is_passed', 'score', 'total_score', 'date_joined', 'last_viewed']
# use cache
course_grades_filename = 'cache/course-{}-grades.csv'.format(course_id)
if os.path.isfile(course_grades_filename) and cached:
course_grades = pd.read_csv(course_grades_filename)
course_grades = course_grades[header]
return course_grades
if not token:
token = get_token()
course_grades = pd.DataFrame()
grades = fetch_objects('course-grades', course=course_id, token=token)
for grade in grades:
user_grade = pd.DataFrame(grade['results']).transpose()
user_grade['user_id'] = grade['user']
user_grade['total_score'] = grade['score']
user_grade['date_joined'] = grade['date_joined']
user_grade['last_viewed'] = grade['last_viewed']
course_grades = course_grades.append(user_grade)
course_grades['date_joined'] = course_grades['date_joined'].apply(get_unix_date)
course_grades['last_viewed'] = course_grades['last_viewed'].apply(get_unix_date)
course_grades = course_grades.reset_index(drop=True)
course_grades = course_grades[header]
course_grades.to_csv(course_grades_filename, index=False)
return course_grades
def get_enrolled_users(course_id, token=None):
if not token:
token = get_token()
learner_group = fetch_objects('courses', token=token, pk=course_id)[0]['learners_group']
users = fetch_objects('groups', token=token, pk=learner_group)[0]['users']
return users
def process_options_with_name(data, reply, option_names):
data = ast.literal_eval(data)
reply = ast.literal_eval(reply)['choices']
is_multiple = data['is_multiple_choice']
options = data['options']
option_id = []
clue = []
for op in options:
if op in option_names.option_name.tolist():
val = option_names.loc[option_names.option_name == op, 'option_id'].values[0]
clue_val = option_names.loc[option_names.option_name == op, 'is_correct'].values[0]
else:
val = np.nan
clue_val = np.nan
option_id += [val]
clue += [clue_val]
answer = [(c == r) for c, r in zip(clue, reply)]
options = pd.DataFrame({'is_multiple': is_multiple,
'option_id': option_id,
'answer': answer,
'clue': clue})
options = options[['is_multiple', 'option_id', 'answer', 'clue']]
return options
def get_question(step_id):
source = fetch_objects('step-sources', id=step_id)
try:
question = source[0]['block']['text']
except:
question = '\n'
question = html2latex(question)
return question
def get_step_options(step_id):
source = fetch_objects('step-sources', id=step_id)
try:
options = source[0]['block']['source']['options']
options = pd.DataFrame(options)
is_multiple = source[0]['block']['source']['is_multiple_choice']
except KeyError:
options = | pd.DataFrame(columns=['step_id', 'option_id', 'option_name', 'is_correct', 'is_multiple']) | pandas.DataFrame |
"""
Test various functions regarding chapter 18: Microstructural Features.
"""
import os
import unittest
import numpy as np
import pandas as pd
from mlfinlab.data_structures import get_volume_bars
from mlfinlab.microstructural_features import (get_vpin, get_bar_based_amihud_lambda, get_bar_based_kyle_lambda,
get_bekker_parkinson_vol, get_corwin_schultz_estimator,
get_bar_based_hasbrouck_lambda, get_roll_impact, get_roll_measure,
quantile_mapping, sigma_mapping, MicrostructuralFeaturesGenerator)
from mlfinlab.microstructural_features.encoding import encode_tick_rule_array
from mlfinlab.microstructural_features.entropy import get_plug_in_entropy, get_shannon_entropy, get_lempel_ziv_entropy, \
get_konto_entropy, _match_length
from mlfinlab.util import get_bvc_buy_volume
class TestMicrostructuralFeatures(unittest.TestCase):
"""
Test get_inter_bar_features, test_first_generation, test_second_generation, test_misc
"""
def setUp(self):
"""
Set the file path for the sample dollar bars data.
"""
project_path = os.path.dirname(__file__)
self.path = project_path + '/test_data/dollar_bar_sample.csv'
self.trades_path = project_path + '/test_data/tick_data.csv'
self.data = | pd.read_csv(self.path, index_col='date_time', parse_dates=[0]) | pandas.read_csv |
import os
import re
from pathlib import Path
import pandas as pd
from sparc.curation.tools.errors import IncorrectAnnotationError, NotAnnotatedError, IncorrectDerivedFromError, \
IncorrectSourceOfError, BadManifestError
from sparc.curation.tools.base import Singleton
from sparc.curation.tools.definitions import FILE_LOCATION_COLUMN, FILENAME_COLUMN, SUPPLEMENTAL_JSON_COLUMN, \
ADDITIONAL_TYPES_COLUMN, ANATOMICAL_ENTITY_COLUMN, SCAFFOLD_META_MIME, SCAFFOLD_VIEW_MIME, \
SCAFFOLD_THUMBNAIL_MIME, SCAFFOLD_DIR_MIME, DERIVED_FROM_COLUMN, SOURCE_OF_COLUMN, MANIFEST_DIR_COLUMN, MANIFEST_FILENAME, SHEET_NAME_COLUMN
from sparc.curation.tools.utilities import is_same_file
class ManifestDataFrame(metaclass=Singleton):
# dataFrame_dir = ""
_manifestDataFrame = None
_scaffold_data = None
_dataset_dir = None
def setup_dataframe(self, dataset_dir):
self._dataset_dir = dataset_dir
self._read_manifests()
self._scaffold_data = ManifestDataFrame.Scaffold(self)
return self
def _read_manifests(self, depth=0):
self._manifestDataFrame = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
def llr(k):
    '''
    Compute the log-likelihood ratio (G-test) score for a 2x2 contingency table.
    See http://tdunning.blogspot.de/2008/03/surprise-and-coincidence.html
    and
    https://github.com/apache/mahout/blob/4f2108c576daaa3198671568eaa619266e787b1a/math/src/main/java/org/apache/mahout/math/stats/LogLikelihood.java#L100
    and https://en.wikipedia.org/wiki/G-test
    '''
def H(k):
N = k.values.sum()
wtf = pd.np.log(k / N + (k == 0).astype(int))
return (k / N * wtf).values.sum()
return 2 * k.values.sum() * (H(k) - H(k.sum(0)) - H(k.sum(1)))
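# Worked example (illustrative): k is the 2x2 contingency table
# [[k11, k12], [k21, k22]] of co-occurrence counts. A table whose rows and
# columns are independent scores ~0, while counts concentrated on the diagonal
# score highly.
def _example_llr():
    strong = pd.DataFrame([[100, 10], [10, 100]])  # strong association
    even = pd.DataFrame([[50, 50], [50, 50]])      # independent -> llr == 0
    return llr(strong), llr(even)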
def compute_scores(A, B, skip_diagonal=False):
'''
Compute the scores for a primary and secondary action (across all items)
'A' is the user x item matrix of the primary action
'B' is the user x item matrix of a secondary action
the result is a dataframe where
'primary_item' is the item associated with the primary event (ie 'buy')
'secondary_item' is the item associated with the secondary event (ie 'click')
'score' is the log likelihood score representing the strength of
association (the higher the score the stronger association)
For example, people who 'primary action' item_A do 'secondary action'
item_B with strength 'score'
Loosely based on:
https://github.com/apache/mahout/blob/4f2108c576daaa3198671568eaa619266e787b1a/math-scala/src/main/scala/org/apache/mahout/math/cf/SimilarityAnalysis.scala#L312
'''
# We ignore the original interaction value and create a binary (binarize) 0-1 matrix
# as we only consider whether interactions happened or did not happen
# only consider action B for users where action A occured
A = (A != 0).astype(int)
B = (B != 0).astype(int)
AtB = A.loc[B.index, B.columns].transpose().dot(B)
numInteractionsWithAandB = AtB
numInteractionsWithA = A.sum()
numInteractionsWithB = B.sum()
# Total number of interactions is
# total number of users where primary event occurs
numInteractions = len(A)
K11 = numInteractionsWithAandB
K12 = numInteractionsWithAandB.rsub(numInteractionsWithA, axis=0).dropna()
K21 = numInteractionsWithAandB.rsub(numInteractionsWithB, axis=1)
K22 = numInteractions + numInteractionsWithAandB.sub(
numInteractionsWithB, axis=1).sub(
numInteractionsWithA, axis=0)
the_data = zip(
K11.apply(lambda x: x.index + '_' + x.name).values.flatten(),
K11.values.flatten(), K12.values.flatten(), K21.values.flatten(),
K22.values.flatten())
container = []
for name, k11, k12, k21, k22 in the_data:
item_A, item_B = name.split('_')
if k11 != 0 and not (skip_diagonal and item_A == item_B):
df = pd.DataFrame([[k11, k12], [k21, k22]])
score = llr(df)
else:
            score = 0  # Warning! the llr score could still be computed, but for co-occurrence purposes it doesn't make sense when the co-occurrence count (k11) is zero
container.append((item_A, item_B, score))
return pd.DataFrame(
container, columns=['primary_item', 'secondary_item',
'score']).sort_values(
['primary_item', 'score'],
ascending=[True, False])
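# Illustrative toy call (hypothetical data): both frames are user x item binary
# interaction matrices over the same users; item names must not contain '_'
# because compute_scores joins and re-splits pair names on that character.
def _example_compute_scores():
    A = pd.DataFrame([[1, 0], [0, 1], [1, 1]], columns=["a", "b"])  # e.g. "buy"
    B = pd.DataFrame([[1, 1], [0, 1], [1, 0]], columns=["a", "b"])  # e.g. "click"
    return compute_scores(A, B, skip_diagonal=False)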
def train(raw_data, primary_action):
'''
this is like the 'main' funciton: takes a dataset and returns a dataframe with LLR scores
raw_data is a dataframe with the columns: user, action, item
primary_action is the action from raw_data that we want to determine associations for
'A' is the matrix of primary actions
'B' is a matrix of secondary actions
'''
    # pretty sure we only want to keep users (and user metadata) where the primary action occurs
    # not sure where this happens in the Mahout code though...
users_who_did_primary_action = pd.unique(
raw_data.loc[raw_data.action == primary_action, 'user'])
data = raw_data.loc[raw_data.user.isin(users_who_did_primary_action), :]
freq = data.groupby(['user', 'action',
'item']).size().to_frame('freq').reset_index()
freq_actions = freq.groupby('action')
A = freq_actions.get_group(primary_action).pivot(
index='user', columns='item', values='freq').fillna(0)
cco_results = []
for action, matrix in freq_actions:
skip_diagonal = primary_action == action
B = matrix.pivot(index='user', columns='item', values='freq').fillna(0)
scores = compute_scores(A, B, skip_diagonal)
scores['primary_action'] = primary_action
scores['secondary_action'] = action
cco_results.append(scores)
all_data = pd.concat(cco_results, ignore_index=True)
return all_data[[
'primary_action', 'primary_item', 'secondary_action', 'secondary_item',
'score'
]]
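# Illustrative end-to-end call (hypothetical interaction log): raw_data needs
# the columns user, action and item; the primary action is the one we want
# item-to-item association scores for.
def _example_train():
    raw_data = pd.DataFrame({
        "user":   ["u1", "u1", "u2", "u2", "u3", "u3"],
        "action": ["buy", "click", "buy", "click", "click", "buy"],
        "item":   ["a", "b", "a", "a", "b", "b"],
    })
    return train(raw_data, primary_action="buy")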
if __name__ == '__main__':
'''
These unit tests are the same as the Apache Mahout unit tests per:
https://github.com/apache/mahout/blob/08e02602e947ff945b9bd73ab5f0b45863df3e53/spark/src/test/scala/org/apache/mahout/cf/SimilarityAnalysisSuite.scala#L49
https://github.com/apache/mahout/blob/08e02602e947ff945b9bd73ab5f0b45863df3e53/math/src/test/java/org/apache/mahout/math/stats/LogLikelihoodTest.java#L50
https://github.com/apache/mahout/blob/4f2108c576daaa3198671568eaa619266e787b1a/math/src/main/java/org/apache/mahout/math/stats/LogLikelihood.java#L1
'''
# test compute_scores
a = pd.DataFrame(
[(1, 1, 0, 0, 0), (0, 0, 1, 1, 0), (0, 0, 0, 0, 1), (1, 0, 0, 1, 0)],
columns=['a', 'b', 'c', 'd', 'e'])
b = pd.DataFrame(
[(1, 1, 1, 1, 0), (1, 1, 1, 1, 0), (0, 0, 1, 0, 1), (1, 1, 0, 1, 0)],
columns=['a', 'b', 'c', 'd', 'e'])
AtAControl = pd.DataFrame(
[(0.0, 1.7260924347106847, 0.0, 0.0, 0.0),
(1.7260924347106847, 0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 1.7260924347106847, 0.0),
(0.0, 0.0, 1.7260924347106847, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0, 0.0)])\
.round(5)\
.as_matrix()
AtBControl = pd.DataFrame(
[(1.7260924347106847, 1.7260924347106847, 1.7260924347106847, 1.7260924347106847, 0.0),
(0.6795961471815897, 0.6795961471815897, 0.6795961471815897, 0.6795961471815897, 0.0),
(0.6795961471815897, 0.6795961471815897, 0.6795961471815897, 0.6795961471815897, 0.0),
(1.7260924347106847, 1.7260924347106847, 1.7260924347106847, 1.7260924347106847, 0.0),
(0.0, 0.0, 0.6795961471815897, 0.0, 4.498681156950466)])\
.round(5)\
.as_matrix()
ata = compute_scores(a, a, True).pivot(
index='primary_item', columns='secondary_item',
values='score').round(5).as_matrix()
atb = compute_scores(a, b, False).pivot(
index='primary_item', columns='secondary_item',
values='score').round(5).as_matrix()
assert pd.np.array_equal(ata, AtAControl)
assert | pd.np.array_equal(atb, AtBControl) | pandas.np.array_equal |
from unittest import TestCase
import pandas as pd
import infiltrate.models.deck_search as deck_search
from infiltrate.value_frames import _CardValueDataframeGetter
class TestCardValueDataframeGetter(TestCase):
def test__merge_value_dfs(self):
deck_search_value_dfs = [
deck_search.DeckSearchValue_DF(
{
"set_num": [1, 1, 2],
"card_num": [1, 2, 1],
"count_in_deck": [1, 1, 1],
"decksearch_id": [1, 2, 3],
"value": [1, 3, 5],
}
),
deck_search.DeckSearchValue_DF(
{
"set_num": [1, 1, 2],
"card_num": [1, 2, 2],
"count_in_deck": [1, 1, 1],
"decksearch_id": [1, 2, 3],
"value": [2, 4, 6],
}
),
]
sut = _CardValueDataframeGetter
summed_df = sut._merge_playabilities(deck_search_value_dfs)
self.assertTrue(summed_df["value"].equals( | pd.Series([3, 7, 5, 6]) | pandas.Series |
from datetime import date
import numpy as np
import pandas as pd
from scipy import stats
def gross_rate_of_return(initial_value, final_value):
    assert initial_value, 'initial value cannot be zero!'
return (final_value - initial_value) / initial_value
def compound_annual_growth_rate(initial_value, final_value, start_date: date, end_date: date):
assert end_date > start_date, f'end date must be larger than start date'
r = gross_rate_of_return(initial_value, final_value)
return np.power(1 + r, 365 / (end_date - start_date).days)
def CAGR(initial_value, final_value, start_date: date, end_date: date):
return compound_annual_growth_rate(initial_value, final_value, start_date, end_date)
def sharp_ratio(r_p, r_f, sigma):
return (r_p - r_f) / sigma
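# Worked example (hypothetical figures): growing 100 into 121 over exactly two
# years is a gross return of 21%; compound_annual_growth_rate returns the
# annualized growth factor (1 + r) ** (365 / days) ~= 1.10, i.e. about 10% per
# year, and sharp_ratio(0.08, 0.02, 0.12) = (0.08 - 0.02) / 0.12 = 0.5.
def _example_return_metrics():
    r = gross_rate_of_return(100, 121)                           # 0.21
    growth = CAGR(100, 121, date(2020, 1, 1), date(2022, 1, 1))  # ~1.10
    sr = sharp_ratio(0.08, 0.02, 0.12)                           # 0.5
    return r, growth, sr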
def max_drawdown(data):
series = | pd.Series(data) | pandas.Series |
'''
...
'''
import os
import numpy as np
import pandas as pd
import datetime as dt
from tqdm import tqdm
import lib.utils as utils
import lib.db_utils as dutils
from datetime import timedelta
from collections import defaultdict
from dateutil.relativedelta import relativedelta
class DefineCohortSettings:
def __init__(self, vacineja2plus_df, init_cohort, final_cohort):
        '''
        Hold the linked VacineJa+ dataframe and the cohort window used by the
        eligibility and matching routines.

        Args:
            vacineja2plus_df: linked dataframe with one row per person (CPF),
                carrying vaccination, testing, hospitalization and death dates.
            init_cohort: first day of the cohort (datetime.date).
            final_cohort: last day of the cohort (datetime.date).
        '''
self.vacineja2plus_df = vacineja2plus_df.copy()
self.init_cohort = init_cohort
self.final_cohort = final_cohort
def define_eligibility(self, partial=14, fully=14, return_=True):
        '''
        Flag each person's immunization status within the cohort window and the
        test-based eligibility columns used later for matching.
        '''
subset = ["DATA D1(VACINADOS)", "DATA D2(VACINADOS)"]
self.vacineja2plus_df["VACINA STATUS - COORTE"] = self.vacineja2plus_df[subset].apply(lambda x: f_when_vaccine(x, self.init_cohort, self.final_cohort), axis=1)
self.vacineja2plus_df["IMUNIZACAO MAXIMA ATE FIM DA COORTE"] = self.vacineja2plus_df[subset].apply(lambda x: f_immunization(x, self.init_cohort, self.final_cohort, partial, fully), axis=1)
# --> Eligibility by tests
subset = ["DATA SOLICITACAO(TESTES)", "DATA COLETA(TESTES)", "RESULTADO FINAL GAL-INTEGRASUS"]
self.vacineja2plus_df["ELIGIBILIDADE TESTE"] = self.vacineja2plus_df[subset].apply(lambda x: f_eligible_test(x, self.init_cohort, self.final_cohort), axis=1)
subset = "IMUNIZACAO MAXIMA ATE FIM DA COORTE"
aptos = ["NAO VACINADO", "PARCIALMENTE IMUNIZADO", "TOTALMENTE IMUNIZADO", "VACINADO SEM IMUNIZACAO"]
self.vacineja2plus_df["ELIGIBILIDADE COORTE GERAL"] = self.vacineja2plus_df[subset].apply(lambda x: "APTO" if x in aptos else "NAO APTO")
# --> Eligibility for cases partial
self.vacineja2plus_df["ELIGIBILIDADE EXPOSTO PARCIAL"] = self.vacineja2plus_df[subset].apply(lambda x: "APTO" if x=="PARCIALMENTE IMUNIZADO" else "NAO APTO")
# --> Eligibility for cases fully
self.vacineja2plus_df["ELIGIBILIDADE EXPOSTO TOTAL"] = self.vacineja2plus_df[subset].apply(lambda x: "APTO" if x=="TOTALMENTE IMUNIZADO" else "NAO APTO")
# --> Create column with age based on the final of cohort.
self.vacineja2plus_df["IDADE"] = self.vacineja2plus_df["DATA NASCIMENTO(VACINEJA)"].apply(lambda x: relativedelta(self.final_cohort, x.date()).years)
self.vacineja2plus_df = self.vacineja2plus_df.drop_duplicates(subset=["CPF"], keep="first")
if return_:
return self.vacineja2plus_df
def dynamical_matching(self, vaccine="CORONAVAC", return_=True, verbose=False, age_thr=18, seed=0):
'''
Description.
Args:
return_:
Return:
'''
if "ELIGIBILIDADE TESTE" not in self.vacineja2plus_df.columns:
return -1
datelst = utils.generate_date_list(self.init_cohort, self.final_cohort)
# --> Apply essential filters
# First, consider only people with age older or equal to 18 years old.
df = self.vacineja2plus_df[self.vacineja2plus_df["IDADE"]>=age_thr]
df = df[df["OBITO INCONSISTENCIA"]!="S"]
df = df[df["DATA VACINA CONSISTENCIA"]!="N"]
# Filter by eligibility
df = df[(df["ELIGIBILIDADE TESTE"]=="APTO") & (df["ELIGIBILIDADE COORTE GERAL"]=="APTO")]
# Obtain set of vaccinated and unvaccinated.
df_vaccinated = df[df["VACINA(VACINADOS)"]==vaccine]
df_vaccinated = df_vaccinated.dropna(subset=["DATA D1(VACINADOS)"], axis=0)
df_unvaccinated = df[pd.isna(df["VACINA(VACINADOS)"])]
if verbose:
print(f"Dimensão de elegíveis após aplicacão das condições: {df.shape}")
print(f"Número restante de óbitos: {df['DATA OBITO'].notnull().sum()}")
print(f"Número restante de hospitalizados: {df['DATA HOSPITALIZACAO'].notnull().sum()}")
print(f"Número restante de testes: {df['DATA SOLICITACAO(TESTES)'].notnull().sum()}")
print(f"Número de vacinados elegíveis para {vaccine}: {df_vaccinated.shape[0]}")
#condition_exposed1 = df_vaccinated["ELIGIBILIDADE TESTE"]=="APTO"
#condition_exposed2 = df_vaccinated["ELIGIBILIDADE COORTE GERAL"]=="APTO"
#df_vaccinated = df_vaccinated[(condition_exposed1) & (condition_exposed2)]
#condition_unexposed1 = df_unvaccinated["ELIGIBILIDADE TESTE"]=="APTO"
#condition_unexposed2 = df_unvaccinated["ELIGIBILIDADE COORTE GERAL"]=="APTO"
#df_unvaccinated = df_unvaccinated[(condition_unexposed1) & (condition_unexposed2)]
# -- CREATE CONTROL RESERVOIR --
control_dates = {
"D1": defaultdict(lambda:-1),
"DEATH": defaultdict(lambda:-1),
"HOSPITAL": defaultdict(lambda:-1)
}
control_reservoir = defaultdict(lambda:[])
control_used = defaultdict(lambda: False)
df_join = pd.concat([df_vaccinated, df_unvaccinated])
print("Criando reservatório de controles ...")
for j in tqdm(range(0, df_join.shape[0])):
cpf = df_join["CPF"].iat[j]
age = df_join["IDADE"].iat[j]
sex = df_join["SEXO(VACINEJA)"].iat[j]
d1 = df_join["DATA D1(VACINADOS)"].iat[j]
dt_death = df_join["DATA OBITO"].iat[j]
dt_hospt = df_join["DATA HOSPITALIZACAO"].iat[j]
control_reservoir[(age,sex)].append(cpf)
if not pd.isna(d1):
control_dates["D1"][cpf] = d1.date()
if not pd.isna(dt_death):
control_dates["DEATH"][cpf] = dt_death.date()
if not pd.isna(dt_hospt):
control_dates["HOSPITAL"][cpf] = dt_hospt.date()
if seed!=0:
np.random.seed(seed)
for key in control_reservoir.keys():
np.random.shuffle(control_reservoir[key])
matchings = defaultdict(lambda:-1)
print("Executando pareamento ...")
for cur_date in tqdm(datelst):
# Select all people who was vaccinated at the current date
df_vaccinated["compare_date"] = df_vaccinated["DATA D1(VACINADOS)"].apply(lambda x: "TRUE" if x.date()==cur_date else "FALSE")
current_vaccinated = df_vaccinated[df_vaccinated["compare_date"]=="TRUE"]
#print(current_vaccinated.shape)
cpf_list = current_vaccinated["CPF"].tolist()
age_list = current_vaccinated["IDADE"].tolist()
sex_list = current_vaccinated["SEXO(VACINEJA)"].tolist()
date_list = current_vaccinated["DATA D1(VACINADOS)"].tolist()
# For each person vaccinated at the current date, check if there is a control for he/she.
for j in range(0, len(cpf_list)):
pair = find_pair(cur_date, age_list[j], sex_list[j], control_reservoir, control_used, control_dates)
if pair!=-1:
matchings[cpf_list[j]] = pair
items_matching = matchings.items()
pareados = pd.DataFrame({"CPF CASO": [ x[0] for x in items_matching ], "CPF CONTROLE": [ x[1] for x in items_matching ]})
events_df = self.get_intervals(pareados, df_vaccinated, df_unvaccinated)
matched = defaultdict(lambda:False)
for cpf in [ x[0] for x in items_matching ]+[ x[1] for x in items_matching ]:
matched[cpf]=True
df_join["PAREADO"] = df_join["CPF"].apply(lambda x: "SIM" if matched[x] else "NAO")
return events_df, df_join
def get_intervals(self, df_pairs, df_vac, df_unvac):
        '''
        Collect the relevant event dates (D1/D2, test request, hospitalization,
        death) for the matched case-control pairs.

        Args:
            df_pairs: dataframe with the matched "CPF CASO"/"CPF CONTROLE" pairs.
            df_vac: eligible vaccinated subset of the cohort.
            df_unvac: eligible unvaccinated subset of the cohort.
        '''
pareado = defaultdict(lambda: False)
matched_cpfs = df_pairs["CPF CASO"].tolist()+df_pairs["CPF CONTROLE"].tolist()
[ pareado.update({cpf:True}) for cpf in matched_cpfs ]
data_teste = defaultdict(lambda: np.nan)
data_hospitalizado = defaultdict(lambda:np.nan)
data_obito = defaultdict(lambda:np.nan)
data_d1 = defaultdict(lambda:np.nan)
data_d2 = defaultdict(lambda:np.nan)
df_join = pd.concat([df_vac, df_unvac])
for j in range(0, df_join.shape[0]):
cpf = df_join["CPF"].iat[j]
obito = df_join["DATA OBITO"].iat[j]
teste = df_join["DATA SOLICITACAO(TESTES)"].iat[j]
hospitalizacao = df_join["DATA HOSPITALIZACAO"].iat[j]
d1_dt = df_join["DATA D1(VACINADOS)"].iat[j]
d2_dt = df_join["DATA D2(VACINADOS)"].iat[j]
if not pd.isna(obito):
data_obito[cpf] = obito
if not pd.isna(d1_dt):
data_d1[cpf] = d1_dt
if not | pd.isna(d2_dt) | pandas.isna |
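# Illustrative driver (hypothetical inputs): vacineja2plus_df is the linked
# cohort dataframe expected by DefineCohortSettings above, and the cohort
# window is a pair of datetime.date objects.
def _example_cohort(vacineja2plus_df, init_cohort, final_cohort):
    settings = DefineCohortSettings(vacineja2plus_df, init_cohort, final_cohort)
    settings.define_eligibility(partial=14, fully=14, return_=False)
    events_df, df_join = settings.dynamical_matching(vaccine="CORONAVAC", seed=1)
    return events_df, df_join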
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
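# Note on test_calc_rets_one_generic: the expected 2015-01-04 value follows from
# the weights, 0.5 * 0.05 (CLF5) + 0.5 * 0.1 (CLG5) = 0.075, while the single-
# contract days pass the instrument return straight through (0.1 and 0.8).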
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL0", "CL1"])
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')
])
weights2 = pd.DataFrame(vals, index=widx, columns=["CO0", "CO1"])
weights = {"CL": weights1, "CO": weights2}
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15, 0.1, 0.15],
[0.075, 0.45, 0.075, 0.25],
[-0.5, 0.2, pd.np.NaN, pd.np.NaN]],
index=weights["CL"].index.levels[0],
columns=['CL0', 'CL1', 'CO0', 'CO1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_instr_rets_key_error():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5')])
irets = pd.Series([0.02, 0.01, 0.012], index=idx)
vals = [1, 1/2, 1/2, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(KeyError):
util.calc_rets(irets, weights)
def test_calc_rets_nan_instr_rets():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([pd.np.NaN, pd.np.NaN, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
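# NaN instrument returns propagate: CLF5 is NaN on 2015-01-03 and
# 2015-01-04, so the weighted CL1 return is NaN on those dates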
wrets_exp = pd.DataFrame([np.NaN, np.NaN, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_weight():
# see https://github.com/matthewgilbert/mapping/issues/8
# missing weight for return
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
rets = pd.Series([0.02, -0.03, 0.06], index=idx)
vals = [1, 1]
widx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
# extra instrument
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights1 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLH5'), # extra day for the instrument with no weight
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5')
])
rets = pd.Series([0.02, -0.03, 0.06, 0.05, 0.01], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights1)
# leading / trailing returns
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights2 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-05'), 'CLF5')])
rets = pd.Series([0.02, -0.03, 0.06, 0.05], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights2)
def test_to_notional_empty():
instrs = pd.Series()
prices = pd.Series()
multipliers = pd.Series()
res_exp = pd.Series()
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_same_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_extra_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2, 13.1], index=['CLZ6', 'COZ6',
'GCZ6', 'extra'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_missing_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
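# GCZ6 has no price, so its notional value is NaN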
res_exp = pd.Series([-30.20, 2 * 30.5, np.NaN],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_different_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
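# CLZ6 is already in USD; COZ6 notionals (CAD) are divided by USDCAD and
# GCZ6 notionals (AUD) are multiplied by AUDUSD to convert into USD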
res_exp = pd.Series([-30.20, 2 * 30.5 / 1.32, 10.2 * 0.8],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
assert_series_equal(res, res_exp)
def test_to_notional_duplicates():
instrs = pd.Series([1, 1], index=['A', 'A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37, 200.37], index=['A', 'A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100, 100], index=['A', 'A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD', 'USD'], index=['A', 'A'])
fx_rate = pd.Series([1.32], index=['USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD'], index=['A'])
fx_rate = pd.Series([1.32, 1.32], index=['USDCAD', 'USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
def test_to_notional_bad_fx():
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
instr_fx = pd.Series(['JPY'], index=['A'])
fx_rates = pd.Series([1.32], index=['GBPCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
def test_to_contracts_rounder():
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
multipliers = pd.Series([1, 1], index=['CLZ6', 'COZ6'])
# 30.19 / 30.20 is slightly less than 1 so will round to 0
notional = pd.Series([30.19, 2 * 30.5], index=['CLZ6', 'COZ6'])
res = util.to_contracts(notional, prices, multipliers,
rounder=np.floor)
res_exp = pd.Series([0, 2], index=['CLZ6', 'COZ6'])
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier():
notionals = pd.Series([-30.20, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
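# inverse of to_notional: the USD notionals are converted back into each
# instrument's currency and divided by price * multiplier, giving -1, 2, 1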
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier_rounding():
# won't work out to an integer number of contracts, so this tests rounding
notionals = pd.Series([-30.21, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_trade_with_zero_amount():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, 0], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) + 0 * 0.5 / (50.41*100) - 1,
# 0 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 19], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_trade_all_zero_amount_return_empty():
wts = pd.DataFrame([1], index=["CLX16"], columns=[0])
desired_holdings = pd.Series([13], index=[0])
current_contracts = 0
prices = pd.Series([50.32], index=['CLX16'])
multiplier = pd.Series([100], index=['CLX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
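# 13 * 1 / (50.32 * 100) rounds to zero contracts, so no trades are returned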
exp_trades = pd.Series(dtype="int64")
assert_series_equal(trades, exp_trades)
def test_trade_one_asset():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_multi_asset():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=["CL0", "CL1"])
wts2 = pd.DataFrame([1], index=["COX16"], columns=["CO0"])
wts = {"CL": wts1, "CO": wts2}
desired_holdings = pd.Series([200000, -50000, 100000],
index=["CL0", "CL1", "CO0"])
current_contracts = pd.Series([0, 1, 0, 5],
index=['CLX16', 'CLZ16', 'CLF17',
'COX16'])
prices = pd.Series([50.32, 50.41, 50.48, 49.50],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
multiplier = pd.Series([100, 100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
# 100000 * 1 / (49.50*100) - 5,
exp_trades = pd.Series([20, 14, -5, 15], index=['CLX16', 'CLZ16',
'CLF17', 'COX16'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_extra_desired_holdings_without_weights():
wts = pd.DataFrame([0], index=["CLX16"], columns=["CL0"])
desired_holdings = pd.Series([200000, 10000], index=["CL0", "CL1"])
current_contracts = pd.Series([0], index=['CLX16'])
prices = pd.Series([50.32], index=['CLX16'])
multipliers = pd.Series([1], index=['CLX16'])
with pytest.raises(ValueError):
util.calc_trades(current_contracts, desired_holdings, wts, prices,
multipliers)
def test_trade_extra_desired_holdings_without_current_contracts():
# this should treat the missing holdings as 0, since this would often
# happen when adding new positions without any current holdings
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1],
index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
# non-existent contract holdings result in the fill value being a float,
# which casts the result to float64
assert_series_equal(trades, exp_trades, check_dtype=False)
def test_trade_extra_weights():
# extra weights should be ignored
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000], index=[0])
current_contracts = pd.Series([0, 2], index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16'])
multiplier = pd.Series([100, 100], index=['CLX16', 'CLZ16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 2,
exp_trades = pd.Series([20, 18], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_get_multiplier_dataframe_weights():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000], index=["CL"])
imults = util.get_multiplier(wts, ast_mult)
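# every instrument inherits the multiplier of its root asset, CL -> 1000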
imults_exp = pd.Series([1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dict_weights():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
wts2 = pd.DataFrame([0.5, 0.5], index=["COX16", "COZ16"], columns=[0])
wts = {"CL": wts1, "CO": wts2}
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
imults = util.get_multiplier(wts, ast_mult)
imults_exp = pd.Series([1000, 1000, 1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16", "COX16",
"COZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dataframe_weights_multiplier_asts_error():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
with pytest.raises(ValueError):
util.get_multiplier(wts, ast_mult)
def test_weighted_expiration_two_generics():
vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]]
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF15'),
(TS('2015-01-03'), 'CLG15'),
(TS('2015-01-04'), 'CLF15'),
(TS('2015-01-04'), 'CLG15'),
(TS('2015-01-04'), 'CLH15'),
(TS('2015-01-05'), 'CLG15'),
(TS('2015-01-05'), 'CLH15')])
weights = pd.DataFrame({"CL1": vals[0], "CL2": vals[1]}, index=idx)
contract_dates = pd.Series([TS('2015-01-20'),
TS('2015-02-21'),
TS('2015-03-20')],
index=['CLF15', 'CLG15', 'CLH15'])
wexp = util.weighted_expiration(weights, contract_dates)
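# weighted days to expiration, e.g. on 2015-01-04:
# CL1 = 0.5 * 16 + 0.5 * 48 = 32, CL2 = 0.5 * 48 + 0.5 * 75 = 61.5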
exp_wexp = pd.DataFrame([[17.0, 49.0], [32.0, 61.5], [47.0, 74.0]],
index=[TS('2015-01-03'),
TS('2015-01-04'),
TS('2015-01-05')],
columns=["CL1", "CL2"])
assert_frame_equal(wexp, exp_wexp)
def test_flatten():
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
flat_wts = util.flatten(weights)
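# flatten converts the (date, contract) x generic weight matrix into a long
# DataFrame with one row per (date, contract, generic) combination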
flat_wts_exp = pd.DataFrame(
{"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
"contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
"generic": ["CL1", "CL2"] * 4,
"weight": [1, 0, 0, 1, 1, 0, 0, 1]}
).loc[:, ["date", "contract", "generic", "weight"]]
assert_frame_equal(flat_wts, flat_wts_exp)
def test_flatten_dict():
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL1", "CL2"])
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5')])
weights2 = pd.DataFrame(1, index=widx, columns=["CO1"])
weights = {"CL": weights1, "CO": weights2}
flat_wts = util.flatten(weights)
flat_wts_exp = pd.DataFrame(
{"date": ([TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4
+ [TS('2015-01-03')]),
"contract": (['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2
+ ["COF5"]),
"generic": ["CL1", "CL2"] * 4 + ["CO1"],
"weight": [1, 0, 0, 1, 1, 0, 0, 1, 1],
"key": ["CL"] * 8 + ["CO"]}
).loc[:, ["date", "contract", "generic", "weight", "key"]]
assert_frame_equal(flat_wts, flat_wts_exp)
def test_flatten_bad_input():
dummy = 0
with pytest.raises(ValueError):
util.flatten(dummy)
def test_unflatten():
flat_wts = pd.DataFrame(
{"date": [TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4,
"contract": ['CLF5'] * 2 + ['CLG5'] * 4 + ['CLH5'] * 2,
"generic": ["CL1", "CL2"] * 4,
"weight": [1, 0, 0, 1, 1, 0, 0, 1]}
).loc[:, ["date", "contract", "generic", "weight"]]
wts = util.unflatten(flat_wts)
vals = [[1, 0], [0, 1], [1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5')],
names=("date", "contract"))
cols = pd.Index(["CL1", "CL2"], name="generic")
wts_exp = pd.DataFrame(vals, index=widx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_unflatten_dict():
flat_wts = pd.DataFrame(
{"date": ([TS('2015-01-03')] * 4 + [TS('2015-01-04')] * 4
+ [TS('2015-01-03')]),
import numpy as np
import pandas as pd
from bach import Series, DataFrame
from bach.operations.cut import CutOperation, QCutOperation
from sql_models.util import quote_identifier
from tests.functional.bach.test_data_and_utils import assert_equals_data
PD_TESTING_SETTINGS = {
'check_dtype': False,
'check_exact': False,
'atol': 1e-3,
}
def compare_boundaries(expected: pd.Series, result: Series) -> None:
for exp, res in zip(expected.to_numpy(), result.to_numpy()):
if not isinstance(exp, pd.Interval):
assert res is None or np.isnan(res)
continue
np.testing.assert_almost_equal(exp.left, float(res.left), decimal=2)
np.testing.assert_almost_equal(exp.right, float(res.right), decimal=2)
if exp.closed_left:
assert res.closed_left
if exp.closed_right:
assert res.closed_right
def test_cut_operation_pandas(engine) -> None:
p_series = pd.Series(range(100), name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
expected = pd.cut(p_series, bins=10)
result = CutOperation(series=series, bins=10)()
compare_boundaries(expected, result)
expected_wo_right = pd.cut(p_series, bins=10, right=False)
result_wo_right = CutOperation(series, bins=10, right=False)()
compare_boundaries(expected_wo_right, result_wo_right)
def test_cut_operation_bach(engine) -> None:
p_series = pd.Series(range(100), name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
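# with bins=10 over the values 0..99 each bin spans 9.9; the expected
# output of the 'bach' method closes the first interval on both ends so
# that the minimum value 0 falls inside a bin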
ranges = [
pd.Interval(0, 9.9, closed='both'),
pd.Interval(9.9, 19.8, closed='right'),
pd.Interval(19.8, 29.7, closed='right'),
pd.Interval(29.7, 39.6, closed='right'),
pd.Interval(39.6, 49.5, closed='right'),
pd.Interval(49.5, 59.4, closed='right'),
pd.Interval(59.4, 69.3, closed='right'),
pd.Interval(69.3, 79.2, closed='right'),
pd.Interval(79.2, 89.1, closed='right'),
pd.Interval(89.1, 99, closed='right'),
]
expected = pd.Series({num: ranges[int(num / 10)] for num in range(100)})
result = CutOperation(series=series, bins=10, method='bach')().sort_index()
compare_boundaries(expected, result)
ranges_wo_right = [
pd.Interval(0, 9.9, closed='left'),
pd.Interval(9.9, 19.8, closed='left'),
pd.Interval(19.8, 29.7, closed='left'),
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import platform
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
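# sids that never appear in `tuples` still get (all-NaN) columns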
df = df.reindex(columns=df.columns.union(new_sids))
return df
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_load_one_day()
Tests that estimates for multiple columns are correctly loaded for a
single day.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that the next quarter loader correctly loads estimates for
multiple columns on a single day.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that calculate which estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
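# shuffle the event rows (with a fixed seed) so results do not depend on
# the order in which events are passed to the loader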
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111, pd.Timestamp('2015-01-22')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 221, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-02-09')] +
# We never get estimates for sid 10 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateWindows(PreviousEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(bz.data(events), columns)
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-09')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-20')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-01-22')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 310, pd.Timestamp('2015-01-09')),
(10, 311, pd.Timestamp('2015-01-15')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-11')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateWindows(NextEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(bz.data(events), columns)
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp('2015-01-14')
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-09'),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp('2015-01-20')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20')],
'estimate': [130., 131., 230., 231.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30
})
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [140., 240.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40
})
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [150., 250.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50
})
return pd.concat([
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
])
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame({
SID_FIELD_NAME: 0,
'ratio': (-1., 2., 3., 4., 5., 6., 7., 100),
'effective_date': (pd.Timestamp('2014-01-01'), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp('2015-01-07'),
# Split before Q1 event
pd.Timestamp('2015-01-09'),
# Split before Q1 event
pd.Timestamp('2015-01-13'),
# Split before Q1 event
pd.Timestamp('2015-01-15'),
# Split before Q1 event
pd.Timestamp('2015-01-18'),
# Split after Q1 event and before Q2 event
pd.Timestamp('2015-01-30'),
# Filter out - this is after our date index
pd.Timestamp('2016-01-01'))
})
sid_10_splits = pd.DataFrame({
SID_FIELD_NAME: 10,
'ratio': (.2, .3),
'effective_date': (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp('2015-01-07'),
# Apply a single split before Q1 event.
pd.Timestamp('2015-01-20')),
})
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame({
SID_FIELD_NAME: 20,
'ratio': (.4, .5, .6, .7, .8, .9,),
'effective_date': (
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18'),
pd.Timestamp('2015-01-30')),
})
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame({
SID_FIELD_NAME: 30,
'ratio': (8, 9, 10, 11, 12),
'effective_date': (
# Split before the event and before the
# split-asof-date.
pd.Timestamp('2015-01-07'),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp('2015-01-09'),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18')),
})
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame({
SID_FIELD_NAME: 40,
'ratio': (13, 14),
'effective_date': (
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-22')
)
})
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame({
SID_FIELD_NAME: 50,
'ratio': (15, 16),
'effective_date': (
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')
)
})
return pd.concat([
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
])
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows,
ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-12')
]),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150. * 1 / 16, pd.Timestamp('2015-01-09')),
], pd.Timestamp('2015-01-13')),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))
], pd.Timestamp('2015-01-14')),
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-01-21')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 221*.8*.9, pd.Timestamp('2015-02-10')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240.*13*14, pd.Timestamp('2015-02-10')),
(50, 250., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-19')] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11*12, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-02-09')] +
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-02-10')),
(30, 131*11*12, pd.Timestamp('2015-01-20')),
(40, 140. * 13 * 14, pd.Timestamp('2015-02-10')),
(50, 150., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousWithSplitAdjustedWindows(PreviousWithSplitAdjustedWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousSplitAdjustedEstimatesLoader(
bz.data(events),
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 130*1/10, cls.window_test_start_date),
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140, pd.Timestamp('2015-01-09')),
(50, 150.*1/15*1/16, pd.Timestamp('2015-01-09'))],
pd.Timestamp('2015-01-09')
),
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 230*1/10, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/15*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-12')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-13')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-14')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*5, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*.7, cls.window_test_start_date),
(20, 121*.7, pd.Timestamp('2015-01-07')),
(30, 230*11, cls.window_test_start_date),
(40, 240, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100*5*6, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110*.3, pd.Timestamp('2015-01-09')),
(10, 111*.3, pd.Timestamp('2015-01-12')),
(20, 120*.7*.8, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-07')),
(30, 230*11*12, cls.window_test_start_date),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240*13, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 200 * 5 * 6, pd.Timestamp('2015-01-12')),
(10, 110 * .3, pd.Timestamp('2015-01-09')),
(10, 111 * .3, pd.Timestamp('2015-01-12')),
(20, 220 * .7 * .8, cls.window_test_start_date),
(20, 221 * .8, pd.Timestamp('2015-01-17')),
(40, 240 * 13, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
| pd.Timestamp('2015-01-21') | pandas.Timestamp |
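# --- Illustrative sketch (not part of the original zipline test file) ---
# The windowed tests above drive a CustomFactor whose compute() receives a
# trailing (window_length x n_assets) block of estimate values per asset and
# compares it against a hand-built expected timeline. A minimal standalone
# version of that pattern is sketched below; EstimatesData is a hypothetical
# stand-in for the bound pipeline dataset used in the tests.
from zipline.pipeline import CustomFactor
from zipline.pipeline.data import Column, DataSet


class EstimatesData(DataSet):
    # hypothetical dataset exposing a single float column of estimates
    estimate = Column(dtype=float)


class LatestEstimate(CustomFactor):
    inputs = [EstimatesData.estimate]
    window_length = 5

    def compute(self, today, assets, out, estimate):
        # the last row of the window is the most recently knowable estimate
        out[:] = estimate[-1]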
# -*- coding: utf-8 -*-
"""
UC#01: Identify the top 10 criteria spontaneously discussed by customers that have
the most impact on the satisfaction score
"""
import pandas as pd
import requests, json, sys
import numpy as np
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from matplotlib import colors
class DictanovaAPIAuth(requests.auth.AuthBase):
"""Attaches Dictanova Bearer Authentication to the given Request object."""
def __init__(self, id, secret):
self.apiclient_id = id
self.apiclient_secret = secret
self._token = None
def __eq__(self, other):
return all([
self.apiclient_id == getattr(other, 'apiclient_id', None),
self.apiclient_secret == getattr(other, 'apiclient_secret', None)
])
def __ne__(self, other):
return not self == other
def __call__(self, r):
r.headers['Authorization'] = self.get_token()
return r
def get_token(self):
# Get authentication token
if self._token is None:
payload = {
"clientId": self.apiclient_id,
"clientSecret": self.apiclient_secret
}
r = requests.post("https://api.dictanova.io/v1/token", json=payload)
self._token = r.json()
# Always use the one in cache
return "Bearer %s" % self._token["access_token"]
if __name__ == "__main__":
# Prepare Auth handler with API client id and secret
# https://docs.dictanova.io/docs/authentication-and-security
clientId, clientSecret = open("../credentials", "r").readline().strip().split(";")
dictanova_auth = DictanovaAPIAuth(clientId, clientSecret)
####################################################################### TOP OPINION
# Request for top 100 opinions
top100_opinions = []
for page in range(1,3):
r = requests.post(
"https://api.dictanova.io/v1/search/datasets/5b55b264dbcd8100019f0495/terms",
data="", # empty query
params={"page": page, "pageSize": 50}, # https://docs.dictanova.io/docs/pagination
auth=dictanova_auth)
print(r)
top100_opinions += r.json()["items"]
############################################################## COMPUTE REFERENCE CSAT
# Compute the distribution of the CSAT that will serve as reference to compute impact
query = {
"type" : "COUNT",
"field" : "metadata.rating_satisfaction",
"dimensions" : [
{
"field" : "metadata.rating_satisfaction",
"group": "DISTINCT"
}
]
}
# Request
r = requests.post(
"https://api.dictanova.io/v1/aggregation/datasets/5b55b264dbcd8100019f0495/documents",
json=query,
auth=dictanova_auth)
print(r)
ref_distr = {int(v["dimensions"][0]): v["value"] for v in r.json()["periods"][0]["values"]}
ref_total = r.json()["periods"][0]["total"]["value"]
# Pretty print
print("Reference distribution of CSAT for rating_satisfaction:")
ref_sum = 0
for i in range(1,6):
print("\t%d/5 => %d documents (%0.1f%%)" % (i, ref_distr[i], 100.*ref_distr[i]/ref_total))
ref_sum += i*ref_distr[i]
ref_csat = 1.*ref_sum/ref_total
print("Reference CSAT on perimeter: %0.2f" % ref_csat)
# init the computation matrix
ref_distr.update({"opinion": "__REF__", "base": ref_total})
df_impact = | pd.DataFrame.from_records([ref_distr]) | pandas.DataFrame.from_records |
from builtins import range
import pandas as pd
import numpy as np
def _group_parser(indices, pop):
# indices is a dictionary
sel_ind = []
for i in indices:
if len(indices[i]) <= pop :
sel_ind += indices[i]
else:
sel = list(np.random.choice(indices[i],size=pop,replace=False))
sel_ind += sel
return sel_ind
class Trimmer(object):
""" cut unnecessary parts of the data
Parameters
----------
type: string, optional (default="margins")
list of types:
- random: remove part of data randomly
- margins: cut from both top and bottom of data set
- top: cut only top margin
- bottom: cut only bottom margin
sort: boolean, optional (default=True)
If True data will be sorted by target values before trimming.
cut: float in (0,1) range, optional (default=0.05)
fraction of data set size to be trimmed.
shuffle: Boolean, optional (default=True)
To shuffle the data before sampling. Effective only if sort is False.
Attributes
----------
    Ncut_: int
number of removed data points
selected_indices_: list
Axis labels of data points that have been drawn randomly. Available only
if type is 'random'.
Returns
-------
data and target
"""
def __init__(self, type="margins", sort = True,
cut = 0.05, shuffle = True):
self.type = type
self.sort = sort
self.cut = cut
self.shuffle = shuffle
def fit_transform(self, data, target):
"""
Fit the trimmer on df.
"""
df = pd.concat([data, target], axis=1)
if target.columns[0] in data.columns:
cols = list(df.columns)
col = 'target'
while col in cols:
col += 't'
cols[-1] = col
df.columns = cols
else:
col = target.columns[0]
if self.sort == True:
df.sort_values(col,axis=0,inplace=True)
df.index = pd.Index(range(len(df)))
data = df.iloc[:,:-1]
target = pd.DataFrame(df.iloc[:,-1])
elif self.shuffle == True:
df = df.reindex(np.random.permutation(df.index))
df.index = pd.Index(range(len(df)))
data = df.iloc[:,:-1]
target = pd.DataFrame(df.iloc[:,-1])
Nsamples = len(data)
self.Ncut_ = int(self.cut * Nsamples)
if self.type == 'random':
self.selected_indices_ = np.random.choice(range(0, Nsamples), Nsamples-self.Ncut_,replace=False)
data = data.iloc[self.selected_indices_,:]
data.index = pd.Index(range(len(data)))
target = target.iloc[self.selected_indices_,:]
target.index = pd.Index(range(len(target)))
return data, target
elif self.type == 'margins':
            Nhalfcut = self.Ncut_ // 2  # integer division so the slice bounds stay ints
data = data[Nhalfcut:Nsamples-Nhalfcut]
data.index = pd.Index(range(len(data)))
target = target[Nhalfcut:Nsamples-Nhalfcut]
target.index = pd.Index(range(len(target)))
return data, target
elif self.type == 'top':
data = data[self.Ncut_:]
data.index = pd.Index(range(len(data)))
target = target[self.Ncut_:]
target.index = pd.Index(range(len(target)))
return data, target
elif self.type == 'bottom':
data = data[:Nsamples-self.Ncut_]
data.index = pd.Index(range(len(data)))
target = target[:Nsamples-self.Ncut_]
target.index = pd.Index(range(len(target)))
return data, target
else:
raise ValueError("Not a valid type")
class Uniformer(object):
""" select a uniform size of groups of target values
Parameters
----------
bins: int or sequence of scalars or float, to be passed to pandas.cut
If bins is an int, it defines the number of equal-width bins in the range of x.
However, in this case, the range of x is extended by 0.1% on each side to include
the min or max values of x. If bins is a sequence it defines the bin edges allowing
for non-uniform bin width. No extension of the range of x is done in this case.
If bins is a float, it defines the width of bins.
right: bool, optional, default True
Indicates whether the bins include the rightmost edge or not.
If right == True (by default), then the bins [1,2,3,4] indicate (1,2], (2,3], (3,4].
include_lowest: bool, optional, default False
Whether the first interval should be left-inclusive or not.
bin_pop: int or float, optional, default 0.5
bin_pop defines the maximum population of selected samples from each group(bin).
If bin_pop is an int, it defines the maximum number of samples to be drawn from each group.
A float value for bin_pop defines the fraction of the maximum population of groups as the
maximum size of selections from each group.
substitute: str('mean','lower,'upper') or sequence of scalars, optional, default None
If substitute is one of the choices of 'mean', 'lower' or 'upper' strings,
target values will be substitute with mean bin edges, lower bin edge or
upper bin edge, respectively. If bins is a sequence, it defines the target
value for bins. If None, no substitute will happen and original target
values would be passed out.
Attributes
----------
groups_: dataframe, shape (n_targets, n_bins, n_ranges)
return groups info: target values, bin labels, range of bins
grouped_indices_: dictionary
A dict whose keys are the group labels and corresponding values being the
axis labels belonging to each group.
selected_indices_: list
axis labels of data points that are drawn.
Returns
-------
data and target
"""
def __init__(self, bins, bin_pop = 0.5, right = True, include_lowest = True,
substitute = None):
self.bins = bins
self.bin_pop = bin_pop
self.right = right
self.include_lowest = include_lowest
self.substitute = substitute
def fit_transform(self, data, target):
"""
Fit the uniformer on df.
"""
# pandas.cut
col = target.columns[0]
if type(self.bins) == int:
bined = pd.cut(target[col], self.bins, right = self.right, retbins=True, labels=False,include_lowest=self.include_lowest)
elif type(self.bins) == float:
            bins = int((max(target[col]) - min(target[col])) / self.bins)  # number of bins = value range / bin width
bined = | pd.cut(target[col], bins, right = self.right, retbins=True, labels=False,include_lowest=self.include_lowest) | pandas.cut |
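# Illustrative aside (not part of the surrounding modules): Uniformer relies on
# pandas.cut with retbins=True and labels=False, which returns the integer bin
# label of every sample together with the bin edges that were used.
def _demo_pd_cut():
    values = pd.Series([1.0, 2.5, 4.0, 9.5])
    labels, edges = pd.cut(values, bins=3, retbins=True, labels=False)
    # labels -> [0, 0, 1, 2]; edges -> array of 4 bin edges spanning the range
    return labels, edges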
import os
import numpy as np
import pandas as pd
import SimpleITK as sitk
import six
import radiomics
from tqdm import tqdm
from radiomics import firstorder, glcm, imageoperations, glrlm, glszm, ngtdm, gldm, getTestCase
class ExtractRadiomicFeatures():
def __init__(self, input_image,
input_mask=None,
save_path=None,
seq='Flair',
class_ = 'ET',
all_=True):
self.input_image = input_image
        if input_mask is None:
            self.input_mask = np.ones(tuple(list(self.input_image.shape)[:-1]))
        else:
            self.input_mask = input_mask
self.img = sitk.GetImageFromArray(self.input_image)
self.GT = sitk.GetImageFromArray(self.input_mask)
self.save_path = save_path
self.seq = seq
self.all_ = all_
self.class_ = class_
self.feat_dict = {}
def first_order(self):
feat_dict = {}
firstOrderFeatures = firstorder.RadiomicsFirstOrder(self.img, self.GT)
firstOrderFeatures.enableAllFeatures()
firstOrderFeatures.execute()
for (key,val) in six.iteritems(firstOrderFeatures.featureValues):
if self.all_:
self.feat_dict[self.seq + "_" + self.class_ + '_' + key] = val
else:
feat_dict[self.seq + "_" + self.class_ + "_" + key] = val
df = pd.DataFrame(feat_dict)
if self.save_path:
df.to_csv(os.path.join(self.save_path, 'firstorder_features.csv'), index=False)
return df
def glcm_features(self):
glcm_dict = {}
GLCMFeatures = glcm.RadiomicsGLCM(self.img, self.GT)
GLCMFeatures.enableAllFeatures()
GLCMFeatures.execute()
for (key,val) in six.iteritems(GLCMFeatures.featureValues):
if self.all_:
self.feat_dict[self.seq + "_" + self.class_ + '_' + key] = val
else:
glcm_dict[self.seq + "_" + self.class_ + "_" + key] = val
df = pd.DataFrame(glcm_dict)
if self.save_path:
df.to_csv(os.path.join(self.save_path, 'glcm_features.csv'), index=False)
return df
def glszm_features(self):
glszm_dict = {}
GLSZMFeatures = glszm.RadiomicsGLSZM(self.img, self.GT)
GLSZMFeatures.enableAllFeatures() # On the feature class level, all features are disabled by default.
GLSZMFeatures.execute()
for (key,val) in six.iteritems(GLSZMFeatures.featureValues):
if self.all_:
self.feat_dict[self.seq + "_" + self.class_ + '_' + key] = val
else:
glszm_dict[self.seq + "_" + self.class_ + "_" + key] = val
df = pd.DataFrame(glszm_dict)
if self.save_path:
df.to_csv(os.path.join(self.save_path, 'glszm_features.csv'), index=False)
return df
def glrlm_features(self):
glrlm_dict = {}
GLRLMFeatures = glrlm.RadiomicsGLRLM(self.img, self.GT)
GLRLMFeatures.enableAllFeatures() # On the feature class level, all features are disabled by default.
GLRLMFeatures.execute()
for (key,val) in six.iteritems(GLRLMFeatures.featureValues):
if self.all_:
self.feat_dict[self.seq + "_" + self.class_ + '_' + key] = val
else:
glrlm_dict[self.seq + "_" + self.class_ + "_" + key] = val
df = pd.DataFrame(glrlm_dict)
if self.save_path:
df.to_csv(os.path.join(self.save_path, 'glrlm_features.csv'), index=False)
return df
def ngtdm_features(self):
ngtdm_dict = {}
NGTDMFeatures = ngtdm.RadiomicsNGTDM(self.img, self.GT)
NGTDMFeatures.enableAllFeatures() # On the feature class level, all features are disabled by default.
NGTDMFeatures.execute()
for (key,val) in six.iteritems(NGTDMFeatures.featureValues):
if self.all_:
self.feat_dict[self.seq + "_" + self.class_ + '_' + key] = val
else:
ngtdm_dict[self.seq + "_" + self.class_ + "_" + key] = val
df = pd.DataFrame(ngtdm_dict)
if self.save_path:
df.to_csv(os.path.join(self.save_path, 'ngtdm_features.csv'), index=False)
return df
def gldm_features(self):
gldm_dict = {}
GLDMFeatures = gldm.RadiomicsGLDM(self.img, self.GT)
GLDMFeatures.enableAllFeatures() # On the feature class level, all features are disabled by default.
GLDMFeatures.execute()
for (key,val) in six.iteritems(GLDMFeatures.featureValues):
if self.all_:
self.feat_dict[self.seq + "_" + self.class_ + '_' + key] = val
else:
gldm_dict[self.seq + "_" + self.class_ + "_" + key] = val
df = | pd.DataFrame(gldm_dict) | pandas.DataFrame |
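# Illustrative usage sketch (not part of the original module): extract
# first-order features from a random 3D volume with a full mask. The array
# shape is an arbitrary placeholder.
def _demo_extract_features():
    volume = np.random.rand(16, 32, 32)
    mask = np.ones_like(volume, dtype=np.uint8)   # segment the whole volume
    extractor = ExtractRadiomicFeatures(volume, input_mask=mask,
                                        seq='Flair', class_='ET', all_=True)
    extractor.first_order()
    # with all_=True every computed feature accumulates in extractor.feat_dict
    return extractor.feat_dict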
# Script to carry out analysis of the data from the Goodreads API
# Goal: predict my rating and/or positivity (or a combination of them) for unread books
import os
import pickle
import pandas as pd
from sklearn import tree
from sklearn.externals.six import StringIO
import pydot
from collections import Counter
from utilities import pretty_cm
from sklearn.externals import joblib
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.preprocessing import Imputer
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
# from sklearn import grid_search
with open('data/reviews_sentiment.pkl', 'rb') as f:
    df = pickle.load(f)
with open('data/author_info.pkl', 'rb') as f:
    df_author = pickle.load(f)
# Merge author information
df = pd.merge(df, df_author, how='left', left_on='author', right_on='author')
# Set gender to be 0/1. Nones are assumed male (0)
gender = {'male': 0, 'female': 1}
df.replace(to_replace=gender, inplace=True)
df.loc[df['gender'].isnull(), 'gender'] = 0
# Select columns to consider
cols = ['author', 'publication_year', 'average_rating', 'number_pages', 'works', 'fans', 'number_ratings', 'gender',
'rating', 'positivity']
data = df[cols].copy()
# data.dropna(axis=0, inplace=True) # eliminate missing values. Currently not done as we are imputing
ratings = data['rating']
positivity = data['positivity']
# Get most frequent authors.
# Authors I have read only once are grouped together as 'Single-Read' to avoid too many dummy variables
freq_author = Counter(data['author'])
for author in freq_author:
if freq_author[author] < 2:
data.replace(author, 'Single-Read Authors', inplace=True)
# Set authors as dummy variables
dummy_var = | pd.get_dummies(data['author']) | pandas.get_dummies |
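# Illustrative helper (not part of the original script): pd.get_dummies expands
# the categorical 'author' column into one 0/1 indicator column per author,
# which can then be joined back onto the remaining features. 'frame' is a
# placeholder for any DataFrame with an 'author' column.
def _encode_authors(frame):
    dummies = pd.get_dummies(frame['author'])
    return pd.concat([frame.drop(columns=['author']), dummies], axis=1)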
import numpy as np
from scipy import ndimage
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point, Polygon
import shapefile
import os
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from utility import *
class Visualize:
def __init__(self, resolution, input_filepath, output_filepath):
self.resolution = resolution
self.input_filepath = input_filepath
self.output_filepath = output_filepath
self.gridmap = None
self.x_min = None
self.y_min = None
def set_patrol_posts(self, patrol_post_filename):
patrol_posts = pd.read_csv(patrol_post_filename)
self.patrol_posts = self.shift_data_coords(patrol_posts)
###########################################################
# utility
###########################################################
def mask_to_grid(self, map):
return np.ma.masked_where(self.gridmap==0, map)
# for the gridmap, return list of indices of valid cells
# order: begin from top left, then go row-by-row
def get_indices_from_gridmap(self):
# need this complicated way to compute corresponding indices
# within the gridmap boundary because numpy indexing
# starts at lower left corner, and the CSV file assumes
# ordering starts in top left corner
idx = [[], []]
for y in range(self.gridmap.shape[0] - 1, -1, -1):
add_idx = np.where(self.gridmap[y, :] == 1)
idx[0] += [y] * add_idx[0].shape[0]
idx[1] += list(add_idx[0])
return tuple(idx)
    # build a full-grid np.array map from a flat array of per-cell values,
    # ordered to match the valid cells of the gridmap
def get_map_from_array(self, array):
idx = self.get_indices_from_gridmap()
map = np.zeros(self.gridmap.shape)
map[idx] = array
return map
    # build a full-grid np.array map from a single value column in a CSV file
def get_map_from_csv(self, filename):
data = pd.read_csv(filename)
print(' creating map from file {}'.format(filename))
# discard first column: index of grid cell
data.drop(data.columns[0], axis=1, inplace=True)
if data.shape[1] > 1:
raise Exception('ambiguous input: filename {} has more than one value column'.format(filename))
idx = self.get_indices_from_gridmap()
map = np.zeros(self.gridmap.shape)
map[idx] = data.values[:,0]
return map
# maps is a dictionary of {map_name : map}
def save_maps_to_csv(self, filename_out, maps):
idx = self.get_indices_from_gridmap()
map_names = list(maps.keys())
data = {'x_idx': idx[1], 'y_idx': idx[0]}
for i in range(len(maps)):
map_name = map_names[i]
map = maps[map_name]
data[map_name] = map[idx]
data_df = pd.DataFrame(data)
data_df.to_csv(filename_out)
# scale and transform to real crs coordinates
def scale_to_real(self, shape):
assert type(shape) == gpd.GeoDataFrame
shape.geometry = shape.geometry.translate(self.x_min, self.y_min)
shape.geometry = shape.geometry.scale(xfact=self.resolution, yfact=self.resolution, origin=(self.x_min, self.y_min))
return shape
###########################################################
# visualize
###########################################################
# options:
# - log_norm: whether rendering is displayed as log.
# useful for past patrol effort
# - min_value and max_value: bounds on the colorbar scale
# - plot_patrol_post: whether to display patrol posts in images
def save_map(self, feature_map, feature_name, cmap='Greens', log_norm=False, min_value=None, max_value=None, plot_title=True, plot_patrol_post=True):
# mask feature map
feature_map = self.mask_to_grid(feature_map)
if min_value is None:
min_value = feature_map.min()
if max_value is None:
max_value = feature_map.max()
fig, ax = plt.subplots()
if log_norm:
a = plt.imshow(np.flipud(feature_map), interpolation='none', cmap=cmap, extent=[0, self.gridmap.shape[1], 0, self.gridmap.shape[0]], vmin=min_value, vmax=max_value, norm=LogNorm())
else:
a = plt.imshow(np.flipud(feature_map), interpolation='none', cmap=cmap, extent=[0, self.gridmap.shape[1], 0, self.gridmap.shape[0]], vmin=min_value, vmax=max_value)
plt.colorbar(a)
# set plot title and labels
if plot_title:
plt.title(feature_name)
#plt.xticks(np.arange(0,mx+1),[self.min_xval+resolution*i for i in range(mx+1)], rotation=60)
plt.xlabel('x', fontsize=6)
#plt.yticks(np.arange(0,my+1),[self.min_yval+resolution*i for i in range(my+1)])
plt.ylabel('y', fontsize=6)
# plot patrol post locations
if plot_patrol_post and self.patrol_posts is not None:
for index, row in self.patrol_posts.iterrows():
sx = row['x']
sy = row['y']
plt.plot([sx+0.5], [sy+0.5], marker='o', markersize=5, color='aqua', markeredgewidth=1, markeredgecolor='blue')
# set background color
axes = plt.gca()
axes.set_facecolor((0,0,0))
plt.savefig(self.output_filepath + 'plot_{}.png'.format(feature_name))
plt.close()
# title - string
# masked_map - masked np array of map to plot
# shapefiles - dict of (string, GeoDataFrame) files
# crs_out - string that specifies crs of the shapefiles
def save_map_with_features(self, title, masked_map, shapefiles, crs_out, cmap='Reds', vmin=None, vmax=None, log_norm=False):
map_grid = map_to_color_grid(masked_map)
# prepare plot
fig, ax = plt.subplots(figsize=(10,10), dpi=150)
ax.set_facecolor((.9,.9,.9)) # gray background
ax.set_aspect('equal') # displays proportionally
# hide tick labels
ax.tick_params(labelbottom=False)
ax.tick_params(labelleft=False)
# make shapefiles directory
if not os.path.exists(self.output_filepath + 'shapefiles/'):
os.makedirs(self.output_filepath + 'shapefiles/')
# create output shapefile and save
map_grid.crs = crs_out # {'init': crs_out}.
map_grid = self.scale_to_real(map_grid)
if log_norm:
map_grid.plot(ax=ax, column='value', cmap=cmap, legend=True, vmin=vmin, vmax=vmax, norm=LogNorm())
else:
map_grid.plot(ax=ax, column='value', cmap=cmap, legend=True, vmin=vmin, vmax=vmax)
map_grid.to_file('{}shapefiles/map_grid_{}.shp'.format(self.output_filepath, title))
# plot shapefiles
shapefiles['boundary'].plot(ax=ax, facecolor='none', edgecolor='black', linewidth=.5) # facecolor='#e4e8c6'
if 'patrol_posts' in shapefiles:
shapefiles['patrol_posts'].plot(marker='o', markersize=20, color='blue', ax=ax)
if 'roads' in shapefiles:
shapefiles['roads'].plot(ax=ax, facecolor='none', edgecolor='#68200c', linewidth=.5)
if 'water' in shapefiles:
shapefiles['water'].plot(ax=ax, facecolor='#40b4d1', edgecolor='black', linewidth=.5)
if 'rivers' in shapefiles:
shapefiles['rivers'].plot(ax=ax, facecolor='none', edgecolor='#40b4d1', linewidth=.5)
if 'patrol_blocks' in shapefiles:
shapefiles['patrol_blocks'].plot(ax=ax, facecolor='none', edgecolor='black', linewidth=.5)
if 'core_zone' in shapefiles:
shapefiles['core_zone'].plot(ax=ax, facecolor='none', edgecolor='green', linewidth=2)
if 'buffer' in shapefiles:
shapefiles['buffer'].plot(ax=ax, facecolor='none', edgecolor='#666666', linewidth=2)
# save out plot
plt.title('{}'.format(title))
fig.savefig('{}map_{}.png'.format(self.output_filepath, title))
plt.close()
# NOTE: this .npy file must be saved from PatrolProblem.py
# (or from this script)
def get_riskmap_from_npy(self, npy_filename):
riskmap = np.load(npy_filename)
return riskmap
    # get list of np.arrays, where each is a map of predicted risk
# at a different threshold of patrol effort
def get_maps_from_csv(self, maps_filename):
num_extra_cols = 4
map_data = | pd.read_csv(maps_filename) | pandas.read_csv |
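# Illustrative usage sketch (not part of the original module): the class
# expects a binary gridmap (1 = valid cell) to be assigned before rendering.
# Paths, resolution, and the grid shape below are placeholders, and the
# output directory is assumed to exist.
def _demo_visualize():
    viz = Visualize(resolution=1000,
                    input_filepath='inputs/',
                    output_filepath='outputs/')
    viz.gridmap = np.ones((50, 60))          # assume every cell is valid
    risk = np.random.rand(50, 60)
    viz.save_map(risk, 'demo_risk', cmap='Reds', plot_patrol_post=False)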
import json
from pandas.io.json import json_normalize
import pandas as pd
def fixRecordKeys(rows, keyName, default=''):
"""
Adds record keys to rows that are missing the expect key
and resets to default value if value is None
"""
# add missing keys to malformed rows
for row in rows:
if keyName not in row:
row[keyName] = default
elif row[keyName] is None:
row[keyName] = default
def extractAddress(address, parsed):
# ensure min length for subsequent parsing logic
address = address.ljust(65, ' ')
# extract first chars as street
parsed['street'] = address[0:50].strip()
# extract remaining chars as city/state/zip (c,s,z)
csz = address[50:]
c, sz = (csz + ',').split(',')[:2]
s, z = (sz.strip() + ' ').split(' ')[:2]
parsed['city'] = c.strip()
parsed['state'] = s.strip()
parsed['zip'] = z.strip()
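# Illustrative example (not part of the original script): extractAddress assumes
# a fixed-width layout where the first 50 characters hold the street and the
# remainder holds "CITY,ST ZIP". The address below is made up.
def _demo_extract_address():
    parsed = {}
    raw = '123 MAIN ST'.ljust(50) + 'AUSTIN,TX 78701'
    extractAddress(raw, parsed)
    # parsed -> {'street': '123 MAIN ST', 'city': 'AUSTIN', 'state': 'TX', 'zip': '78701'}
    return parsed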
def trimAllColumns(df):
trimStrings = lambda x: x.strip() if type(x) is str else x
return df.applymap(trimStrings)
def removePeriodsFromAllColumns(df):
trimStrings = lambda x: x.replace('.', '') if type(x) is str else x
return df.applymap(trimStrings)
def combineRows(series):
return ','.join(map(str, series.tolist()))
#########################################################
# load JSON string data into python list
with open('./tax_payers.json') as data_file:
jdata = json.load(data_file)
# make sure each comany row has the target officers list key
fixRecordKeys(jdata, 'offiersList', default=[])
# remove unwanted rows
jdata[:] = [
r for r in jdata
if r['agentName'] != 'Not on file' and r['status'] == 'ACTIVE'
]
# transform company and officer records
parsed = {}
for row in jdata:
# rename / remove company fields
row['officersList'] = row.pop('offiersList')
row['companyName'] = row.pop('businessEntityName', '')
row.pop('dbaName', None)
row.pop('ltrCode', None)
row.pop('regionIncLabel', None)
row.pop('regionIncName', None)
row.pop('status', None)
# fixup company address
address = row.pop('businessEntityAdd', '')
extractAddress(address, parsed)
row['companyAddress.street'] = parsed['street']
row['companyAddress.city'] = parsed['city']
row['companyAddress.state'] = parsed['state']
row['companyAddress.zip'] = parsed['zip']
# fixup registered agent address
address = row.pop('agentAddress', '')
extractAddress(address, parsed)
row['agentAddress.street'] = parsed['street']
row['agentAddress.city'] = parsed['city']
row['agentAddress.state'] = parsed['state']
row['agentAddress.zip'] = parsed['zip']
# transform officer records
for officer in row['officersList']:
# remove unwanted columns
officer.pop('agentRsgnDate', None)
officer.pop('agentPositionEndDate', None)
officer.pop('agentTypeCode', None)
officer.pop('formatedAddress', None)
# move and transform address within officer node
address = officer.pop('address', None)
if address is not None:
officer['agentAddress.street'] = address['street']
officer['agentAddress.city'] = address['city']
officer['agentAddress.state'] = address['state']
officer['agentAddress.zip'] = address['zipCode']
# export officer records from in-memory python objects to dataframes
df_officers = json_normalize(
jdata,
'officersList', [
'companyAddress.city',
'companyAddress.state',
'companyAddress.street',
'companyAddress.zip',
'companyName',
'fileNumber',
'reportYear',
'sosRegDate',
'taxpayerId',
],
sep='.',
errors='ignore')
# clean and transform agent records
for row in jdata:
row['agentActiveYr'] = row['reportYear']
row['agentTitle'] = 'REGISTERED AGENT'
# remove unwanted officers
row.pop('officersList', None)
# export agent records from in-memory python objects to dataframes
df_agents = json_normalize(jdata, None, errors='ignore')
# combine agents and offices into a single dataframe set
df = | pd.concat([df_agents, df_officers]) | pandas.pandas.concat |
# Imports
import math
import logging
import os
import numpy as np
import pandas as pd
import re
import requests
import sqlite3
from bs4 import BeautifulSoup
from datetime import datetime
from sqlalchemy import create_engine
# Data Collection
def data_collection (url, headers):
# Request to URL
page = requests.get(url, headers=headers)
# Beautiful Soup object
soup = BeautifulSoup(page.text, 'html.parser')
# Verify total item per page
total_item = soup.find_all('h2',class_='load-more-heading')[0].get('data-total')
# Verify amount requests must be done considering 36 item per page
page_number = math.ceil(int(total_item)/36)
# New URL
url02 = url + '?page-size='+ str(int(page_number*36))
# Request to URL02
page = requests.get(url02, headers=headers)
# Beautiful Soup object
soup = BeautifulSoup(page.text, 'html.parser')
# ======================== Products Data =============================
products = soup.find('ul',class_='products-listing small')
product_list = products.find_all('article', class_='hm-product-item')
# product id
product_id = [p.get('data-articlecode') for p in product_list]
# product category
product_category = [p.get('data-category') for p in product_list]
# product_name
products_list = products.find_all('a',class_='link')
product_name = [p.get_text() for p in products_list]
#price
products_list = products.find_all('span',class_='price regular')
product_price = [p.get_text() for p in products_list]
data = pd.DataFrame([product_id,product_category,product_name,product_price]).T
data.columns = ['product_id','product_category','product_name','product_price']
return data
# Data Collection by product
def data_collection_by_product(data, headers):
# empty dataframe
df_compositions = pd.DataFrame()
# unique columns for all products
aux = []
df_pattern = pd.DataFrame( columns = ['Art. No.', 'Composition', 'Fit', 'Product safety', 'Size','More sustainable materials'] )
for i in range(len(data)):
#API requests
url = 'https://www2.hm.com/en_us/productpage.'+ data.loc[i,'product_id'] + '.html'
logger.debug('Product: %s', url )
page = requests.get(url, headers=headers)
# Beautiful Soup object
soup = BeautifulSoup(page.text, 'html.parser')
# =========================== color name ======================================
product_list = soup.find_all('a',class_='filter-option miniature active') + soup.find_all('a',class_='filter-option miniature')
color_name = [p.get('data-color') for p in product_list]
# product id
product_id = [p.get('data-articlecode') for p in product_list]
df_color = pd.DataFrame([product_id,color_name]).T
df_color.columns = ['product_id','color_name']
for j in range(len(df_color)):
#API requests
url = 'https://www2.hm.com/en_us/productpage.'+ df_color.loc[j,'product_id'] + '.html'
logger.debug('Color: %s', url )
page = requests.get(url, headers=headers)
# Beautiful Soup object
soup = BeautifulSoup(page.text, 'html.parser')
# =========================== Product Name =================================
product_name = soup.find_all('h1', class_= 'primary product-item-headline')
product_name = product_name[0].get_text()
# =========================== Product Price =================================
product_price = soup.find_all('div', class_= 'primary-row product-item-price')
product_price = re.findall(r'\d+\.?\d+',product_price[0].get_text( ))[0]
# =========================== composition ======================================
product_composition_list = soup.find_all('div',class_='pdp-description-list-item')
product_composition = [list(filter(None, p.get_text().split('\n'))) for p in product_composition_list]
# rename dataframe
df_composition = pd.DataFrame(product_composition).T
df_composition.columns = df_composition.iloc[0]
# delete first row and fill None with same value of above line
df_composition = df_composition[1:].fillna(method='ffill')
# remove pocket lining, shell and lining
df_composition['Composition'] = df_composition['Composition'].str.replace('Pocket: ', '', regex = True)
df_composition['Composition'] = df_composition['Composition'].str.replace('Pocket lining: ', '', regex = True)
df_composition['Composition'] = df_composition['Composition'].str.replace('Shell: ', '', regex = True)
df_composition['Composition'] = df_composition['Composition'].str.replace('Lining: ', '', regex = True)
# guarantee the same number of columns
df_composition = pd.concat([df_pattern, df_composition],axis=0)
# rename columns
df_composition.columns = ['product_id','composition','fit','product_safety','size','more_sustainable_materials']
df_composition['product_name'] = product_name
df_composition['product_price'] = product_price
# keep new columns if it shows up
aux = aux + df_composition.columns.tolist()
# merge data color + composition
df_composition = pd.merge(df_composition, df_color, how='left',on='product_id')
# all products
df_compositions = pd.concat([df_compositions, df_composition], axis=0)
# Join Showroom data + details
df_compositions['style_id'] = df_compositions['product_id'].apply(lambda x: x[:-3])
df_compositions['color_id'] = df_compositions['product_id'].apply(lambda x: x[-3:])
# scrapy datetime
df_compositions['scrapy_datetime'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
return df_compositions
# Data Cleaning
def data_cleaning(data_product):
# product_id
    # drop missing values; keep product_id as a string, which is the site's standard format
df_data = data_product.dropna(subset = ['product_id'])
# product_name
    # strip leading/trailing newline, tab and space characters
    # replace spaces with '_' and lowercase the name
df_data['product_name'] = df_data['product_name'].str.strip('\n\t ')
df_data['product_name'] = df_data['product_name'].str.replace(' ','_').str.lower()
# product_price
df_data['product_price'] = df_data['product_price'].astype(float)
# color_name
df_data['color_name'] = df_data['color_name'].str.replace(' ','_')
df_data['color_name'] = df_data['color_name'].str.replace('/','_').str.lower()
# Fit
df_data['fit'] = df_data['fit'].apply(lambda x: x.replace(' ','_').lower())
# size number
df_data['size_number'] = df_data["size"].apply(lambda x: re.search('\d{3}cm',x).group(0) if pd.notnull(x) else x)
df_data['size_number'] = df_data['size_number'].apply(lambda x: re.search('\d+',x).group(0) if pd.notnull(x) else x)
# size model
    # str.extract applies a regex to pull matching values out of each row
    # the .str accessor vectorizes the operation across the whole column
df_data['size_model'] = df_data['size'].str.extract('(\d+/\\d+)')
# break composition by comma
    # expand=True returns the result as a DataFrame
    # the .str accessor vectorizes the operation across the whole column
df1 = df_data['composition'].str.split(',', expand = True).reset_index(drop=True)
# Cotton | Polyester | Elasterell-P | Spandex
    # build a reference DataFrame with the same length as the original using np.arange
df_ref = pd.DataFrame(index = np.arange(len(df_data)), columns=['cotton','polyester','spandex','elasterell'])
# ======================================= composition =============================================
# --------- cotton --------
    # since each selection is a Series, use .name to set the column name
df_cotton_0 = df1.loc[df1[0].str.contains('Cotton',na=True),0]
df_cotton_0.name = 'cotton'
df_cotton_1 = df1.loc[df1[1].str.contains('Cotton',na=True),1]
df_cotton_1.name = 'cotton'
# combine
df_cotton = df_cotton_0.combine_first(df_cotton_1)
df_ref = pd.concat([df_ref,df_cotton],axis = 1)
df_ref = df_ref.iloc[:, ~df_ref.columns.duplicated(keep='last')]
# -------- polyester --------
df_polyester_0 = df1.loc[df1[0].str.contains('Polyester',na=True),0]
df_polyester_0.name = 'polyester'
df_polyester_1 = df1.loc[df1[1].str.contains('Polyester',na=True),1]
df_polyester_1.name = 'polyester'
# combine
df_polyester = df_polyester_0.combine_first(df_polyester_1)
df_ref = pd.concat([df_ref,df_polyester],axis = 1)
df_ref = df_ref.iloc[:,~df_ref.columns.duplicated(keep='last')]
# ------------- spandex -------------------
df_spandex_1 = df1.loc[df1[1].str.contains('Spandex',na=True),1]
df_spandex_1.name = 'spandex'
df_spandex_2 = df1.loc[df1[2].str.contains('Spandex',na=True),2]
df_spandex_2.name = 'spandex'
df_spandex_3 = df1.loc[df1[3].str.contains('Spandex',na=True),3]
df_spandex_3.name = 'spandex'
# combine
df_spandex_c2 = df_spandex_1.combine_first(df_spandex_2)
df_spandex = df_spandex_c2.combine_first(df_spandex_3)
df_ref = pd.concat([df_ref,df_spandex],axis = 1)
df_ref = df_ref.iloc[:,~df_ref.columns.duplicated(keep='last')]
# ------------- elasterell -------------------
df_elasterell = df1.loc[df1[1].str.contains('Elasterell-P',na=True),1]
df_elasterell.name = 'elasterell'
df_ref = pd.concat([df_ref,df_elasterell], axis = 1)
df_ref = df_ref.iloc[:,~df_ref.columns.duplicated(keep='last')]
# join of combine with product_id
df_aux = pd.concat([df_data['product_id'].reset_index(drop = True),df_ref],axis = 1)
# format composition data
df_aux['cotton'] = df_aux['cotton'].apply(lambda x: int (re.search('\d+',x).group(0))/100 if pd.notnull(x) else x)
df_aux['polyester'] = df_aux['polyester'].apply(lambda x: int (re.search('\d+',x).group(0))/100 if | pd.notnull(x) | pandas.notnull |
import glob
import traceback
import settings
import os
import codecs
import pandas as pd
import numpy as np
import csv
import json
import datetime
import collections
import re
RESULT_SUCCESS = 'success'
MSG_CANNOT_PARSE_FILENAME = 'Cannot parse filename'
MSG_INVALID_TYPE = 'Type mismatch'
MSG_INCORRECT_HEADER = 'Column not in table definition'
MSG_MISSING_HEADER = 'Column missing in file'
MSG_INCORRECT_ORDER = 'Column not in expected order'
MSG_NULL_DISALLOWED = 'NULL values are not allowed for column'
MSG_INVALID_DATE = 'Invalid date format. Expecting "YYYY-MM-DD"'
MSG_INVALID_TIMESTAMP = 'Invalid timestamp format. Expecting "YYYY-MM-DD HH:MM:SS[.SSSSSS]"'
HEADER_KEYS = ['file_name', 'table_name']
ERROR_KEYS = ['message', 'column_name', 'actual', 'expected']
VALID_DATE_FORMAT = ['%Y-%m-%d']
VALID_TIMESTAMP_FORMAT = ['%Y-%m-%d %H:%M',
'%Y-%m-%d %H:%MZ',
'%Y-%m-%d %H:%M %Z',
'%Y-%m-%d %H:%M%z',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%SZ',
'%Y-%m-%d %H:%M:%S %Z',
'%Y-%m-%d %H:%M:%S%z',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M:%S.%fZ',
'%Y-%m-%d %H:%M:%S.%f %Z',
'%Y-%m-%d %H:%M:%S.%f%z',
'%Y-%m-%dT%H:%M',
'%Y-%m-%dT%H:%MZ',
'%Y-%m-%dT%H:%M %Z',
'%Y-%m-%dT%H:%M%z',
'%Y-%m-%dT%H:%M:%S',
'%Y-%m-%dT%H:%M:%SZ',
'%Y-%m-%dT%H:%M:%S %Z',
'%Y-%m-%dT%H:%M:%S%z',
'%Y-%m-%dT%H:%M:%S.%f',
'%Y-%m-%dT%H:%M:%S.%fZ',
'%Y-%m-%dT%H:%M:%S.%f %Z',
'%Y-%m-%dT%H:%M:%S.%f%z']
SCIENTIFIC_NOTATION_REGEX = "^(?:-?\d*)\.?\d+[eE][-\+]?\d+$"
csv.register_dialect('load',
quotechar='"',
doublequote=True,
delimiter=',',
quoting=csv.QUOTE_ALL,
strict=True)
def get_readable_key(key):
new_key = key.replace('_', ' ')
new_key = new_key.title()
return new_key
def get_cdm_table_columns(table_name):
# allow files to be found regardless of CaSe
file = os.path.join(settings.cdm_metadata_path,
table_name.lower() + '.json')
if os.path.isfile(file):
with open(file, 'r') as f:
return json.load(f, object_pairs_hook=collections.OrderedDict)
else:
return None
def type_eq(cdm_column_type, submission_column_type):
"""
Compare column type in spec with column type in submission
:param cdm_column_type:
:param submission_column_type:
:return:
"""
if cdm_column_type == 'time':
return submission_column_type == 'character varying'
if cdm_column_type == 'integer':
return submission_column_type == 'int'
if cdm_column_type in ['character varying', 'text', 'string']:
return submission_column_type in ('str', 'unicode', 'object')
if cdm_column_type == 'date':
return submission_column_type in ['str', 'unicode', 'datetime64[ns]']
if cdm_column_type == 'timestamp':
return submission_column_type in ['str', 'unicode', 'datetime64[ns]']
if cdm_column_type in ['numeric', 'float']:
return submission_column_type == 'float'
else:
print(submission_column_type)
raise Exception('Unsupported CDM column type ' + cdm_column_type)
def cast_type(cdm_column_type, value):
"""
Compare column type in spec with column type in submission
:param cdm_column_type:
:param value:
:return:
"""
if cdm_column_type in ('integer', 'int64'):
# Regex check only relevant if submission dtype is 'object'
if not re.match(SCIENTIFIC_NOTATION_REGEX, str(value)):
return int(value)
if cdm_column_type in ('character varying', 'text', 'string'):
return str(value)
if cdm_column_type == 'numeric':
return float(value)
if cdm_column_type == 'float' and isinstance(value, float):
return value
if cdm_column_type == 'date' and isinstance(value, datetime.date):
return value
if cdm_column_type == 'timestamp' and isinstance(
value, datetime.datetime): # do not do datetime.datetime
return value
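# Illustrative checks (not part of the original module) of how cast_type behaves
# for a few common inputs:
def _demo_cast_type():
    assert cast_type('integer', '123') == 123      # plain integers are cast
    assert cast_type('integer', '1e5') is None     # scientific notation is rejected
    assert cast_type('numeric', '3.14') == 3.14
    assert cast_type('string', 42) == '42'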
def date_format_valid(date_str, fmt='%Y-%m-%d'):
"""Check if a date string matches a certain pattern and is compilable into a datetime object
:param date_str:
:type date_str: string
:param fmt: A C standard-compliant date format, defaults to '%Y-%m-%d'
:type fmt: str, optional
:return: A boolean indicating if date string matches the date format
:rtype: bool
"""
try:
#Avoids out of range dates, e.g. 2020-02-31
datetime.datetime.strptime(date_str, fmt)
except ValueError:
return False
return True
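# Illustrative checks (not part of the original module): strptime also rejects
# calendar-invalid dates, not just badly formatted ones.
def _demo_date_format_valid():
    assert date_format_valid('2020-05-05') is True
    assert date_format_valid('2020-02-31') is False    # day out of range
    assert date_format_valid('05/05/2020') is False    # wrong format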
def detect_bom_encoding(file_path):
default = None
with open(file_path, 'rb') as f:
buffer = f.read(4)
non_standard_encodings = [
('utf-8-sig', (codecs.BOM_UTF8, )),
('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)),
('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE))
]
for enc, boms in non_standard_encodings:
if any(buffer.startswith(bom) for bom in boms):
print(
'Detected non-standard encoding %s. Please encode the CSV file in utf-8 standard'
% enc)
return enc
return default
# finds the first occurrence of an error for that column.
# currently, it does NOT find all errors in the column.
def find_error_in_file(column_name, cdm_column_type, submission_column_type,
df):
for i, (index, row) in enumerate(df.iterrows()):
try:
if i <= len(df) - 1:
if pd.notnull(row[column_name]):
cast_type(cdm_column_type, row[column_name])
else:
return False
except ValueError:
# print(row[column_name])
return index
def find_blank_lines(f):
"""Check for rows in a csv file with only empty values
:param f: A file object
:type f: file-like object
:return: List of rows with all empty values
:rtype: list
"""
df = pd.read_csv(f)
indices = df.index[df.apply(
lambda row: all(row.apply(lambda col: pd.isnull(col))),
axis=1)].tolist()
return [i + 1 for i in indices]
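# Example with an in-memory CSV (purely illustrative):
#   >>> import io
#   >>> find_blank_lines(io.StringIO('a,b\n1,2\n,\n3,4\n'))
#   [2]
# Data rows are numbered from 1; the header row is not counted.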
def find_scientific_notation_errors(f, int_columns):
df = pd.read_csv(f, dtype=str)
df = df.rename(columns=str.lower)
df = df[[col for col in int_columns if col in df.columns]]
errors = []
sci_not_line = collections.defaultdict(int)
for submission_col_name in df.columns:
submission_column = df[submission_col_name]
for i, value in submission_column.items():
if pd.notnull(value) and re.match(SCIENTIFIC_NOTATION_REGEX,
value):
sci_not_line[submission_col_name] = (value, i + 1)
break
for col, (value, line_num) in sci_not_line.items():
e = dict(message=(
f"Scientific notation value '{value}' was found on line {line_num}. "
"Scientific notation is not allowed for integer fields."),
column_name=col)
errors.append(e)
return errors
def check_csv_format(f, column_names):
results = []
idx = 1
line = []
header_error_msg = 'Please add/fix incorrect headers at the top of the file, enclosed in double quotes'
quote_comma_error_msg = 'Stray double quote or comma within field on line %s'
try:
reader = csv.reader(f, dialect='load')
header = next(reader)
line = header
if header != column_names:
results.append([header_error_msg, header, column_names])
for idx, line in enumerate(reader, start=2):
for field in line:
if '\n' in field:
newline_msg = 'Newline character found on line %s: %s\n' \
'Please replace newline "\\n" characters with space " "' % (str(idx), line)
print(newline_msg)
results.append([newline_msg, None, None])
if len(line) != len(column_names):
column_mismatch_msg = 'Incorrect number of columns on line %s: %s' % (
str(idx), line)
results.append([column_mismatch_msg, None, None])
except (ValueError, csv.Error):
print(traceback.format_exc())
if not line:
print(quote_comma_error_msg % (str(idx)))
print(header_error_msg + '\n')
else:
print(quote_comma_error_msg % (str(idx + 1)))
print('Previously parsed line %s: %s\n' % (str(idx), line))
print(
'Enclose all fields in double-quotes\n'
'e.g. person_id,2020-05-05,6345 -> "person_id","2020-05-05","6345"\n'
'At a minimum, enclose all non-numeric fields in double-quotes \n'
'e.g. person_id,2020-05-05,6345 -> "person_id","2020-05-05",6345\n'
)
print(
'Pair stray double quotes or remove them if they are inside a field \n'
'e.g. "wound is 1" long" -> "wound is 1"" long" or "wound is 1 long"\n'
)
print(
'Remove stray commas if they are inside a field and next to a double quote \n'
'e.g. "drug route: "orally", "topically"" -> "drug route: ""orally"" ""topically"""\n'
)
f.seek(0)
return results
def run_checks(file_path, f):
file_name, file_extension = os.path.splitext(file_path)
file_path_parts = file_name.split(os.sep)
table_name = file_path_parts[-1]
print('Found CSV file %s' % file_path)
result = {
'passed': False,
'errors': [],
'file_name': table_name + file_extension,
'table_name': get_readable_key(table_name)
}
# get the column definitions for a particular OMOP table
cdm_table_columns = get_cdm_table_columns(table_name)
if cdm_table_columns is None:
msg = '"%s" is not a valid OMOP table' % table_name
print(msg)
result['errors'].append(dict(message=msg))
return result
# get column names for this table
cdm_column_names = [col['name'] for col in cdm_table_columns]
if not os.path.isfile(file_path):
print('File does not exist: %s' % file_path)
return result
try:
print('Parsing CSV file for OMOP table "%s"' % table_name)
format_errors = check_csv_format(f, cdm_column_names)
for format_error in format_errors:
result['errors'].append(
dict(message=format_error[0],
actual=format_error[1],
expected=format_error[2]))
csv_columns = list(pd.read_csv(f, nrows=1).columns.values)
datetime_columns = [
col_name.lower() for col_name in csv_columns
if 'date' in col_name.lower()
]
f.seek(0)
blank_lines = find_blank_lines(f)
if blank_lines:
blank_lines_str = ",".join(map(str, blank_lines))
line_str = 'lines' if len(blank_lines) > 1 else 'line'
blank_lines_msg = f'File contains blank {line_str} on {line_str} {blank_lines_str}. ' \
'If there is no data, please only submit the header line.'
result['errors'].append(dict(message=blank_lines_msg))
return result
f.seek(0)
# check columns if looks good process file
if not _check_columns(cdm_column_names, csv_columns, result):
return result
#search for scientific notation
int_columns = [
col['name'] for col in cdm_table_columns
if col['type'] == 'integer'
]
sci_not_errors = find_scientific_notation_errors(f, int_columns)
for sci_not_error in sci_not_errors:
result['errors'].append(sci_not_error)
f.seek(0)
# read file to be processed
df = pd.read_csv(f,
sep=',',
na_values=['', ' ', '.'],
parse_dates=False,
infer_datetime_format=False)
# Check each column exists with correct type and required
for meta_item in cdm_table_columns:
meta_column_name = meta_item['name']
meta_column_required = meta_item['mode'] == 'required'
meta_column_type = meta_item['type']
submission_has_column = False
for submission_column in df.columns:
if submission_column == meta_column_name:
submission_has_column = True
submission_column_type = df[submission_column].dtype
# If all empty don't do type check
if not df[submission_column].isnull().values.all():
if not type_eq(meta_column_type,
submission_column_type):
# find the row that has the issue
error_row_index = find_error_in_file(
submission_column, meta_column_type,
submission_column_type, df)
if error_row_index:
if not (pd.isnull(
df[submission_column][error_row_index])
and not meta_column_required):
e = dict(message=MSG_INVALID_TYPE +
" line number " +
str(error_row_index + 1),
column_name=submission_column,
actual=df[submission_column]
[error_row_index],
expected=meta_column_type)
result['errors'].append(e)
# Check that date format is in the YYYY-MM-DD or YYYY-MM-DD hh:mm:ss format
if meta_column_type in ('date', 'timestamp'):
fmt = ''
err_msg = ''
if meta_column_type == 'date':
fmts = VALID_DATE_FORMAT
err_msg = MSG_INVALID_DATE
elif meta_column_type == 'timestamp':
fmts = VALID_TIMESTAMP_FORMAT
err_msg = MSG_INVALID_TIMESTAMP
for idx, value in df[submission_column].iteritems(
):
if not any(
list(
map(
lambda fmt:
date_format_valid(
str(value), fmt),
fmts))):
if not ( | pd.isnull(value) | pandas.isnull |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tkinter as tk
from tkinter import filedialog
import os
def main():
# todo: load the "results.csv" file from the mia-results directory
# todo: read the data into a list
# todo: plot the Dice coefficients per label (i.e. white matter, gray matter, hippocampus, amygdala, thalamus) in a boxplot
# alternative: instead of manually loading/reading the csv file you could also use the pandas package
# but you will need to install it first ('pip install pandas') and import it to this file ('import pandas as pd')
data = pd.read_csv("./bin/mia-result/2020-11-10-16-45-44whitestripe/results.csv",sep=';')
data.boxplot(by='LABEL', column=['DICE'], grid = False)
plt.show()
data.boxplot(by='LABEL', column=['HDRFDST'], grid = False)
plt.show()
def plot_all(show = False, save=False):
root = tk.Tk()
root.withdraw()
file_path = filedialog.askdirectory()
files = os.listdir(file_path)
data_all = []
for name in files:
print('****************** '+name+' ******************')
data_path = os.path.join(file_path, name, 'results.csv')
data = pd.read_csv(data_path, sep=';')
if save:
data.boxplot(by='LABEL', column=['DICE'], grid = False)
plt.savefig(os.path.join(file_path, f"result_Dice_{name}.png"), dpi=300)
if show:
data.boxplot(by='LABEL', column=['DICE'], grid = False)
plt.show()
if save:
data.boxplot(by='LABEL', column=['HDRFDST'], grid = False)
plt.savefig(os.path.join(file_path, f"result_HDRFDST_{name}.png"), dpi=300)
if show:
data.boxplot(by='LABEL', column=['HDRFDST'], grid = False)
plt.show()
data[''] = name[19:]
data_all.append(data)
data_all = | pd.concat(data_all) | pandas.concat |
from pathlib import Path
import numpy as np
import pandas as pd
from plotnine import *
def main(incsv, outcsv, colname="seed", colvalue=42, adddescription=True):
fulldf = pd.read_csv(incsv)
value = fulldf[fulldf[colname] == | pd.to_numeric(colvalue) | pandas.to_numeric |
import builtins
from io import StringIO
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna
import pandas._testing as tm
import pandas.core.nanops as nanops
from pandas.util import _test_decorators as td
@pytest.fixture(
params=[np.int32, np.int64, np.float32, np.float64],
ids=["np.int32", "np.int64", "np.float32", "np.float64"],
)
def numpy_dtypes_for_minmax(request):
"""
Fixture of numpy dtypes with min and max values used for testing
cummin and cummax
"""
dtype = request.param
min_val = (
np.iinfo(dtype).min if np.dtype(dtype).kind == "i" else np.finfo(dtype).min
)
max_val = (
np.iinfo(dtype).max if np.dtype(dtype).kind == "i" else np.finfo(dtype).max
)
return (dtype, min_val, max_val)
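# For reference, the fixture yields tuples such as the following (shown for
# np.int32; the float extremes come from np.finfo and are much larger):
#   (np.int32, -2147483648, 2147483647)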
@pytest.mark.parametrize("agg_func", ["any", "all"])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
"vals",
[
["foo", "bar", "baz"],
["foo", "", ""],
["", "", ""],
[1, 2, 3],
[1, 0, 0],
[0, 0, 0],
[1.0, 2.0, 3.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[True, True, True],
[True, False, False],
[False, False, False],
[np.nan, np.nan, np.nan],
],
)
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == "any":
exp = False
exp_df = DataFrame([exp] * 2, columns=["val"], index=Index(["a", "b"], name="key"))
result = getattr(df.groupby("key"), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = | DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]}) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
QUANDLKEY = '<Enter your Quandl APT key here>'
"""
Created on Fri Oct 5 23:24:35 2018
@author: jeff
"""
'''*************************************
#1. Import libraries and define key variables
'''
import pandas as pd
import numpy as np
import quandl
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report,roc_curve, auc,confusion_matrix,f1_score
from sklearn.model_selection import train_test_split
from sklearn import tree
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
import pickle
import graphviz
#KPI keys
quandl.ApiConfig.api_key = QUANDLKEY
'''*************************************
#2. Definition of functions
'''
#2a.Download tickers
def download_tkr(tkr):
record_db_events_gp = pd.DataFrame()
record_db_financials=quandl.get_table('SHARADAR/SF1', calendardate={'gte': '2008-12-31'}, ticker=tkr, dimension='MRY')
record_db_financials['year'] = record_db_financials['reportperiod'].dt.year
record_db_financials['year_1'] = record_db_financials['year']+1
record_db_events=quandl.get_table('SHARADAR/EVENTS', ticker=tkr)
tmp_series = record_db_events['eventcodes'].str.contains('21')
record_db_events= record_db_events[tmp_series]
record_db_events['year'] = record_db_events.date.dt.year
record_db_events= record_db_events.drop(['date'],axis=1)
record_db_events_gp = record_db_events.groupby(['ticker','year'],as_index=False).count()
combined_pd = pd.merge(record_db_financials,record_db_events_gp,how ='left',left_on='year_1',right_on='year')
#convert all events to 1 and NaN
combined_pd.loc[combined_pd['eventcodes']>1,'eventcodes'] = 1
X = record_db_financials.iloc[:,6:-5]
Y = combined_pd.iloc[:,-1]
return combined_pd, X, Y
#tkr = 'AMZN'
#df_tmp = download_tkr(tkr)
#2b.Train tree
def train_tree(X,Y,ind):
print('Decision Tree')
#split the dataset into training set and testing set
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.33, random_state=0)
min_leaf_size = int(len(X_train) * 0.01)
tree_clf = tree.DecisionTreeClassifier(min_samples_leaf=min_leaf_size)
#preprocessing the data
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
#fit the training data to the model
tree_clf.fit(X_train,Y_train)
##metric 1: roc
Y_score_tree = tree_clf.predict(X_test)
fpr, tpr, thresholds = roc_curve(Y_test,Y_score_tree, pos_label=1)
roc_auc = auc(fpr,tpr)
lw=2
plt.figure()
plt.plot(fpr,tpr,color='darkorange',lw=lw,label='ROC curve (area = %0.2f)' %roc_auc)
plt.plot([0,1],[0,1],color='navy',lw=lw,linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic - Decision Tree '+ind)
plt.legend(loc="lower right")
plt.savefig(ind+'_DT.png')
##metric 2: Confusion matrix
Y_pred_tree = tree_clf.predict(X_test)
confusion_matrix_tree = confusion_matrix(Y_test, Y_pred_tree)
print(confusion_matrix_tree)
print(classification_report(Y_test, Y_pred_tree))
#common standard to compare across models
f1_clf = f1_score(Y_test, Y_pred_tree, average='weighted')
##save model
f_tree = open(ind+'_tree_clf.pkl',"wb+")
pickle.dump(tree_clf, f_tree)
f_tree.close()
f_tree_sc = open(ind+'_tree_scaler.pkl',"wb+")
pickle.dump(scaler, f_tree_sc)
f_tree_sc.close()
return tree_clf,f1_clf
##2C Neural Network
#2Ci. Grid search that simulate the performance of different neural network design
def grid_search(X_train,X_test, Y_train,Y_test,num_training_sample):
best_f1 = 0
best_hidden_layers_list = []
best_hidden_layers_tuple = ()
#various depth
for depth in range(1,5):
print('Depth = '+str(depth))
for layer_size in range(1,8):
neuron_cnt = 0
hidden_layers_list = []
i = 0
while i<depth:
hidden_layers_list.append(layer_size)
neuron_cnt += layer_size
i+=1
#pruning - to avoid over-training
if num_training_sample<neuron_cnt:
break
hidden_layers_tuple = tuple(hidden_layers_list)
nn_clf = MLPClassifier(alpha=1e-5,
hidden_layer_sizes=hidden_layers_tuple, random_state=1)
nn_clf.fit(X_train,Y_train)
Y_pred = nn_clf.predict(X_test)
temp_f1 = f1_score(Y_test, Y_pred, average='weighted')
if temp_f1 > best_f1:
best_f1 = temp_f1
best_hidden_layers_list = hidden_layers_list
best_hidden_layers_tuple = hidden_layers_tuple
print(best_hidden_layers_list)
return best_hidden_layers_list,best_hidden_layers_tuple
#2Cii. Train Neural Network
def train_NN(X,Y,ind):
print('Neural Network')
#split the dataset into training set and testing set
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.33, random_state=0)
#preprocessing the data
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
num_training_sample = len(X_train)
best_hidden_layers_list,best_hidden_layers_tuple = grid_search(X_train, X_test, Y_train, Y_test,num_training_sample)
nn_clf = MLPClassifier(alpha=1e-5,
hidden_layer_sizes=best_hidden_layers_tuple, random_state=1)
#fit the training data to the model
nn_clf.fit(X_train,Y_train)
##metric 1: roc
Y_score_nn = nn_clf.predict(X_test)
fpr, tpr, thresholds = roc_curve(Y_test,Y_score_nn, pos_label=1)
roc_auc = auc(fpr,tpr)
lw=2
plt.figure()
plt.plot(fpr,tpr,color='darkorange',lw=lw,label='ROC curve (area = %0.2f)' %roc_auc)
plt.plot([0,1],[0,1],color='navy',lw=lw,linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic - Neural Network '+ind)
plt.legend(loc="lower right")
#plt.show()
plt.savefig(ind+'_NN.png')
##metric 2: Confusion matrix
Y_pred_tree = nn_clf.predict(X_test)
confusion_matrix_tree = confusion_matrix(Y_test, Y_pred_tree)
print(confusion_matrix_tree)
print(classification_report(Y_test, Y_pred_tree))
#common standard to compare across models
#f1_clf = f1_score(Y_test, Y_score_nn, average='binary')
f1_clf = f1_score(Y_test, Y_score_nn, average='weighted')
##save model
f_nn = open(ind+'_nn_clf_.pkl',"wb+")
pickle.dump(nn_clf, f_nn)
f_nn.close()
f_nn_sc = open(ind+'_nn_scaler.pkl',"wb+")
pickle.dump(scaler, f_nn_sc)
f_nn_sc.close()
return nn_clf, f1_clf
'''*************************************
3. Execute the program
#3a. filter the industry in scope
'''
groupby_fld = 'sicsector'
min_size = 30
df_tkr = pd.read_csv('industry_tickers_list.csv')
dict_ind_tkr = {}
f1_list = []
df_tkr_ind = pd.DataFrame()
df_tkr_ind['cnt'] = df_tkr.groupby(groupby_fld)['ticker'].count()
df_tkr_ind_select = df_tkr_ind[df_tkr_ind['cnt']>=min_size]
list_scope = list(df_tkr_ind_select.index)
#collect ticker in each industry
for index, row in df_tkr.iterrows():
ind = row[groupby_fld]
tkr = row['ticker']
if ind in list_scope:
if ind in dict_ind_tkr:
dict_ind_tkr[ind].append(tkr)
else:
dict_ind_tkr[ind] = [tkr]
#loop through the dictionary - one industry at a time
for ind, list_tkr in dict_ind_tkr.items():
df_X = pd.DataFrame({})
df_Y = pd.DataFrame({})
print(ind)
#Go through the ticker list to Download data from source
#loop through tickers from that industry
for tkr in list_tkr:
print(tkr)
try:
df_tmp,X_tmp,Y_tmp = download_tkr(tkr)
except Exception:
continue
if len(df_X)==0:
#df_all = df_tmp
df_X = X_tmp
df_Y = Y_tmp
else:
#df_all = pd.concat([df_all,df_tmp])
df_X = pd.concat([df_X,X_tmp])
df_Y = | pd.concat([df_Y,Y_tmp]) | pandas.concat |
# get human_ebv_tpms.py
import pandas as pd
import argparse
import os
import math
import datetime
import subprocess
# get basename from a file and path string
def get_basename(filepath):
import os
return os.path.basename(os.path.splitext(filepath)[0])
# get and format output directory
def format_odir(odir):
import os
cwd = os.getcwd()
if odir != '':
# if first character is not /, use cwd to make this an absolute path
if odir[0] != '/' and odir[0] != '~':
odir = cwd+odir
if odir[-1] != '/':
odir += '/'
return odir
# make a dated output directory for the files used for the tracks
def make_dated_folder(odir, bname):
date = datetime.datetime.now()
date = date.strftime('%y%m%d')
odir = odir+date+'_'+bname+'_figures/'
if not os.path.isdir(odir):
print('Making output directory '+odir)
os.makedirs(odir)
return odir
# get value associated with keyword in the 9th column of gtf
def get_field_value(key, fields):
if key not in fields:
return None
else:
return fields.split(key+' "')[1].split()[0].replace('";','')
# calculate tpm for a column in the abundance table
def get_tpm(df, col):
new_col = 'TPM_'+col
    total_reads = df[col].sum()
    df[new_col] = df.apply(lambda x: float(x[col]*1000000)/total_reads, axis=1)
return new_col, df
# calculate tpm for a column in the abundance table
def get_log_tpm(df, col, gene):
tpm_col = 'TPM_'+col
if not gene:
new_col = 'log_'+tpm_col
else:
        new_col = 'gene_log_'+tpm_col
df[new_col] = df.apply(lambda x: math.log2(x[tpm_col]+1), axis=1)
return new_col, df
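# Worked example of the TPM math above (counts are invented): a column with
# counts [10, 30, 60] gives total_reads = 100, so the TPM values are
# [100000.0, 300000.0, 600000.0], and get_log_tpm then stores log2(TPM + 1).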
# get gtf file name
parser = argparse.ArgumentParser(description='removes EBV transcripts from GTF file')
parser.add_argument('--human_gtf', help='GTF with human and EBV data')
parser.add_argument('--human_filt_ab', help='Filtered abundance file with human and EBV data')
parser.add_argument('--human_ab', help='Unfiltered abundance file with human and EBV data')
parser.add_argument('--ebv_filt_ab', help='EBV only filtered abundance file')
parser.add_argument('--ebv_ab', help='EBV only unfiltered abundance file')
parser.add_argument('--datasets', help='Comma-separated list of dataset names to use for human+ebv data')
parser.add_argument('--o', help='Prefix for output file')
args = parser.parse_args()
full_gtf = args.human_gtf
full_ab = args.human_filt_ab
full_unf_ab = args.human_ab
ebv_ab = args.ebv_filt_ab
ebv_unf_ab = args.ebv_ab
my_datasets = args.datasets.split(',')
oprefix = args.o
# get all human transcript ids
infile = open(full_gtf, 'r')
human_tids = []
ebv_tids = []
for i, line in enumerate(infile):
line = line.replace('\n', '')
temp = line.split('\t')
fields = temp[-1]
if temp[0] != 'chrEBV' and temp[2] == 'transcript':
human_tids.append(get_field_value('talon_transcript', fields))
elif temp[0] == 'chrEBV' and temp[2] == 'transcript':
ebv_tids.append(get_field_value('talon_transcript', fields))
full_df = pd.read_csv(full_ab, sep='\t')
ebv_df = | pd.read_csv(ebv_ab, sep='\t') | pandas.read_csv |
#%%
import os
from pyteomics import mzid, mzml
import pandas as pd
import numpy as np
import glob
"""
Files are downloaded and manually divided at random into different folders.
The code below is repeated, with the same effect, for each folder: it parses the
files into pandas data frames and stores all the data in a single HDF5 file.
"""
#%%
os.chdir('./files/train')
mzid_files=glob.glob('*.mzid')
indexed_mzid = mzid.chain.from_iterable(mzid_files, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid = []
for entry in(indexed_mzid):
all_mzid.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid)
mzid_df = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra)
spectra_df = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df = pd.merge(mzid_df,spectra_df,how='left',on=['file','id'])
merged_df = merged_df[['id','seq','mz','intensities']]
#%%
hdf = pd.HDFStore('/home/ubuntu/data/jiahao/files/train.hdf5', mode="w")
hdf.put(value=merged_df, key="df")
#%%
os.chdir('./train_1')
mzid_files_1=glob.glob('*.mzid')
indexed_mzid_1 = mzid.chain.from_iterable(mzid_files_1, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_1 = []
for entry in(indexed_mzid_1):
all_mzid_1.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_1)
mzid_df_1 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_1 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_1.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_1)
spectra_df_1 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_1 = | pd.merge(mzid_df_1,spectra_df_1,how='left',on=['file','id']) | pandas.merge |
import pandas as pd
import torch
class Trainer:
def __init__(self, data_loaders, criterion, device, on_after_epoch=None):
self.data_loaders = data_loaders
self.criterion = criterion
self.device = device
self.history = []
self.on_after_epoch = on_after_epoch
def train(self, model, optimizer, num_epochs):
for epoch in range(num_epochs):
train_epoch_loss = self._train_on_epoch(model, optimizer)
val_epoch_loss = self._val_on_epoch(model, optimizer)
hist = {
'epoch': epoch,
'train_loss': train_epoch_loss,
'val_loss': val_epoch_loss,
}
self.history.append(hist)
if self.on_after_epoch is not None:
self.on_after_epoch(model, pd.DataFrame(self.history))
return | pd.DataFrame(self.history) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 28 20:15:47 2020
@authors: <NAME>, omars
"""
#%% Libraries
import pandas as pd
import numpy as np
from random import choices
import tensorflow as tf
import matplotlib.pyplot as plt
import keras
from keras import backend as K
from keras.layers import Layer
from keras.models import Sequential, load_model
from keras.losses import MSE
from tensorflow.keras.callbacks import EarlyStopping
from keras.layers import LSTM
from keras.layers import Bidirectional
from keras.layers import TimeDistributed
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import RepeatVector
from tensorflow.keras.models import load_model
from tslearn.utils import to_time_series_dataset
from tslearn.clustering import TimeSeriesKMeans
from kneed import KneeLocator
#%% Helper Functions
def wmape(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred).astype('float')
return sum((np.abs(y_true - y_pred)) * 100) / sum(y_true)
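# Quick sanity check of the weighted MAPE above (numbers are invented):
#   wmape([100, 200], [110, 190]) = (10*100 + 10*100) / 300, i.e. about 6.67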
def get_in_date_range(dataset, first_date = '2020-01-01', last_date = '2020-12-31', date_col='date'):
return dataset.loc[(dataset[date_col].astype('datetime64') >= np.datetime64(first_date)) & (dataset[date_col].astype('datetime64') < np.datetime64(last_date))]
def mod_date(date, interval):
return str(np.datetime64(date) + np.timedelta64(interval, 'D'))
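# Example: mod_date('2020-03-01', 3) -> '2020-03-04'; a negative interval steps
# backwards in time (dates are illustrative).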
def rename_features(i, features, memory):
dictionary = {}
for j in range(memory):
dictionary[features[j]] = 'GrowthRate_t-' +str(memory-j)
return dictionary
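# Example mapping for features ['f1', 'f2', 'f3'] and memory=3 (names are
# hypothetical): {'f1': 'GrowthRate_t-3', 'f2': 'GrowthRate_t-2',
# 'f3': 'GrowthRate_t-1'}. Note that the `i` argument is not used here.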
def transpose_case_df(simple_output, forward_days, day_0, date_col='date', region_col='county', target_col='cases'):
dates = []
cases = []
states = []
for i in range(forward_days):
date = mod_date(day_0, i)
dates.extend([date for i in range(len(simple_output))])
cases.extend(simple_output[target_col+'_predicted_day_' + str(i)])
states.extend(simple_output[region_col])
df = | pd.DataFrame({date_col:dates, region_col: states, 'pred_'+ target_col:cases}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
@author: <NAME> - https://www.linkedin.com/in/adamrvfisher/
"""
#This is part of a kth fold optimization tool
#Import modules
#from DefRSIPredictor import DefRSIPredictor
import numpy as np
import pandas as pd
import time as t
import random as rand
#Number of iterations
iterations = range(0,200000)
#Read in data
s = pd.read_pickle('VXXAdvice07_50')
s = s.drop('Regime',1)
s = s.drop('Strategy',1)
#Get short only returns
s['ShortReturns'] = s['LogRet'] * -1
#Empty structures
empty = []
dataset = | pd.DataFrame() | pandas.DataFrame |
"""IO methods for segmotion output.
--- DEFINITIONS ---
segmotion (or w2segmotionll) = storm-tracking algorithm in WDSS-II.
WDSS-II = Warning Decision Support System -- Integrated Information, a software
package for the visualization and analysis of thunderstorm-related data.
"""
import os
import glob
import gzip
import tempfile
import shutil
import xml.etree.ElementTree as ElementTree
import numpy
import pandas
from gewittergefahr.gg_io import netcdf_io
from gewittergefahr.gg_io import myrorss_and_mrms_io
from gewittergefahr.gg_utils import radar_sparse_to_full as radar_s2f
from gewittergefahr.gg_utils import polygons
from gewittergefahr.gg_utils import unzipping
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import geodetic_utils
from gewittergefahr.gg_utils import storm_tracking_utils as tracking_utils
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import time_periods
from gewittergefahr.gg_utils import error_checking
FILE_EXISTS_ERROR_CODE = 17
GZIP_FILE_EXTENSION = '.gz'
STATS_FILE_EXTENSION = '.xml'
POLYGON_FILE_EXTENSION = '.netcdf'
STATS_DIR_NAME_PART = 'PolygonTable'
POLYGON_DIR_NAME_PART = 'ClusterID'
SENTINEL_VALUE = -9999
TIME_FORMAT_IN_FILES = '%Y%m%d-%H%M%S'
TIME_FORMAT_IN_FILES_HOUR_ONLY = '%Y%m%d-%H'
SPC_DATE_START_HOUR = 11
SPC_DATE_END_HOUR = 37
HOURS_TO_SECONDS = 3600
PRIMARY_ID_COLUMN_ORIG = 'RowName'
EAST_VELOCITY_COLUMN_ORIG = 'MotionEast'
NORTH_VELOCITY_COLUMN_ORIG = 'MotionSouth'
AGE_COLUMN_ORIG = 'Age'
XML_COLUMN_NAMES = [
tracking_utils.PRIMARY_ID_COLUMN, tracking_utils.EAST_VELOCITY_COLUMN,
tracking_utils.NORTH_VELOCITY_COLUMN, tracking_utils.AGE_COLUMN
]
XML_COLUMN_NAMES_ORIG = [
PRIMARY_ID_COLUMN_ORIG, EAST_VELOCITY_COLUMN_ORIG,
NORTH_VELOCITY_COLUMN_ORIG, AGE_COLUMN_ORIG
]
def _xml_column_name_orig_to_new(column_name_orig):
"""Converts name of XML column from original (segmotion) to new format.
:param column_name_orig: Column name in original format.
:return: column_name: Column name in new format.
"""
orig_column_flags = [c == column_name_orig for c in XML_COLUMN_NAMES_ORIG]
orig_column_index = numpy.where(orig_column_flags)[0][0]
return XML_COLUMN_NAMES[orig_column_index]
def _id_matrix_to_coord_lists(numeric_id_matrix):
"""Converts grid of numeric storm IDs to one coord list per storm object.
M = number of rows in grid
N = number of columns in grid
P = number of coordinates (grid points) in a given storm object
:param numeric_id_matrix: M-by-N numpy array of numeric storm IDs.
:return: polygon_table: pandas DataFrame with the following columns. Each
row is one storm object.
polygon_table.primary_id_string: Primary storm ID.
polygon_table.grid_point_rows: length-P numpy array with indices of grid
rows in storm object.
polygon_table.grid_point_columns: length-P numpy array with indices of grid
columns in storm object.
"""
numeric_id_matrix[numpy.isnan(numeric_id_matrix)] = SENTINEL_VALUE
unique_numeric_ids, orig_to_unique_indices = numpy.unique(
numeric_id_matrix, return_inverse=True)
unique_id_strings = [str(int(this_id)) for this_id in unique_numeric_ids]
polygon_table = pandas.DataFrame.from_dict({
tracking_utils.PRIMARY_ID_COLUMN: unique_id_strings
})
nested_array = polygon_table[[
tracking_utils.PRIMARY_ID_COLUMN, tracking_utils.PRIMARY_ID_COLUMN
]].values.tolist()
polygon_table = polygon_table.assign(**{
tracking_utils.ROWS_IN_STORM_COLUMN: nested_array,
tracking_utils.COLUMNS_IN_STORM_COLUMN: nested_array
})
num_grid_rows = numeric_id_matrix.shape[0]
num_grid_columns = numeric_id_matrix.shape[1]
num_storms = len(unique_numeric_ids)
for i in range(num_storms):
if unique_numeric_ids[i] == SENTINEL_VALUE:
continue
these_linear_indices = numpy.where(orig_to_unique_indices == i)[0]
these_row_indices, these_column_indices = numpy.unravel_index(
these_linear_indices, (num_grid_rows, num_grid_columns)
)
polygon_table[tracking_utils.ROWS_IN_STORM_COLUMN].values[i] = (
these_row_indices
)
polygon_table[tracking_utils.COLUMNS_IN_STORM_COLUMN].values[i] = (
these_column_indices
)
return polygon_table.loc[
polygon_table[tracking_utils.PRIMARY_ID_COLUMN] !=
str(int(SENTINEL_VALUE))
]
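# Small illustration of the conversion above (matrix values are made up):
#   numpy.array([[1., 1., numpy.nan],
#                [2., 2., 2.]])
# yields one row per storm: ID '1' with grid rows [0, 0] / columns [0, 1] and
# ID '2' with grid rows [1, 1, 1] / columns [0, 1, 2]; NaN cells are mapped to
# the sentinel value and then dropped.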
def _append_spc_date_to_storm_ids(primary_id_strings, spc_date_string):
"""Appends SPC date to each storm ID.
N = number of storm objects
:param primary_id_strings: length-N list of primary IDs.
:param spc_date_string: SPC date (format "yyyymmdd").
:return: primary_id_strings: Same as input but with new IDs.
"""
return [
'{0:s}-{1:s}'.format(p, spc_date_string) for p in primary_id_strings
]
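# Example: _append_spc_date_to_storm_ids(['123', '456'], '20110405') returns
# ['123-20110405', '456-20110405'] (IDs and date are illustrative).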
def _get_pathless_stats_file_name(unix_time_sec, zipped=True):
"""Generates pathless name for statistics file.
This file should contain storm stats (everything except polygons) for one
time step and one tracking scale.
:param unix_time_sec: Time in Unix format.
:param zipped: Boolean flag. If True, will generate name for zipped file.
If False, will generate name for unzipped file.
:return: pathless_stats_file_name: Pathless name for statistics file.
"""
if zipped:
return '{0:s}{1:s}{2:s}'.format(
time_conversion.unix_sec_to_string(
unix_time_sec, TIME_FORMAT_IN_FILES),
STATS_FILE_EXTENSION, GZIP_FILE_EXTENSION
)
return '{0:s}{1:s}'.format(
time_conversion.unix_sec_to_string(unix_time_sec, TIME_FORMAT_IN_FILES),
STATS_FILE_EXTENSION
)
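# Example file names produced above, assuming unix_sec_to_string renders a valid
# time of 1200 UTC 5 Apr 2011 as '20110405-120000':
#   zipped:   '20110405-120000.xml.gz'
#   unzipped: '20110405-120000.xml'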
def _get_pathless_polygon_file_name(unix_time_sec, zipped=True):
"""Generates pathless name for polygon file.
This file should contain storm outlines (polygons) for one time step and one
tracking scale.
:param unix_time_sec: Time in Unix format.
:param zipped: Boolean flag. If True, will generate name for zipped file.
If False, will generate name for unzipped file.
:return: pathless_polygon_file_name: Pathless name for polygon file.
"""
if zipped:
return '{0:s}{1:s}{2:s}'.format(
time_conversion.unix_sec_to_string(
unix_time_sec, TIME_FORMAT_IN_FILES),
POLYGON_FILE_EXTENSION, GZIP_FILE_EXTENSION
)
return '{0:s}{1:s}'.format(
time_conversion.unix_sec_to_string(unix_time_sec, TIME_FORMAT_IN_FILES),
POLYGON_FILE_EXTENSION
)
def _get_relative_stats_dir_ordinal_scale(tracking_scale_ordinal):
"""Generates relative path for directory with storm statistics.
:param tracking_scale_ordinal: Tracking scale. This should be an ordinal
number in [0, N - 1], where N = number of tracking scales.
:return: relative_stats_dir_name: Relative path for directory with storm
statistics.
"""
return '{0:s}/scale_{1:d}'.format(
STATS_DIR_NAME_PART, tracking_scale_ordinal)
def _get_relative_stats_dir_physical_scale(tracking_scale_metres2):
"""Generates relative path for directory with storm statistics.
:param tracking_scale_metres2: Tracking scale (minimum storm area).
:return: relative_stats_dir_name: Relative path for directory with storm
statistics.
"""
return '{0:s}/scale_{1:d}m2'.format(
STATS_DIR_NAME_PART, int(numpy.round(tracking_scale_metres2))
)
def _get_relative_polygon_dir_ordinal_scale(tracking_scale_ordinal):
"""Generates relative path for directory with storm boundaries (polygons).
:param tracking_scale_ordinal: Tracking scale. This should be an ordinal
number in [0, N - 1], where N = number of tracking scales.
:return: relative_polygon_dir_name: Relative path for directory with storm
boundaries (polygons).
"""
return '{0:s}/scale_{1:d}'.format(
POLYGON_DIR_NAME_PART, tracking_scale_ordinal)
def _get_relative_polygon_dir_physical_scale(tracking_scale_metres2):
"""Generates relative path for directory with storm boundaries (polygons).
:param tracking_scale_metres2: Tracking scale (minimum storm area).
:return: relative_polygon_dir_name: Relative path for directory with storm
boundaries (polygons).
"""
return '{0:s}/scale_{1:d}m2'.format(
POLYGON_DIR_NAME_PART, int(numpy.round(tracking_scale_metres2))
)
def _rename_raw_dirs_ordinal_to_physical(
top_raw_directory_name=None, spc_date_string=None,
tracking_scales_ordinal=None, tracking_scales_metres2=None):
"""Renames dirs by changing tracking scale from ordinal number to m^2.
Each raw directory should contain either stats or polygon files for one
tracking scale and one SPC date. These directories exist inside the 1-day
tar files and are extracted by unzip_1day_tar_file.
N = number of tracking scales
:param top_raw_directory_name: Top-level directory for raw (polygon and
stats) files.
:param spc_date_string: SPC date in format "yyyymmdd".
:param tracking_scales_ordinal: length-N numpy array of tracking scales.
Each element must be an ordinal number in [0, N - 1].
:param tracking_scales_metres2: length-N numpy array of tracking scales
(m^2).
"""
num_scales = len(tracking_scales_ordinal)
for j in range(num_scales):
orig_stats_dir_name = '{0:s}/{1:s}/{2:s}/{3:s}'.format(
top_raw_directory_name, spc_date_string[:4], spc_date_string,
_get_relative_stats_dir_ordinal_scale(tracking_scales_ordinal[j])
)
new_stats_dir_name = '{0:s}/{1:s}/{2:s}/{3:s}'.format(
top_raw_directory_name, spc_date_string[:4], spc_date_string,
_get_relative_stats_dir_physical_scale(tracking_scales_metres2[j])
)
# TODO(thunderhoser): Write a rename method somewhere, which handles the
# case where the target already exists.
try:
os.rename(orig_stats_dir_name, new_stats_dir_name)
except OSError as this_error:
if this_error.errno == FILE_EXISTS_ERROR_CODE:
shutil.rmtree(new_stats_dir_name)
os.rename(orig_stats_dir_name, new_stats_dir_name)
else:
raise
orig_polygon_dir_name = '{0:s}/{1:s}/{2:s}/{3:s}'.format(
top_raw_directory_name, spc_date_string[:4], spc_date_string,
_get_relative_polygon_dir_ordinal_scale(tracking_scales_ordinal[j])
)
new_polygon_dir_name = '{0:s}/{1:s}/{2:s}/{3:s}'.format(
top_raw_directory_name, spc_date_string[:4], spc_date_string,
_get_relative_polygon_dir_physical_scale(tracking_scales_metres2[j])
)
try:
os.rename(orig_polygon_dir_name, new_polygon_dir_name)
except OSError as this_error:
if this_error.errno == FILE_EXISTS_ERROR_CODE:
shutil.rmtree(new_polygon_dir_name)
os.rename(orig_polygon_dir_name, new_polygon_dir_name)
else:
raise
def _open_xml_file(xml_file_name):
"""Opens an XML file, which may or may not be gzipped.
:param xml_file_name: Path to input file.
:return: xml_tree: Instance of `xml.etree.ElementTree`.
"""
gzip_as_input = xml_file_name.endswith(GZIP_FILE_EXTENSION)
if gzip_as_input:
gzip_file_object = gzip.open(xml_file_name, 'rb')
xml_temporary_file_object = tempfile.NamedTemporaryFile(delete=False)
shutil.copyfileobj(gzip_file_object, xml_temporary_file_object)
xml_file_name = xml_temporary_file_object.name
gzip_file_object.close()
xml_temporary_file_object.close()
xml_tree = ElementTree.parse(xml_file_name)
if gzip_as_input:
os.remove(xml_file_name)
return xml_tree
def unzip_1day_tar_file(
tar_file_name, spc_date_string, top_target_dir_name,
scales_to_extract_metres2):
"""Unzips tar file with segmotion output for one SPC date.
:param tar_file_name: Path to input file.
:param spc_date_string: SPC date (format "yyyymmdd").
:param top_target_dir_name: Name of top-level output directory.
:param scales_to_extract_metres2: 1-D numpy array of tracking scales to
extract.
:return: target_directory_name: Path to output directory. This will be
"<top_target_directory_name>/<yyyymmdd>", where <yyyymmdd> is the SPC
date.
"""
# Verification.
_ = time_conversion.spc_date_string_to_unix_sec(spc_date_string)
error_checking.assert_file_exists(tar_file_name)
error_checking.assert_is_greater_numpy_array(scales_to_extract_metres2, 0)
error_checking.assert_is_numpy_array(
scales_to_extract_metres2, num_dimensions=1)
scales_to_extract_metres2 = numpy.round(
scales_to_extract_metres2
).astype(int)
num_scales_to_extract = len(scales_to_extract_metres2)
directory_names_to_unzip = []
for j in range(num_scales_to_extract):
this_relative_stats_dir_name = '{0:s}/{1:s}'.format(
spc_date_string,
_get_relative_stats_dir_physical_scale(scales_to_extract_metres2[j])
)
this_relative_polygon_dir_name = '{0:s}/{1:s}'.format(
spc_date_string,
_get_relative_polygon_dir_physical_scale(
scales_to_extract_metres2[j])
)
directory_names_to_unzip.append(
this_relative_stats_dir_name.replace(spc_date_string + '/', '')
)
directory_names_to_unzip.append(
this_relative_polygon_dir_name.replace(spc_date_string + '/', '')
)
target_directory_name = '{0:s}/{1:s}/{2:s}'.format(
top_target_dir_name, spc_date_string[:4], spc_date_string
)
unzipping.unzip_tar(
tar_file_name, target_directory_name=target_directory_name,
file_and_dir_names_to_unzip=directory_names_to_unzip)
return target_directory_name
def find_local_stats_file(
unix_time_sec, spc_date_string, top_raw_directory_name,
tracking_scale_metres2, raise_error_if_missing=True):
"""Finds statistics file on local machine.
This file should contain storm stats (everything except polygons) for one
time step and one tracking scale.
:param unix_time_sec: Valid time.
:param spc_date_string: SPC date (format "yyyymmdd").
:param top_raw_directory_name: Name of top-level directory with raw
segmotion files.
:param tracking_scale_metres2: Tracking scale.
:param raise_error_if_missing: Boolean flag. If True and file is missing,
this method will raise an error.
:return: stats_file_name: Path to statistics file. If
raise_error_if_missing = False and file is missing, this will be the
*expected* path.
:raises: ValueError: if raise_error_if_missing = True and file is missing.
"""
# Error-checking.
_ = time_conversion.spc_date_string_to_unix_sec(spc_date_string)
error_checking.assert_is_string(top_raw_directory_name)
error_checking.assert_is_greater(tracking_scale_metres2, 0.)
error_checking.assert_is_boolean(raise_error_if_missing)
directory_name = '{0:s}/{1:s}/{2:s}/{3:s}'.format(
top_raw_directory_name, spc_date_string[:4], spc_date_string,
_get_relative_stats_dir_physical_scale(tracking_scale_metres2)
)
pathless_file_name = _get_pathless_stats_file_name(
unix_time_sec, zipped=True)
stats_file_name = '{0:s}/{1:s}'.format(directory_name, pathless_file_name)
if raise_error_if_missing and not os.path.isfile(stats_file_name):
pathless_file_name = _get_pathless_stats_file_name(
unix_time_sec, zipped=False)
stats_file_name = '{0:s}/{1:s}'.format(
directory_name, pathless_file_name)
if raise_error_if_missing and not os.path.isfile(stats_file_name):
raise ValueError(
'Cannot find storm-statistics file. Expected at location: ' +
stats_file_name)
return stats_file_name
def find_local_polygon_file(
unix_time_sec, spc_date_string, top_raw_directory_name,
tracking_scale_metres2, raise_error_if_missing=True):
"""Finds polygon file on local machine.
This file should contain storm outlines (polygons) for one time step and one
tracking scale.
:param unix_time_sec: Valid time.
:param spc_date_string: SPC date (format "yyyymmdd").
:param top_raw_directory_name: Name of top-level directory with raw
segmotion files.
:param tracking_scale_metres2: Tracking scale.
:param raise_error_if_missing: Boolean flag. If True and file is missing,
this method will raise an error.
:return: polygon_file_name: Path to polygon file. If
raise_error_if_missing = False and file is missing, this will be the
*expected* path.
:raises: ValueError: if raise_error_if_missing = True and file is missing.
"""
# Verification.
_ = time_conversion.spc_date_string_to_unix_sec(spc_date_string)
error_checking.assert_is_string(top_raw_directory_name)
error_checking.assert_is_greater(tracking_scale_metres2, 0.)
error_checking.assert_is_boolean(raise_error_if_missing)
directory_name = '{0:s}/{1:s}/{2:s}/{3:s}'.format(
top_raw_directory_name, spc_date_string[:4], spc_date_string,
_get_relative_polygon_dir_physical_scale(tracking_scale_metres2)
)
pathless_file_name = _get_pathless_polygon_file_name(
unix_time_sec, zipped=True)
polygon_file_name = '{0:s}/{1:s}'.format(directory_name, pathless_file_name)
if raise_error_if_missing and not os.path.isfile(polygon_file_name):
pathless_file_name = _get_pathless_polygon_file_name(
unix_time_sec, zipped=False)
polygon_file_name = '{0:s}/{1:s}'.format(
directory_name, pathless_file_name)
if raise_error_if_missing and not os.path.isfile(polygon_file_name):
raise ValueError(
'Cannot find polygon file. Expected at location: ' +
polygon_file_name)
return polygon_file_name
def find_polygon_files_for_spc_date(
spc_date_string, top_raw_directory_name, tracking_scale_metres2,
raise_error_if_missing=True):
"""Finds all polygon files for one SPC date.
:param spc_date_string: SPC date (format "yyyymmdd").
:param top_raw_directory_name: Name of top-level directory with raw
segmotion files.
:param tracking_scale_metres2: Tracking scale.
:param raise_error_if_missing: If True and no files can be found, this
method will raise an error.
:return: polygon_file_names: 1-D list of paths to polygon files.
"""
error_checking.assert_is_string(top_raw_directory_name)
directory_name = '{0:s}/{1:s}/{2:s}/{3:s}'.format(
top_raw_directory_name, spc_date_string[:4], spc_date_string,
_get_relative_polygon_dir_physical_scale(tracking_scale_metres2)
)
first_hour_unix_sec = (
SPC_DATE_START_HOUR * HOURS_TO_SECONDS +
time_conversion.string_to_unix_sec(
spc_date_string, time_conversion.SPC_DATE_FORMAT)
)
last_hour_unix_sec = (
SPC_DATE_END_HOUR * HOURS_TO_SECONDS +
time_conversion.string_to_unix_sec(
spc_date_string, time_conversion.SPC_DATE_FORMAT)
)
hours_in_spc_date_unix_sec = time_periods.range_and_interval_to_list(
start_time_unix_sec=first_hour_unix_sec,
end_time_unix_sec=last_hour_unix_sec,
time_interval_sec=HOURS_TO_SECONDS, include_endpoint=True)
polygon_file_names = []
for this_hour_unix_sec in hours_in_spc_date_unix_sec:
this_time_string_seconds = time_conversion.unix_sec_to_string(
this_hour_unix_sec, TIME_FORMAT_IN_FILES)
this_time_string_hours = time_conversion.unix_sec_to_string(
this_hour_unix_sec, TIME_FORMAT_IN_FILES_HOUR_ONLY) + '*'
this_pathless_file_name_zipped = _get_pathless_polygon_file_name(
this_hour_unix_sec, zipped=True)
this_pathless_file_pattern_zipped = (
this_pathless_file_name_zipped.replace(
this_time_string_seconds, this_time_string_hours)
)
this_file_pattern_zipped = '{0:s}/{1:s}'.format(
directory_name, this_pathless_file_pattern_zipped)
these_polygon_file_names_zipped = glob.glob(this_file_pattern_zipped)
if these_polygon_file_names_zipped:
polygon_file_names += these_polygon_file_names_zipped
this_pathless_file_name_unzipped = _get_pathless_polygon_file_name(
this_hour_unix_sec, zipped=False)
this_pathless_file_pattern_unzipped = (
this_pathless_file_name_unzipped.replace(
this_time_string_seconds, this_time_string_hours)
)
this_file_pattern_unzipped = '{0:s}/{1:s}'.format(
directory_name, this_pathless_file_pattern_unzipped)
these_polygon_file_names_unzipped = glob.glob(
this_file_pattern_unzipped)
for this_file_name_unzipped in these_polygon_file_names_unzipped:
this_file_name_zipped = (
this_file_name_unzipped + GZIP_FILE_EXTENSION)
if this_file_name_zipped in polygon_file_names:
continue
polygon_file_names.append(this_file_name_unzipped)
if raise_error_if_missing and not polygon_file_names:
raise ValueError(
'Cannot find any polygon files in directory: ' + directory_name)
polygon_file_names.sort()
return polygon_file_names
def get_start_end_times_for_spc_date(
spc_date_string, top_raw_directory_name, tracking_scale_metres2):
"""Returns first and last tracking times for SPC date.
:param spc_date_string: SPC date (format "yyyymmdd").
:param top_raw_directory_name: Name of top-level directory with raw
segmotion files.
:param tracking_scale_metres2: Tracking scale.
:return: start_time_unix_sec: First tracking time for SPC date.
:return: end_time_unix_sec: Last tracking time for SPC date.
"""
polygon_file_names = find_polygon_files_for_spc_date(
spc_date_string=spc_date_string,
top_raw_directory_name=top_raw_directory_name,
tracking_scale_metres2=tracking_scale_metres2)
first_metadata_dict = myrorss_and_mrms_io.read_metadata_from_raw_file(
polygon_file_names[0], data_source=radar_utils.MYRORSS_SOURCE_ID)
start_time_unix_sec = first_metadata_dict[radar_utils.UNIX_TIME_COLUMN]
last_metadata_dict = myrorss_and_mrms_io.read_metadata_from_raw_file(
polygon_file_names[-1], data_source=radar_utils.MYRORSS_SOURCE_ID)
end_time_unix_sec = last_metadata_dict[radar_utils.UNIX_TIME_COLUMN]
return start_time_unix_sec, end_time_unix_sec
def read_stats_from_xml(xml_file_name, spc_date_string):
"""Reads storm statistics from XML file.
:param xml_file_name: Path to input file.
:param spc_date_string: SPC date (format "yyyymmdd").
:return: stats_table: pandas DataFrame with the following columns.
stats_table.primary_id_string: Primary storm ID.
stats_table.east_velocity_m_s01: Eastward velocity (m/s).
stats_table.north_velocity_m_s01: Northward velocity (m/s).
stats_table.age_sec: Age of storm cell (seconds).
"""
# Verification.
_ = time_conversion.spc_date_string_to_unix_sec(spc_date_string)
error_checking.assert_file_exists(xml_file_name)
xml_tree = _open_xml_file(xml_file_name)
storm_dict = {}
this_column_name = None
this_column_name_orig = None
this_column_values = None
for this_element in xml_tree.iter():
if this_element.tag == 'datacolumn':
if this_column_name_orig in XML_COLUMN_NAMES_ORIG:
storm_dict.update({this_column_name: this_column_values})
this_column_name_orig = this_element.attrib['name']
if this_column_name_orig in XML_COLUMN_NAMES_ORIG:
this_column_name = _xml_column_name_orig_to_new(
this_column_name_orig)
this_column_values = []
continue
if this_column_name_orig not in XML_COLUMN_NAMES_ORIG:
continue
if this_column_name == tracking_utils.PRIMARY_ID_COLUMN:
this_column_values.append(this_element.attrib['value'])
elif this_column_name == tracking_utils.NORTH_VELOCITY_COLUMN:
this_column_values.append(-1 * float(this_element.attrib['value']))
elif this_column_name == tracking_utils.EAST_VELOCITY_COLUMN:
this_column_values.append(float(this_element.attrib['value']))
elif this_column_name == tracking_utils.AGE_COLUMN:
this_column_values.append(
int(numpy.round(float(this_element.attrib['value'])))
)
stats_table = pandas.DataFrame.from_dict(storm_dict)
primary_id_strings = _append_spc_date_to_storm_ids(
primary_id_strings=stats_table[
tracking_utils.PRIMARY_ID_COLUMN].values,
spc_date_string=spc_date_string)
stats_table = stats_table.assign(**{
tracking_utils.PRIMARY_ID_COLUMN: primary_id_strings
})
# Removes any row with NaN.
return stats_table.loc[stats_table.notnull().all(axis=1)]
def read_polygons_from_netcdf(
netcdf_file_name, metadata_dict, spc_date_string,
tracking_start_time_unix_sec, tracking_end_time_unix_sec,
raise_error_if_fails=True):
"""Reads storm polygons (outlines of storm cells) from NetCDF file.
P = number of grid points in storm cell (different for each storm cell)
V = number of vertices in storm polygon (different for each storm cell)
If file cannot be opened, returns None.
:param netcdf_file_name: Path to input file.
:param metadata_dict: Dictionary with metadata for NetCDF file, created by
`myrorss_and_mrms_io.read_metadata_from_raw_file`.
:param spc_date_string: SPC date (format "yyyymmdd").
:param tracking_start_time_unix_sec: Start time for tracking period. This
can be found by `get_start_end_times_for_spc_date`.
:param tracking_end_time_unix_sec: End time for tracking period. This can
be found by `get_start_end_times_for_spc_date`.
:param raise_error_if_fails: Boolean flag. If True and file cannot be
opened, this method will raise an error.
:return: polygon_table: pandas DataFrame with the following columns. Each
row is one storm object.
polygon_table.primary_id_string: See documentation for
`storm_tracking_io.write_file`.
polygon_table.valid_time_unix_sec: Same.
polygon_table.spc_date_string: Same.
polygon_table.tracking_start_time_unix_sec: Same.
polygon_table.tracking_end_time_unix_sec: Same.
polygon_table.centroid_latitude_deg: Same.
polygon_table.centroid_longitude_deg: Same.
polygon_table.grid_point_latitudes_deg: Same.
polygon_table.grid_point_longitudes_deg: Same.
polygon_table.grid_point_rows: Same.
polygon_table.grid_point_columns: Same.
polygon_table.polygon_object_latlng_deg: Same.
polygon_table.polygon_object_rowcol: Same.
"""
error_checking.assert_file_exists(netcdf_file_name)
error_checking.assert_is_integer(tracking_start_time_unix_sec)
error_checking.assert_is_not_nan(tracking_start_time_unix_sec)
error_checking.assert_is_integer(tracking_end_time_unix_sec)
error_checking.assert_is_not_nan(tracking_end_time_unix_sec)
netcdf_dataset = netcdf_io.open_netcdf(netcdf_file_name,
raise_error_if_fails)
if netcdf_dataset is None:
return None
storm_id_column = metadata_dict[radar_utils.FIELD_NAME_COLUMN]
storm_id_column_orig = metadata_dict[
myrorss_and_mrms_io.FIELD_NAME_COLUMN_ORIG]
num_values = len(
netcdf_dataset.variables[myrorss_and_mrms_io.GRID_ROW_COLUMN_ORIG]
)
if num_values == 0:
sparse_grid_dict = {
myrorss_and_mrms_io.GRID_ROW_COLUMN: numpy.array([], dtype=int),
myrorss_and_mrms_io.GRID_COLUMN_COLUMN: numpy.array([], dtype=int),
myrorss_and_mrms_io.NUM_GRID_CELL_COLUMN:
numpy.array([], dtype=int),
storm_id_column: numpy.array([], dtype=int)
}
else:
sparse_grid_dict = {
myrorss_and_mrms_io.GRID_ROW_COLUMN:
netcdf_dataset.variables[
myrorss_and_mrms_io.GRID_ROW_COLUMN_ORIG][:],
myrorss_and_mrms_io.GRID_COLUMN_COLUMN:
netcdf_dataset.variables[
myrorss_and_mrms_io.GRID_COLUMN_COLUMN_ORIG][:],
myrorss_and_mrms_io.NUM_GRID_CELL_COLUMN:
netcdf_dataset.variables[
myrorss_and_mrms_io.NUM_GRID_CELL_COLUMN_ORIG][:],
storm_id_column: netcdf_dataset.variables[storm_id_column_orig][:]
}
netcdf_dataset.close()
sparse_grid_table = | pandas.DataFrame.from_dict(sparse_grid_dict) | pandas.DataFrame.from_dict |
import pickle5 as pickle
import matplotlib.pyplot as plt
import matplotlib as mpl
#mpl.use('pdf')
import itertools
import numpy as np
from datetime import datetime
import torch
from torch import nn
from torch import optim
import os
import sys
import pandas as pd
from matplotlib import interactive
from matplotlib.patches import Rectangle
from utils import make_histos
from utils.utilities import meter
from utils.utilities import cartesian_converter
sys.path.insert(0,'/mnt/c/Users/rober/Dropbox/Bobby/Linux/classes/GAML/GAMLX/nflows/nflows')
from nflows.transforms.autoregressive import MaskedUMNNAutoregressiveTransform
from nflows.flows.base import Flow
from nflows.distributions.normal import StandardNormal
from nflows.distributions.normal import DiagonalNormal
from nflows.transforms.base import CompositeTransform
from nflows.transforms.autoregressive import MaskedAffineAutoregressiveTransform
from nflows.transforms.permutations import ReversePermutation
from icecream import ic
#data_path = "gendata/4features/" #Just electorn features
#data_path = "gendata/16features/" #All 16 features
#data_path = "gendata/Cond/16features/maaf/"
#data_path = "gendata/Cond/16features/UMNN/"
#data_path = "gendata/Relational/photon1_1M/"
#data_path = "gendata/Relational/QT/photon_1/"
data_path = "gendata/Relational/QT/INV/photon_1/"
dfs = []
filenames = os.listdir(data_path)
for f in filenames:
df0 = pd.read_pickle(data_path+f)
dfs.append(df0)
df_photon1 = | pd.concat(dfs) | pandas.concat |
"""This application experiments with the (grid) layout and some styling
Can we make a compact dashboard across several columns and with a dark theme?"""
# import io
from typing import List, Optional
import os, sys, re
import markdown
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import streamlit as st
import pydeck as pdk
# for testing
import numpy as np
# add path to database
modules_path = [os.path.abspath(os.path.join('.')+'/src/data/data_collection/')] #, os.path.abspath(os.path.join('.')+'/web/pages')
for module in modules_path:
if module not in sys.path:
sys.path.append(module)
from storage_managers.database import Database
# import pages.home as home
# matplotlib.use("Agg")
COLOR = 'black'
BACKGROUND_COLOR = '#fffafa'
COUNT = 0
# extension to sidebar
# def add_resources_section():
# """Adds a resources section to the sidebar"""
# st.sidebar.header("Add_resources_section")
# st.sidebar.markdown(
# """
# - [gridbyexample.com] (https://gridbyexample.com/examples/)
# """
# )
class Cell:
"""A Cell can hold text, markdown, plots etc."""
def __init__(
self,
class_: str = None,
grid_column_start: Optional[int] = None,
grid_column_end: Optional[int] = None,
grid_row_start: Optional[int] = None,
grid_row_end: Optional[int] = None,
):
self.class_ = class_
self.grid_column_start = grid_column_start
self.grid_column_end = grid_column_end
self.grid_row_start = grid_row_start
self.grid_row_end = grid_row_end
self.inner_html = ""
def _to_style(self) -> str:
return f"""
.{self.class_} {{
grid-column-start: {self.grid_column_start};
grid-column-end: {self.grid_column_end};
grid-row-start: {self.grid_row_start};
grid-row-end: {self.grid_row_end};
}}
"""
def text(self, text: str = ""):
self.inner_html = text
def markdown(self, text):
self.inner_html = markdown.markdown(text)
def dataframe(self, dataframe: pd.DataFrame):
self.inner_html = dataframe.to_html()
def image(self, url):
self.inner_html = '<img src ="' + url +'"/>'
def image_card(self, name, address, score, image_url):
stars = ""
for i in range(int(score)): stars += '<img src="https://cdn.onlinewebfonts.com/svg/img_39469.png"/>'
self.inner_html = '<div class="flex">'\
+'<img class="main-image" src ="' + image_url +'"/>'\
+ '<div class="main-body">'\
+ '<h3>'+ name +'</h3>'\
+ '<p class="stars">'+ stars + '</p>'\
+ '<p class="location"><svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" class="icon_svg"><path d="M12 1.04a9.25 9.25 0 0 1 6.54 15.79l-5.83 5.84A1 1 0 0 1 12 23a1 1 0 0 1-.71-.29l-5.83-5.88A9.25 9.25 0 0 1 12 1.04zm0 2.01a7.25 7.25 0 0 0-5.13 12.37L12 20.54l5.13-5.12A7.25 7.25 0 0 0 12 3.05zm0 3.2a4 4 0 1 1 0 8 4 4 0 0 1 0-8zm0 2a2 2 0 1 0 0 4 2 2 0 0 0 0-4z"></path></svg> 0.2 miles </p>' \
+ '<p class="light">Summer drinks are back at Starbucks. Order today. </p>'\
+ '</div>'\
+ '<div class="address">'\
+ '<p class="light">' + address + '</p>'\
+ '</div>'\
+ '</div>'
def _to_html(self):
return f"""<div class="box {self.class_}">{self.inner_html}</div>"""
class Grid:
"""A (CSS) Grid"""
def __init__(
self,
template_columns="1 1 1",
gap="10px",
background_color=COLOR,
color=BACKGROUND_COLOR,
df=None,
):
self.template_columns = template_columns
self.gap = gap
self.background_color = background_color
self.color = color
self.cells: List[Cell] = []
self.df = df
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
st.markdown(self._get_grid_style(), unsafe_allow_html=True)
st.markdown(self._get_cells_style(), unsafe_allow_html=True)
st.markdown(self._get_cells_html(), unsafe_allow_html=True)
def _get_grid_style(self):
return f"""
<style>
.wrapper {{
display: grid;
grid-template-columns: {self.template_columns};
grid-gap: {self.gap};
background-color: {self.background_color};
color: {self.color};
}}
.box {{
border-radius: 5px;
padding: 20px;
font-size: 150%;
background: none;
border: 1px solid #a9abaf;
color: #000;
background: #fbfafa;
}}
.box .main-image {{
width: auto;
height: 200px;
border-radius: 10px;
margin-right: 12px;
}}
.box .stars {{
height: 15px;
}}
.box .location {{
font-weight: bold;
}}
.box .light {{
color: #a0a0a0;
font-weight: 100;
font-size: 14px;
}}
.box .main-body {{
flex: 1;
}}
.box .address {{
width: 150px;
}}
.box .stars img {{
height: 100%;
margin-right: 2px;
}}
table {{
color: {self.color}
}}
.flex {{
display: flex;
}}
.stSelectbox div {{
background: #fff;
}}
.st-at {{
background-color:none;
border: 1px solid #fb919d;
}}
.reportview-container .image-container img {{
width:100px !important;
margin: auto;
}}
.sidebar .sidebar-content {{
width: 14rem;
}}
</style>
"""
def _get_cells_style(self):
return (
"<style>"
+ "\n".join([cell._to_style() for cell in self.cells])
+ "</style>"
)
def _get_cells_html(self):
return (
'<div class="wrapper">'
+ "\n".join([cell._to_html() for cell in self.cells])
+ "</div>"
)
def cell(
self,
class_: str = None,
grid_column_start: Optional[int] = None,
grid_column_end: Optional[int] = None,
grid_row_start: Optional[int] = None,
grid_row_end: Optional[int] = None,
):
cell = Cell(
class_=class_,
grid_column_start=grid_column_start,
grid_column_end=grid_column_end,
grid_row_start=grid_row_start,
grid_row_end=grid_row_end,
)
self.cells.append(cell)
return cell
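# Usage sketch: Grid is a context manager that collects cells and, on __exit__, injects the
# grid CSS and the cells' HTML into the Streamlit page via st.markdown(..., unsafe_allow_html=True).
# The labels and coordinates below are arbitrary example values.
#
#     with Grid("1 1 1", color=COLOR, background_color=BACKGROUND_COLOR) as grid:
#         grid.cell("a", 1, 2, 1, 1).markdown("# A markdown cell")
#         grid.cell("b", 2, 3, 1, 1).text("A plain text cell")
#         grid.cell("c", 1, 3, 2, 2).dataframe(pd.DataFrame({"x": [1, 2, 3]}))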
def _set_block_container_style(
# max_width: int = 1200,
# max_width_100_percent: bool = False,
padding_top: int = 5,
padding_right: int = 1,
padding_left: int = 1,
padding_bottom: int = 5,
):
# if max_width_100_percent:
# max_width_str = f"max-width: 100%;"
# else:
# max_width_str = f"max-width: {max_width}px;"
st.markdown(
f"""
<style>
.reportview-container .main .block-container{{
width: 67%;
padding-top: 20px;
padding-right: {padding_right}rem;
padding-left: {padding_left}rem;
padding-bottom: 0;
max-width: 814px;
}}
.reportview-container .main {{
color: {COLOR};
background-color: {BACKGROUND_COLOR};
align-items: flex-start;
}}
.reportview-container .main .block-container .element-container:nth-child(9) {{
width: 28% !important;
position: fixed;
top: 110px;
right: 0;
border-radius: 5px;
overflow: hidden;
height: 100vh;
padding-right: 10px;
}}
.sidebar-content {{
background-color: #e5e7ea;
background-image: none;
}}
@media (max-width: 960px) {{
.reportview-container .main .block-container{{
width: 100%;
max-width: 100%;
}}
.reportview-container .main .block-container .element-container:nth-child(9) {{
display: none;
}}
}}
</style>
""",
unsafe_allow_html=True,
)
x = 0
def make_main_body(res_df):
df = get_neighborhoods_dataframe()
df.columns = ['name', 'id', 'lat', 'lon']
neighborhood = st.selectbox("", df['name'])
st.markdown(
"""
<h1>Best Halal food near {0}</h1>
Are you wondering what halal options are around NYC neighborhoods?
        We provide a halal-reliability score based on reviews of the restaurants.
We even have a dark theme?
""".format(neighborhood), unsafe_allow_html=True
)
# generate a grid of 2 image_cards
grid = Grid("1 1 1", color=COLOR, background_color=BACKGROUND_COLOR, df=df)
with grid:
for i, row in zip(range(df.shape[0]), res_df.itertuples()):
grid.cell(chr(i + 97), 1, 2, i+1, i+1).image_card(name='. '.join([str(i+1),row.name]), address=row.address, score=str(row.score), image_url=row.image_url)
    # should be placed in its own function later
# generate the map
    # Default the initial map view to the first row of the neighborhood data (not the true center)
midpoint = ((df.loc[0, 'lat']), (df.loc[0, 'lon']))
st.pydeck_chart(pdk.Deck(
map_style='mapbox://styles/mapbox/light-v9',
initial_view_state=pdk.ViewState(
latitude = midpoint[0],
longitude = midpoint[1],
zoom = 10,
pitch = 10
),
tooltip={
'html': '<b>{name}</b>',
'style': {
'color': 'white'
}
},
layers=[ pdk.Layer(
'ScatterplotLayer',
data=df,
get_position=['lon', 'lat'],
auto_highlight=True,
get_radius=250,
get_fill_color='[145, 196, 251]',
pickable=True
)]
)
)
@st.cache
def get_restaurant_dataframe(sort_by='') -> | pd.DataFrame() | pandas.DataFrame |
"""
Individual plots
"""
import pandas as pd
import plotly.graph_objs as go
import constants as c
import utilities as u
def plot_timeserie(dfg, avg_month, timewindow="M", height=None):
"""
    Creates a timeseries plot with incomes, expenses and the resulting EBIT
Args:
dfg: dataframe with info
avg_month: month to use in time average
timewindow: temporal grouping
height: height of the plot
Returns:
        the plotly Figure object
"""
# Income/Expense traces
iter_data = {c.names.INCOMES: c.colors.INCOMES, c.names.EXPENSES: c.colors.EXPENSES}
data = []
for name, color in iter_data.items():
df = u.group_df_with_time_avg(dfg[dfg[c.cols.TYPE] == name], timewindow, avg_month)
data.append(
go.Scatter(
x=df.index, y=df[c.cols.AMOUNT], marker={"color": color}, name=name, mode="lines"
)
)
# EBIT trace
df = u.group_df_with_time_avg(u.get_ebit(dfg), timewindow, avg_month)
data.append(
go.Scatter(
x=df.index,
y=df[c.cols.AMOUNT],
marker={"color": c.colors.EBIT},
name=c.names.EBIT,
mode="lines",
)
)
layout = go.Layout(title="Evolution", height=height)
return go.Figure(data=data, layout=layout)
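# Usage sketch (assumes `dfg` follows the column conventions from the `constants` module,
# i.e. a c.cols.TYPE column holding c.names.INCOMES / c.names.EXPENSES and a c.cols.AMOUNT
# column; those definitions are not part of this file):
#
#     fig = plot_timeserie(dfg, avg_month=3, timewindow="M", height=400)
#     fig.show()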
def plot_timeserie_by_categories(
dfg, df_categ, avg_month, type_trans=c.names.EXPENSES, timewindow="M"
):
"""
Creates a timeseries plot detailed by category
Args:
dfg: dataframe with info
df_categ: categories dataframe
avg_month: month to use in time average
type_trans: type of transaction [Income/Expense]
timewindow: temporal grouping
Returns:
        the plotly Figure object
"""
df = dfg[dfg[c.cols.TYPE] == type_trans].copy()
df_cat = df_categ[df_categ[c.cols.TYPE] == type_trans].set_index(c.cols.NAME)
df_aux = u.group_df_with_time_avg(df, timewindow, avg_month, dfg)
data = [
go.Scatter(
x=df_aux.index, y=df_aux[c.cols.AMOUNT], marker={"color": "black"}, name=c.names.TOTAL
)
]
for cat in df_cat.index:
if cat in df_cat.index:
color_index = df_cat.at[cat, c.cols.COLOR_INDEX]
color_name = df_cat.at[cat, c.cols.COLOR_NAME]
color = u.get_colors((color_name, color_index))
else:
color = u.get_colors(("black", 500))
df_aux = u.group_df_with_time_avg(
df[df[c.cols.CATEGORY] == cat], timewindow, avg_month, dfg
)
data.append(
go.Bar(x=df_aux.index, y=df_aux[c.cols.AMOUNT], marker={"color": color}, name=cat)
)
layout = go.Layout(title="Evolution by category", barmode="stack", height=600)
return go.Figure(data=data, layout=layout)
def plot_savings_ratio(dfg, avg_month, timewindow="M"):
"""
Plots the ratio between ebit and incomes
Args:
dfg: dataframe with info
avg_month: month to use in time average
timewindow: temporal grouping
Returns:
        the plotly Figure object
"""
# Calculate EBIT
df = u.group_df_with_time_avg(u.get_ebit(dfg), timewindow, avg_month)
# Incomes
dfi = u.group_df_with_time_avg(dfg[dfg[c.cols.TYPE] == c.names.INCOMES], timewindow, avg_month)
# Savings ratio
df = df[c.cols.AMOUNT] / dfi[c.cols.AMOUNT]
# Only positive values
df = df.apply(lambda x: 0 if | pd.isnull(x) | pandas.isnull |
# Copyright 2021 <NAME> +https://github.com/tonywu7/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from datetime import datetime
from statistics import mean
from typing import Dict, List, Tuple
import numpy as np
import pandas as pd
def stats(predictions: Dict[str, str], ground_truths: Dict[str, str], categories: List[str]):
predictions = np.array([*predictions.values()])
ground_truths = np.array([*ground_truths.values()])
print('# results')
for k in categories:
print(f'{k}: {np.count_nonzero(predictions == k)} predicted {np.count_nonzero(ground_truths == k)} expected')
def score(predictions: Dict[str, str], ground_truths: Dict[str, str], categories: List[str]) -> Dict[str, Tuple[float, float, float]]:
tp = defaultdict(int)
fp = defaultdict(int)
fn = defaultdict(int)
gt = np.array([*ground_truths.values()])
counts = {k: np.count_nonzero(gt == k) for k in categories}
confusion = | pd.DataFrame(index=categories, columns=categories, data=0) | pandas.DataFrame |
import math
import torch
import pandas as pd
from torchvision.transforms import Lambda
from util.util_funcs import LABEL_TO_IDX
def collate_fn(batch):
len_batch = len(batch) # original batch length
batch = list(filter(lambda x: x is not None, batch)) # filter out all the Nones
    if len_batch > len(batch):
        # if there are samples missing, just reuse existing members so the batch keeps its
        # original size (doesn't work if you reject every sample in a batch)
        diff = len_batch - len(batch)
        batch = batch + (batch * diff)[:diff]
return torch.utils.data.dataloader.default_collate(batch)
class TableDataset(torch.utils.data.Dataset):
def __init__(self, data, tokenizer):
self.data = data
self.tokenizer = tokenizer
def __getitem__(self, idx):
item = self.data.iloc[idx]
table = pd.read_csv(item.table_file).astype(
str
) # be sure to make your table data text only
try:
encoding = self.tokenizer(
table=table,
queries=item.question,
answer_coordinates=item.answer_coordinates,
answer_text=item.answer_text,
truncation=True,
padding="max_length",
return_tensors="pt",
)
# remove the batch dimension which the tokenizer adds by default
encoding = {key: val.squeeze(0) for key, val in encoding.items()}
if torch.gt(encoding["numeric_values"], 1e20).any():
return None
# add the float_answer which is also required (weak supervision for aggregation case)
encoding["float_answer"] = torch.tensor(item.float_answer)
encoding["claim_id"] = item["claim_id"]
encoding["question"] = item["question"]
return encoding
except:
return None
def __len__(self):
return len(self.data)
# return 10
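# Usage sketch: because __getitem__ returns None for rows the tokenizer cannot encode (or whose
# numeric values overflow), the dataset is meant to be paired with the collate_fn defined above.
# `train_df` and `tapas_tokenizer` are placeholders, not names defined in this file.
#
#     train_dataset = TableDataset(train_df, tapas_tokenizer)
#     train_loader = torch.utils.data.DataLoader(
#         train_dataset, batch_size=8, shuffle=True, collate_fn=collate_fn
#     )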
class PredictionDataset(torch.utils.data.Dataset):
def __init__(self, entailment_data, roberta_tokenizer, tapas_tokenizer):
self.entailment_data = entailment_data
self.roberta_tokenizer = roberta_tokenizer
self.tapas_tokenizer = tapas_tokenizer
self.label_transform = Lambda(
lambda y: torch.zeros(3, dtype=torch.long).scatter_(
dim=0, index=torch.tensor(y), value=1
)
)
self.roberta_max_seq_len = 256
def __getitem__(self, idx):
item = self.entailment_data.iloc[idx]
if not | pd.isnull(item.table_file) | pandas.isnull |
# Required imports
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
import pylab
import scipy
import random
import datetime
import re
import time
from math import sqrt
import matplotlib.dates as mdates
from matplotlib.dates import date2num, num2date
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn import preprocessing
pd.set_option('display.max_columns', None) # to view all columns
from scipy.optimize import curve_fit
from supersmoother import SuperSmoother
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline
from sklearn.gaussian_process.kernels import Matern, WhiteKernel
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression, Ridge, Lasso, RidgeCV, LassoCV
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error
import scipy.stats as stats
import warnings
warnings.filterwarnings("ignore")
from pyproj import Proj, Transformer
from ipyleaflet import (Map, basemaps, WidgetControl, GeoJSON,
LayersControl, Icon, Marker,FullScreenControl,
CircleMarker, Popup, AwesomeIcon)
from ipywidgets import HTML
plt.rcParams["font.family"] = "Times New Roman"
class functions:
def __init__(self, data):
self.setData(data)
self.__jointData = [None, 0]
# DATA VALIDATION
def __isValid_Data(self, data):
if(str(type(data)).lower().find('dataframe') == -1):
return (False, 'Make sure the data is a pandas DataFrame.\n')
if(not self.__hasColumns_Data(data)):
return (False, 'Make sure that ALL of the columns specified in the REQUIREMENTS are present.\n')
else:
return (True, None)
def __isValid_Construction_Data(self, data):
if(str(type(data)).lower().find('dataframe') == -1):
return (False, 'Make sure the data is a pandas DataFrame.\n')
if(not self.__hasColumns_Construction_Data(data)):
return (False, 'Make sure that ALL of the columns specified in the REQUIREMENTS are present.\n')
else:
return (True, None)
# COLUMN VALIDATION
def __hasColumns_Data(self, data):
find = ['COLLECTION_DATE','STATION_ID','ANALYTE_NAME','RESULT','RESULT_UNITS']
cols = list(data.columns)
cols = [x.upper() for x in cols]
hasCols = all(item in cols for item in find)
return hasCols
def __hasColumns_Construction_Data(self, data):
find = ['STATION_ID', 'AQUIFER', 'WELL_USE', 'LATITUDE', 'LONGITUDE', 'GROUND_ELEVATION', 'TOTAL_DEPTH']
cols = list(data.columns)
cols = [x.upper() for x in cols]
hasCols = all(item in cols for item in find)
return hasCols
# SETTING DATA
def setData(self, data, verbose=True):
validation = self.__isValid_Data(data)
if(validation[0]):
# Make all columns all caps
cols_upper = [x.upper() for x in list(data.columns)]
data.columns = cols_upper
self.data = data
if(verbose):
print('Successfully imported the data!\n')
self.__set_units()
else:
print('ERROR: {}'.format(validation[1]))
return self.REQUIREMENTS_DATA()
def setConstructionData(self, construction_data, verbose=True):
validation = self.__isValid_Construction_Data(construction_data)
if(validation[0]):
# Make all columns all caps
cols_upper = [x.upper() for x in list(construction_data.columns)]
construction_data.columns = cols_upper
self.construction_data = construction_data.set_index(['STATION_ID'])
if(verbose):
print('Successfully imported the construction data!\n')
else:
print('ERROR: {}'.format(validation[1]))
return self.REQUIREMENTS_CONSTRUCTION_DATA()
def jointData_is_set(self, lag):
if(str(type(self.__jointData[0])).lower().find('dataframe') == -1):
return False
else:
if(self.__jointData[1]==lag):
return True
else:
return False
def set_jointData(self, data, lag):
self.__jointData[0] = data
self.__jointData[1] = lag
# GETTING DATA
def getData(self):
return self.data
def get_Construction_Data(self):
return self.construction_data
# MESSAGES FOR INVALID DATA
def REQUIREMENTS_DATA(self):
print('PYLENM DATA REQUIREMENTS:\nThe imported data needs to meet ALL of the following conditions to have a successful import:')
print(' 1) Data should be a pandas dataframe.')
print(" 2) Data must have these column names: \n ['COLLECTION_DATE','STATION_ID','ANALYTE_NAME','RESULT','RESULT_UNITS']")
def REQUIREMENTS_CONSTRUCTION_DATA(self):
print('PYLENM CONSTRUCTION REQUIREMENTS:\nThe imported construction data needs to meet ALL of the following conditions to have a successful import:')
print(' 1) Data should be a pandas dataframe.')
print(" 2) Data must have these column names: \n ['station_id', 'aquifer', 'well_use', 'latitude', 'longitude', 'ground_elevation', 'total_depth']")
# Helper function for plot_correlation
# Sorts analytes in a specific order: 'TRITIUM', 'URANIUM-238','IODINE-129','SPECIFIC CONDUCTANCE', 'PH', 'DEPTH_TO_WATER'
def __custom_analyte_sort(self, analytes):
my_order = 'TURISPDABCEFGHJKLMNOQVWXYZ-_abcdefghijklmnopqrstuvwxyz135790 2468'
return sorted(analytes, key=lambda word: [my_order.index(c) for c in word])
def __plotUpperHalf(self, *args, **kwargs):
corr_r = args[0].corr(args[1], 'pearson')
corr_text = f"{corr_r:2.2f}"
ax = plt.gca()
ax.set_axis_off()
marker_size = abs(corr_r) * 10000
ax.scatter([.5], [.5], marker_size, [corr_r], alpha=0.6, cmap="coolwarm",
vmin=-1, vmax=1, transform=ax.transAxes)
font_size = abs(corr_r) * 40 + 5
ax.annotate(corr_text, [.5, .48,], xycoords="axes fraction", # [.5, .48,]
ha='center', va='center', fontsize=font_size, fontweight='bold')
# Description:
# Removes all columns except 'COLLECTION_DATE', 'STATION_ID', 'ANALYTE_NAME', 'RESULT', and 'RESULT_UNITS'.
# If the user specifies additional columns in addition to the ones listed above, those columns will be kept.
# The function returns a dataframe and has an optional parameter to be able to save the dataframe to a csv file.
# Parameters:
# data (dataframe): data to simplify
# inplace (bool): save data to current working dataset
# columns (list of strings): list of any additional columns on top of ['COLLECTION_DATE', 'STATION_ID', 'ANALYTE_NAME', 'RESULT', and 'RESULT_UNITS'] to be kept in the dataframe.
# save_csv (bool): flag to determine whether or not to save the dataframe to a csv file.
# file_name (string): name of the csv file you want to save
# save_dir (string): name of the directory you want to save the csv file to
def simplify_data(self, data=None, inplace=False, columns=None, save_csv=False, file_name= 'data_simplified', save_dir='data/'):
if(str(type(data)).lower().find('dataframe') == -1):
data = self.data
else:
data = data
if(columns==None):
sel_cols = ['COLLECTION_DATE','STATION_ID','ANALYTE_NAME','RESULT','RESULT_UNITS']
else:
hasColumns = all(item in list(data.columns) for item in columns)
if(hasColumns):
sel_cols = ['COLLECTION_DATE','STATION_ID','ANALYTE_NAME','RESULT','RESULT_UNITS'] + columns
else:
print('ERROR: specified column(s) do not exist in the data')
return None
data = data[sel_cols]
data.COLLECTION_DATE = pd.to_datetime(data.COLLECTION_DATE)
data = data.sort_values(by="COLLECTION_DATE")
dup = data[data.duplicated(['COLLECTION_DATE', 'STATION_ID','ANALYTE_NAME', 'RESULT'])]
data = data.drop(dup.index)
data = data.reset_index().drop('index', axis=1)
if(save_csv):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
data.to_csv(save_dir + file_name + '.csv')
print('Successfully saved "' + file_name +'.csv" in ' + save_dir)
if(inplace):
self.setData(data, verbose=False)
return data
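    # Usage sketch (illustrative file name and extra columns; the input must satisfy the
    # column requirements printed by REQUIREMENTS_DATA):
    #
    #     pylenm = functions(pd.read_csv("measurements.csv"))
    #     simple = pylenm.simplify_data(columns=["LATITUDE", "LONGITUDE"], save_csv=False)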
# Description:
# Returns the Maximum Concentration Limit value for the specified analyte.
# Example: 'TRITIUM' returns 1.3
# Parameters:
# analyte_name (string): name of the analyte to be processed
def get_MCL(self, analyte_name):
mcl_dictionary = {'TRITIUM': 1.3, 'URANIUM-238': 1.31, 'NITRATE-NITRITE AS NITROGEN': 1,
'TECHNETIUM-99': 2.95, 'IODINE-129': 0, 'STRONTIUM-90': 0.9
}
return mcl_dictionary[analyte_name]
def __set_units(self):
analytes = list(np.unique(self.data[['ANALYTE_NAME']]))
mask1 = ~self.data[['ANALYTE_NAME','RESULT_UNITS']].duplicated()
res = self.data[['ANALYTE_NAME','RESULT_UNITS']][mask1]
mask2 = ~self.data[['ANALYTE_NAME']].duplicated()
res = res[mask2]
unit_dictionary = pd.Series(res.RESULT_UNITS.values,index=res.ANALYTE_NAME).to_dict()
self.unit_dictionary = unit_dictionary
# Description:
# Returns the unit of the analyte you specify.
# Example: 'DEPTH_TO_WATER' returns 'ft'
# Parameters:
# analyte_name (string): name of the analyte to be processed
def get_unit(self, analyte_name):
return self.unit_dictionary[analyte_name]
# Description:
# Filters construction data based on one column. You only specify ONE column to filter by, but can selected MANY values for the entry.
# Parameters:
# data (dataframe): dataframe to filter
# col (string): column to filter. Example: col='STATION_ID'
# equals (list of strings): values to filter col by. Examples: equals=['FAI001A', 'FAI001B']
def filter_by_column(self, data=None, col=None, equals=[]):
if(data is None):
return 'ERROR: DataFrame was not provided to this function.'
else:
if(str(type(data)).lower().find('dataframe') == -1):
return 'ERROR: Data provided is not a pandas DataFrame.'
else:
data = data
# DATA VALIDATION
if(col==None):
return 'ERROR: Specify a column name to filter by.'
data_cols = list(data.columns)
if((col in data_cols)==False): # Make sure column name exists
return 'Error: Column name "{}" does not exist'.format(col)
if(equals==[]):
return 'ERROR: Specify a value that "{}" should equal to'.format(col)
data_val = list(data[col])
for value in equals:
if((value in data_val)==False):
return 'ERROR: No value equal to "{}" in "{}".'.format(value, col)
# QUERY
final_data = | pd.DataFrame() | pandas.DataFrame |
import docx
from docx.shared import Pt
from docx.enum.text import WD_ALIGN_PARAGRAPH, WD_BREAK
from docx.shared import Cm
import os
import math
import pandas as pd
import numpy as np
import re
from datetime import date
import streamlit as st
import json
import glob
from PIL import Image
import smtplib
import docx2pdf
import shutil
import zipfile
from datetime import datetime
import platform
import matplotlib.pyplot as plt
def User_validation():
f=open("Validation/Validation.json","r")
past=json.loads(f.read())
f.close()
now=datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M")
time_past=datetime.strptime(past['Acceso']["Hora"], "%d/%m/%Y %H:%M")
timesince = now - time_past
Time_min= int(timesince.total_seconds() / 60)
bool_negate = Time_min<120
if not bool_negate:
past['Acceso'].update({"Estado":"Negado"})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
bool_aprove= past['Acceso']["Estado"]=="Aprovado"
if not bool_aprove:
colums= st.columns([1,2,1])
with colums[1]:
#st.image("Imagenes/Escudo_unal.png")
st.subheader("Ingrese el usuario y contraseña")
Usuario=st.text_input("Usuario")
Clave=st.text_input("Contraseña",type="password")
Users=["Gestor Comercial"]
bool_user = Usuario in Users
bool_clave = (Clave)==("1234")
bool_user_email = past['Acceso']["User"] == Usuario
bool_time2 = Time_min<1000
bool_1 = bool_time2 and bool_user_email
bool_2 = bool_user and bool_clave
if not bool_user_email and bool_2:
past['Acceso'].update({"User":Usuario,"Estado":"Aprovado","Hora":dt_string})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
if not bool_2:
if (Usuario != "") and (Clave!=""):
with colums[1]:
st.warning("Usuario o contraseña incorrectos.\n\n Por favor intente nuevamente.")
elif bool_2 and not bool_1:
past['Acceso'].update({"User":Usuario,"Estado":"Aprovado","Hora":dt_string})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
EMAIL_ADDRESS = '<EMAIL>'
EMAIL_PASSWORD = '<PASSWORD>'
try:
with smtplib.SMTP('smtp.gmail.com', 587) as smtp:
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
subject = 'Acceso aplicacion Julia'
body = 'Acceso usuario ' + Usuario +' el '+dt_string
msg = f'Subject: {subject}\n\n{body}'
smtp.sendmail(EMAIL_ADDRESS, EMAIL_ADDRESS, msg)
except:
pass
with colums[1]:
st.button("Acceder a la aplicación")
elif bool_2:
past['Acceso'].update({"Estado":"Aprovado","Hora":dt_string,"User":Usuario})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
with colums[1]:
st.button("Acceder a la aplicación")
return bool_aprove
def Num_dias(leng):
if leng==1:
return "1 día"
else:
return str(leng) + " días"
def day_week(dia):
if dia ==0:
Dia="Lunes"
elif dia ==1:
Dia="Martes"
elif dia ==2:
Dia="Miércoles"
elif dia ==3:
Dia="Jueves"
elif dia ==4:
Dia="Viernes"
elif dia ==5:
Dia="Sábado"
elif dia ==6:
Dia="Domingo-Festivo"
return Dia
def remove_row(table, row):
tbl = table._tbl
tr = row._tr
tbl.remove(tr)
def Range_fecha(dates):
if len(dates)==1:
return pd.to_datetime(dates[0]).strftime('%Y-%m-%d')
else:
return pd.to_datetime(dates[0]).strftime('%Y-%m-%d')+" hasta "+ pd.to_datetime(dates[-1]).strftime('%Y-%m-%d')
def any2str(obj):
if isinstance(obj, str):
return obj
elif math.isnan(obj):
return ""
elif isinstance(obj, int):
return str(obj)
elif isinstance(obj, float):
return str(obj)
def dt_fechas(data,data_user,Fechas,tipo_dia):
dt_Final=pd.DataFrame(columns=["Dia","Fecha","Requerimiento","Respaldo"])
for dia in Fechas:
data_fecha=data_user[data_user["Fecha"]== dia]
data_dia_todos=data[data["Fecha"]==dia]
try:
            d_week=tipo_dia[tipo_dia["FECHA"]==dia]["TIPO D"].to_numpy()[0]
except:
st.warning("Actualizar el calendario del excel extra")
d_week=day_week(pd.Series(data=dia).dt.dayofweek.to_numpy()[0])
df=pd.DataFrame([[d_week,dia,data_dia_todos["CANTIDAD"].sum(),data_fecha["CANTIDAD"].sum()]],columns=["Dia","Fecha","Requerimiento","Respaldo"])
dt_Final=dt_Final.append(df, ignore_index=True)
return dt_Final
def dt_fechas_2(data,data_user,Fechas,tipo_dia):
dt_Final=pd.DataFrame(columns=["Dia","Fecha","Requerimiento","Respaldo"])
for dia in Fechas:
data_fecha=data_user[data_user["FECHA"]== dia]
data_dia_todos=data[data["FECHA"]==dia]
try:
            d_week=tipo_dia[tipo_dia["FECHA"]==dia]["TIPO D"].to_numpy()[0]
except:
st.warning("Actualizar el calendario del excel extra")
d_week=day_week(pd.Series(data=dia).dt.dayofweek.to_numpy()[0])
df=pd.DataFrame([[d_week,dia,data_dia_todos["CANTIDAD"].sum(),data_fecha["CANTIDAD"].sum()]],columns=["Dia","Fecha","Requerimiento","Respaldo"])
dt_Final=dt_Final.append(df, ignore_index=True)
return dt_Final
def dt_fechas_3(data,data_user,Fechas,tipo_dia):
dt_Final=pd.DataFrame(columns=["Dia","Fecha","Respaldo","P_neto","TRM","PRECIO PONDERADO"])
for dia in Fechas:
data_fecha=data_user[data_user["FECHA"]== dia]
try:
            d_week=tipo_dia[tipo_dia["FECHA"]==dia]["TIPO D"].to_numpy()[0]
except:
st.warning("Actualizar el calendario del excel extra")
d_week=day_week(pd.Series(data=dia).dt.dayofweek.to_numpy()[0])
df=pd.DataFrame([[d_week,dia,data_fecha["CANTIDAD"].sum(),data_fecha["P NETO"].sum(),round(data_fecha["TRM"].mean(),2),round(data_fecha["PRECIO PONDERADO"].mean(),2)]],
columns=["Dia","Fecha","Respaldo","P_neto","TRM","PRECIO PONDERADO"])
dt_Final=dt_Final.append(df, ignore_index=True)
return dt_Final
def Mes_espa(mes):
if mes =="01":
Mes="Enero"
elif mes =="02":
Mes="Febrero"
elif mes =="03":
Mes="Marzo"
elif mes =="04":
Mes="Abril"
elif mes =="05":
Mes="Mayo"
elif mes =="06":
Mes="Junio"
elif mes =="07":
Mes="Julio"
elif mes =="08":
Mes="Agosto"
elif mes =="09":
Mes="Septiembre"
elif mes =="10":
Mes="Octubre"
elif mes =="11":
Mes="Noviembre"
elif mes =="12":
Mes="Diciembre"
return Mes
def F_Liq_pag(mes,ano):
if mes%12 ==1:
Fecha ="Enero"
elif mes%12 ==2:
Fecha ="Febrero"
elif mes%12 ==3:
Fecha ="Marzo"
elif mes%12 ==4:
Fecha ="Abril"
elif mes%12 ==5:
Fecha ="Mayo"
elif mes%12 ==6:
Fecha ="Junio"
elif mes%12 ==7:
Fecha ="Julio"
elif mes%12 ==8:
Fecha="Agosto"
elif mes%12 ==9:
Fecha="Septiembre"
elif mes%12 ==10:
Fecha="Octubre"
elif mes%12 ==11:
Fecha="Noviembre"
elif mes%12 ==0:
Fecha="Diciembre"
if mes > 12:
Fecha += " "+ str(ano+1)
else:
Fecha += " "+ str(ano)
return Fecha
def num2money(num):
if num < 1e3:
return str(round(num,2))
elif num < 1e6:
return str(round(num*1e3/1e6,2))+ " miles."
elif num < 1e9:
return str(round(num*1e3/1e9,2))+ " mill."
elif num < 1e12:
return str(round(num*1e3/1e12,2))+ " mil mill."
def mes_espa(mes):
if mes =="01":
Mes="enero"
elif mes =="02":
Mes="febrero"
elif mes =="03":
Mes="marzo"
elif mes =="04":
Mes="abril"
elif mes =="05":
Mes="mayo"
elif mes =="06":
Mes="junio"
elif mes =="07":
Mes="julio"
elif mes =="08":
Mes="agosto"
elif mes =="09":
Mes="septiembre"
elif mes =="10":
Mes="octubre"
elif mes =="11":
Mes="noviembre"
elif mes =="12":
Mes="diciembre"
return Mes
def mes_num(mes):
Opciones2=("Enero","Febrero","Marzo","Abril","Mayo","Junio","Julio","Agosto","Septiembre","Octubre","Noviembre","Diciembre")
if mes == Opciones2[0]:
Mes="01"
elif mes == Opciones2[1]:
Mes="02"
elif mes == Opciones2[2]:
Mes="03"
elif mes == Opciones2[3]:
Mes="04"
elif mes == Opciones2[4]:
Mes="05"
elif mes == Opciones2[5]:
Mes="06"
elif mes == Opciones2[6]:
Mes="07"
elif mes == Opciones2[7]:
Mes="08"
elif mes == Opciones2[8]:
Mes="09"
elif mes == Opciones2[9]:
Mes="10"
elif mes == Opciones2[10]:
Mes="11"
elif mes == Opciones2[11]:
Mes="12"
return Mes
def dia_esp(dia):
if dia =="01":
Dia="1"
elif dia =="02":
Dia="2"
elif dia =="03":
Dia="3"
elif dia =="04":
Dia="4"
elif dia =="05":
Dia="5"
elif dia =="06":
Dia="6"
elif dia =="07":
Dia="7"
elif dia =="08":
Dia="8"
elif dia =="09":
Dia="9"
else :
Dia = dia
return Dia
def set_font(rows,fila,col,size):
run=rows[fila].cells[col].paragraphs[0].runs
font = run[0].font
font.size= Pt(size)
font.name = 'Tahoma'
def replace_text_for_image(paragraph, key, value,wid,hei):
if key in paragraph.text:
inline = paragraph.runs
for item in inline:
if key in item.text:
item.text = item.text.replace(key, "")
for val in value:
r = paragraph.add_run()
r.add_picture(val,width=Cm(wid), height=Cm(hei))
def replace_text_in_paragraph(paragraph, key, value):
if key in paragraph.text:
inline = paragraph.runs
for item in inline:
if key in item.text:
item.text = item.text.replace(key, value)
def delete_columns(table, columns):
# sort columns descending
columns.sort(reverse=True)
grid = table._tbl.find("w:tblGrid", table._tbl.nsmap)
for ci in columns:
for cell in table.column_cells(ci):
cell._tc.getparent().remove(cell._tc)
# Delete column reference.
col_elem = grid[ci]
grid.remove(col_elem)
st.set_page_config(
layout="centered", # Can be "centered" or "wide". In the future also "dashboard", etc.
initial_sidebar_state="auto", # Can be "auto", "expanded", "collapsed"
page_title="JULIA RD", # String or None. Strings get appended with "• Streamlit".
page_icon="📊", # String, anything supported by st.image, or None.
)
if User_validation():
#if True:
Opciones1=("Oferta Firme de Respaldo","Certificado de Reintegros","Informe Comercial")
eleccion=st.sidebar.selectbox('Seleccione el proyecto',Opciones1)
#if False:
if eleccion==Opciones1[0]:
st.header("Creación ofertas firmes de respaldo")
st.subheader("Introducción de los documentos")
colums= st.columns([1,1,1])
with colums[0]:
uploaded_file_1 = st.file_uploader("Suba el consolidado base")
with colums[1]:
uploaded_file_2 = st.file_uploader("Suba la plantilla del documento")
with colums[2]:
uploaded_file_3 = st.file_uploader("Suba el excel adicional")
if (uploaded_file_1 is not None) and (uploaded_file_2 is not None) and (uploaded_file_3 is not None):
try:
data=pd.read_excel(uploaded_file_1)
Extras=pd.read_excel(uploaded_file_3,sheet_name="Usuarios")
Tipo_dia=pd.read_excel(uploaded_file_3,sheet_name="Calendario")
except:
st.warning("Recuerde que el formato del Excel tiene que ser xls")
data["Fecha"]=data["FECHAINI"].dt.to_pydatetime()
if data["USUARIO"].isnull().values.any():
st.warning("Revisar archivo de consolidado base, usuario no encontrado.")
data.dropna(subset = ["USUARIO"], inplace=True)
Users=pd.unique(data["USUARIO"])
else:
Users=pd.unique(data["USUARIO"])
Extras=pd.read_excel(uploaded_file_3,sheet_name="Usuarios")
Tipo_dia=pd.read_excel(uploaded_file_3,sheet_name="Calendario")
template_file_path = uploaded_file_2
today = date.today()
fecha=dia_esp(today.strftime("%d")) +" de "+ mes_espa(today.strftime("%m")) +" de "+ today.strftime("%Y")
colums= st.columns([1,4,1])
with colums[1]:
st.subheader("Introducción de las variables")
P_bolsa=st.text_input("Introduzca el Precio de Escasez de Activación",value="10.00")
P_contrato=st.text_input("Introduzca el precio del contrato [USD]",value="10.00")
P_TMR=st.text_input("Introduzca el valor de la TRM",value="3,950.00")
F_TRM = st.date_input("Seleccione la fecha del valor de la TRM:",value=today).strftime("%Y-%m-%d")
Agente_extra = st.text_input("Introduzca el nombre particular del agente")
columns_2 = st.columns([1,2,2,1])
Opciones2=("Enero","Febrero","Marzo","Abril","Mayo","Junio","Julio","Agosto","Septiembre","Octubre","Noviembre","Diciembre")
Opciones3=("I","II","III","IV","V")
with columns_2[1]:
eleccion2=st.selectbox('Seleccione el mes de la OFR',Opciones2)
with columns_2[2]:
eleccion3=st.selectbox('Selecciona la semana de la OFR',Opciones3)
if Agente_extra:
Agente_extra="_"+Agente_extra
else:
Agente_extra=""
columns_3 = st.columns([2,1,2])
with columns_3[1]:
if platform.system()=='Windows':
b=st.checkbox("PDF")
else:
b=False
a=st.button("Crear los documentos")
Ruta="Documentos/OFR/"+str(today.year)+"/"+mes_num(eleccion2)+"-"+eleccion2 +"/"+ eleccion3
Ruta_x="Documentos_exportar/"
if os.path.exists(Ruta_x):
shutil.rmtree(Ruta_x)
Ruta_x=Ruta_x+"/"
Ruta_x=Ruta_x+"/"
os.makedirs(Ruta_x, exist_ok=True)
if a:
try:
path1 = os.path.join(Ruta)
shutil.rmtree(path1)
os.makedirs(Ruta, exist_ok=True)
except:
os.makedirs(Ruta, exist_ok=True)
Ruta_word=Ruta+"/Word"
Ruta_pdf=Ruta+"/PDF"
Info ={"Ruta": Ruta,
"File_names": None
}
File_names=[]
os.makedirs(Ruta_word, exist_ok=True)
if b:
os.makedirs(Ruta_pdf, exist_ok=True)
zf = zipfile.ZipFile(
"Resultado.zip", "w", zipfile.ZIP_DEFLATED)
my_bar=st.progress(0)
steps=len(Users)
steps_done=0
for usuario in Users:
data_user=data.copy()
data_user=data_user[data_user["USUARIO"]==usuario]
Empresas = pd.unique(data_user["agente1"])
Respaldo = data[data["USUARIO"]== usuario]["CANTIDAD"].sum()
Fechas = pd.unique(data_user["Fecha"])
R_fechas = Range_fecha(Fechas)
Data_frame_fechas=dt_fechas(data.copy(),data_user,Fechas,Tipo_dia)
try:
Email = str(Extras[Extras["USUARIO"] == usuario]["CORREO"].values)
Porc_come = Extras[Extras["USUARIO"] == usuario]["MARGEN"].values[0]
except:
Email = ""
Porc_come = 0.1
st.warning("No hay coincidencia en el Excel de usuarios para: "+usuario)
Email = re.sub("\[|\]|\'|0","",Email)
tx_empresas=""
for idx ,val in enumerate(Empresas):
if len(Empresas)<4:
val_2=val[0:3]
tx_empresas += val_2
if idx==len(Empresas)-1:
pass
else:
tx_empresas +=", "
else:
tx_empresas += "Los Generadores"
P_kwh=float(re.sub(",","",P_TMR))*float(P_contrato)/1000
Ingreso=int(P_kwh*Respaldo)
C_comer=int(Ingreso*Porc_come)
C_GMS=int(Ingreso*4/1000)
I_NETO=Ingreso-C_comer-C_GMS
if len(Data_frame_fechas.index.values)>13:
Enter="\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
else:
Enter=""
variables = {
"${FECHA}": fecha,
"${MES}": eleccion2,
"${AGENTES}": tx_empresas,
"${USUARIO}": usuario,
"${PRECIO_BOLSA}": P_bolsa,
"${PRECIO_CONTRATO}": P_contrato,
"${FECHA_TRM}": F_TRM,
"${PRECIO_TRM}": P_TMR,
"${EMAIL_USUARIO}": Email,
"${PRECIO_PKWH}":str(round(P_kwh,2)),
"${PORC_COMER}":str(int(Porc_come*100))+"%",
"${RESPALDO_TOT}":f'{Respaldo:,}',
"${INGRESO}":f'{Ingreso:,}',
"${COST_COME}":f'{C_comer:,}',
"${COST_GMS}":f'{C_GMS:,}',
"${INGRESO_NETO}": f'{I_NETO:,}',
"${NUM_DIAS}":Num_dias(len(Fechas)),
"${RANGO_FECHAS_1}": R_fechas,
"${ENTER}": Enter,
"${MES_LIQUIDACION}": F_Liq_pag(Opciones2.index(eleccion2)+2,int(today.strftime("%Y"))),
"${MES_PAGO}": F_Liq_pag(Opciones2.index(eleccion2)+3,int(today.strftime("%Y"))),
"${INDICADOR}": eleccion3
}
template_document = docx.Document(template_file_path)
for variable_key, variable_value in variables.items():
for section in template_document.sections:
for paragraph in section.header.paragraphs:
replace_text_in_paragraph(paragraph, variable_key, variable_value)
for paragraph in template_document.paragraphs:
replace_text_in_paragraph(paragraph, variable_key, variable_value)
for table in template_document.tables:
for col in table.columns:
for cell in col.cells:
for paragraph in cell.paragraphs:
replace_text_in_paragraph(paragraph, variable_key, variable_value)
rows = template_document.tables[1].rows
index_1=Data_frame_fechas.index.values
Acum_Req=0
Acum_Res=0
for idx in index_1:
rows[int(idx)+1].cells[0].text = Data_frame_fechas.iloc[idx]["Dia"]
rows[int(idx)+1].cells[1].text = Data_frame_fechas.iloc[idx]["Fecha"].strftime('%Y-%m-%d')
rows[int(idx)+1].cells[2].text = f'{Data_frame_fechas.iloc[idx]["Requerimiento"]:,}'
Acum_Req += Data_frame_fechas.iloc[idx]["Requerimiento"]
rows[int(idx)+1].cells[3].text = f'{Data_frame_fechas.iloc[idx]["Respaldo"]:,}'
Acum_Res += Data_frame_fechas.iloc[idx]["Respaldo"]
for idx_2 in range(0,4):
run=rows[int(idx)+1].cells[idx_2].paragraphs[0].runs
font = run[0].font
font.size= Pt(10)
font.name = 'Tahoma'
for idx in np.arange(len(index_1)+1,37):
remove_row(template_document.tables[1], rows[len(index_1)+1])
rows[-1].cells[1].text = Num_dias(len(Fechas))
rows[-1].cells[2].text = f'{Acum_Req:,}'
rows[-1].cells[3].text = f'{Acum_Res:,}'
version=1
template_document.save(Ruta_x+usuario+"_OFR"+Agente_extra+"_"+eleccion2+"_"+str(today.year)+".docx")
zf.write(Ruta_x+usuario+"_OFR"+Agente_extra+"_"+eleccion2+"_"+str(today.year)+".docx")
if b:
docx2pdf.convert(Ruta_x+usuario+"_OFR"+Agente_extra+"_"+eleccion2+"_"+str(today.year)+".docx",
Ruta_pdf+"/"+usuario+"_OFR"+Agente_extra+"_"+eleccion2+"_"+str(today.year)+".pdf")
zf.write(Ruta_pdf+"/"+usuario+"_OFR"+Agente_extra+"_"+eleccion2+"_"+str(today.year)+".pdf")
File_names.extend([usuario+"_OFR"+Agente_extra+"_"+eleccion2+"_"+str(today.year)+".docx"])
steps_done += 1
my_bar.progress(int(steps_done*100/steps))
Info.update({"File_names":File_names})
json_info = json.dumps(Info, indent = 4)
with open(Ruta_x+'/00_data.json', 'w') as f:
json.dump(json_info, f)
zf.write(Ruta_x+'/00_data.json')
zf.close()
with open("Resultado.zip", "rb") as fp:
with columns_3[1]:
btn = st.download_button(
label="Descargar resultados",
data=fp,
file_name="Resultado.zip",
mime="application/zip"
)
else:
st.warning("Necesita subir los tres archivos")
#elif False:
elif eleccion==Opciones1[1]:
st.header("Creación certificados de reintegros")
st.subheader("Introducción de los documentos")
if True:
colums= st.columns([1,1,1])
with colums[0]:
uploaded_file_1 = st.file_uploader("Suba el documento de liquidación")
with colums[1]:
uploaded_file_2 = st.file_uploader("Suba la plantilla del documento")
with colums[2]:
uploaded_file_3 = st.file_uploader("Suba el excel adicional")
else:
uploaded_file_1="Liquidacion_base.xlsm"
uploaded_file_2="Certificado_base.docx"
uploaded_file_3="Excel_extra_certificados.xls"
if (uploaded_file_1 is not None) and (uploaded_file_2 is not None) and (uploaded_file_3 is not None):
try:
data= | pd.read_excel(uploaded_file_1) | pandas.read_excel |
"""Functions that generate the experiments."""
import sys
import math
import time
import itertools
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import utils as ut
# --- Binary classification ---
def eta_a(x, a=1.):
"""See the math in the notebook."""
a = float(a)
k = a / (1 - a)
if x >= 0.5:
return 0.5 + 0.5 * math.pow(2. * x - 1., (1. / k))
return 0.5 - 0.5 * math.pow(1. - 2. * x, (1. / k))
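# Quick sanity check of eta_a (not taken from the notebook): eta_a(0.5, a) == 0.5,
# eta_a(0.0, a) == 0.0 and eta_a(1.0, a) == 1.0 for any 0 < a < 1, and for a = 0.5
# (so k = a / (1 - a) = 1) the function reduces to the identity:
#
#     >>> eta_a(0.75, a=0.5)
#     0.75
#
# Smaller values of a flatten eta around x = 0.5, i.e. make the classification problem harder.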
def datagen_eta_a(n=100, a=1.):
"""Generate data for X ~ U[0,1] and eta_a."""
X = np.random.uniform(0., 1., n)
Y = 2 * np.random.binomial(n=1, p=list(map(lambda x: eta_a(x, a), X))) - 1
return X, Y
def classif_error(f, a=1.):
"""Gives the theoretical error given a frontier f that separates positives and negatives."""
a = float(a)
return (1 - a) / 2 + (a / 2) * math.pow(abs(2 * f - 1), 1 / a)
def classification_experiments(n_list, a_collection, n_exps=1000, verbose=True):
"""
    Returns a DataFrame with the results of n_exps experiments
    for each n in n_list and each a in a_collection.
"""
d = {"a": [], "n": [], "gen_error": [], "frontier": []}
for i in range(0, n_exps):
for a_test in a_collection:
# Do the experiments for the collection of k's
n_tot = np.max(n_list)
X, Y = datagen_eta_a(n=n_tot, a=a_test)
for n_cur in n_list:
# Do the experiments for the collection of n's
f_opt, _, _ = ut.bipart_partition(X[0:n_cur], Y[0:n_cur],
criterion=lambda np, nn: np - nn,
epsilon=0.001)
err_est = classif_error(f_opt, a=a_test)
err_theo = classif_error(0.5, a=a_test)
# Save results
d["a"] += [a_test]
d["n"] += [n_cur]
d["gen_error"] += [err_est - err_theo]
d["frontier"] += [f_opt]
if verbose and i % 50 == 0:
print("Done {:3} %".format(int((i / n_exps)*100)))
return pd.DataFrame(d)
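# Usage sketch (the n's, a's and number of repetitions below are arbitrary example values):
#
#     results = classification_experiments(n_list=[50, 100, 500],
#                                           a_collection=[0.2, 0.5, 0.8],
#                                           n_exps=100)
#     print(results.groupby(["a", "n"])["gen_error"].median())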
# --- Bipartite ranking ---
def bipart_interval_alpha(a, m=0.25):
"""
Returns an interval that alpha has to belong to to respect
the conditions C >= 0 and C <= 1/2.
"""
inter = np.power(1 - 2 * m, 1 / a)
return (1 - 2 * m - a * inter) / 2, (1 - a * inter) / 2
def bipart_C(a, alpha, m=0.25):
"""Returns the C for a given alpha and a in the bipartite ranking scheme."""
if m > 0.:
return 0.5 + (2 * alpha - 1 + a * np.power(1 - 2 * m, 1 / a)) / (4 * m)
return 0.
def eta_aCm(x, a, C, m=0.25):
"""See the math in the notebook."""
if x < 0:
return None
if x < m:
return C
if x < 1 - m:
return eta_a(x, a=a)
if x <= 1:
return 1 - C
return None
def datagen_eta_aCm(n, a, C=None, m=0.25, alpha=0.05):
"""Generate data for X ~ U[0,1] and eta_aCm."""
if C is None:
print("Unspecified C, choosen with bipart_C.")
C = bipart_C(a, alpha)
X = np.random.uniform(0., 1., n)
Y = 2 * np.random.binomial(n=1,
p=list(map(lambda x: eta_aCm(x, a, C, m=m), X))) - 1
return X, Y
def int_eta_a(x, a):
"""Integral of eta_aCm over [0,x]."""
return x / 2 + (a / 4) * (np.power(abs(1 - 2 * x), 1 / a) - 1)
def birank_G_aCm(f, a, C=None, m=0.25, alpha=0.15):
"""Computes G for a frontier in the bipartite ranking scheme."""
a = float(a)
if C is None:
C = bipart_C(a, alpha, m=m)
res = 0
if f >= 0.0:
res += C * min(f, m)
if f >= m:
val_f = min(f, 1. - m)
res += int_eta_a(val_f, a=a) - int_eta_a(m, a=a)
if f >= 1. - m:
res += (1 - C) * (min(1., f) - (1 - m))
return 2 * (1. / 2 - res)
def birank_H_aCm(f, a, C=None, m=0.25, alpha=0.15):
"""Computes H for a frontier f in the bipartite ranking scheme."""
return 2 * (1. - f) - birank_G_aCm(f, a, C=C, m=m, alpha=alpha)
def exp_bi_ranking(n_list, n_exps, a_collection, alpha=0.38, m=0.25):
"""
    Runs the experiments associated with the bipartite ranking problem.
"""
dict_res = dict()
n_tot = np.max(n_list)
for i in range(0, n_exps):
for a in a_collection:
a = float(a)
if m <= 0:
alpha = (1 - a) / 2
C = 0
m = 0
else:
C = bipart_C(a, alpha, m)
X, Y = datagen_eta_aCm(n_tot, a, C=C, m=m)
G_theo = birank_G_aCm(0.5, a, C=C, m=m, alpha=alpha)
H_theo = birank_H_aCm(0.5, a, C=C, m=m, alpha=alpha)
for n_cur in n_list:
X_cand = X[0:n_cur]
Y_cand = Y[0:n_cur]
n_p = (Y_cand == +1).sum()
n_n = X_cand.shape[0] - n_p
def opt_fun(i_p, i_n):
if float(i_n) / n_n <= alpha:
return i_p
return - float("inf")
f_opt, _, _ = ut.bipart_partition(X_cand, Y_cand, opt_fun)
H_est = birank_H_aCm(f_opt, a, C=C, m=m, alpha=alpha)
G_est = birank_G_aCm(f_opt, a, C=C, m=m, alpha=alpha)
phi_nd = (H_est - alpha) / 2
d_update = {"a": a, "n": n_cur, "phi_nd": phi_nd,
"GR": G_theo, "GRn": G_est,
"HR": H_theo, "HRn": H_est,
"frontier": f_opt, "gen_error": G_theo - G_est}
dict_res = {k : dict_res.get(k, []) + [d_update[k]] for k in d_update}
if i % 50 == 0:
print("Done {:3} %".format(int(100*(i/n_exps))))
return pd.DataFrame(dict_res)
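# Usage sketch, analogous to classification_experiments above (parameter values are
# illustrative only):
#
#     df_bi = exp_bi_ranking(n_list=[100, 1000], n_exps=100,
#                            a_collection=[0.5], alpha=0.38, m=0.25)
#     print(df_bi.groupby("n")["gen_error"].quantile([0.5, 0.9]))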
# --- Similarity ranking ---
def illustrate_As(m=0.25):
"""Illustrates the decomposition of the integral over A1, A2, A3"""
def return_plot(x0, x1, y0, y1):
return [x0] * 2 + [x1] * 2 + [x0], [y0] + [y1] * 2 + [y0] * 2
xX, yX = return_plot(0, 1., 0, 1.)
plt.plot(xX, yX, label="pair space")
xA1, yA1 = return_plot(1 - m, 1, 1 - m, 1)
plt.plot(xA1, yA1, color="orange", linewidth=3, label="A1")
xA3, yA3 = return_plot(0.5, 1 - m, 0.5, 1 - m)
plt.plot(xA3, yA3, color="red", linewidth=3, label="A3")
xA2, yA2 = return_plot(1 - m, 1, 0.5, 1 - m)
plt.plot(xA2, yA2, color="green", linewidth=3, label="A2")
plt.plot([0, 1], [0, 1], "b--", label="symmetry eta")
plt.plot([1, 0], [0, 1], "b--")
plt.xlabel("x")
plt.ylabel("x'")
plt.title("")
plt.legend(loc="lower left")
plt.show()
def simrank_eta(x, xp, a=0.4, alpha=0.38, m=0.25):
""" See the math in the notebook. """
C = simrank_C(a, alpha, m=m)
eta_x = eta_aCm(x, a, C, m=m)
eta_xp = eta_aCm(xp, a, C, m=m)
return 0.5 + 0.5 * (2 * eta_x - 1) * (2 * eta_xp - 1)
def simrank_interval_alpha(a, m):
"""
Returns an interval that alpha has to belong to to respect
the conditions C >= 0 and C <= 1/2.
"""
v1 = (2 * m + a * np.power(1 - 2 * m, 1. / a))**2
v2 = ((a**2) * np.power(1 - 2 * m, 2. / a)) / (4 * (m**2))
return 0.5 - v1 / 2, 0.5 - v2 / 2
def simrank_C(a, alpha, m=0.25):
"""Returns the C for a given alpha and a in the similarity ranking scheme."""
if m > 0:
num = a * np.power(1 - 2 * m, 1. / a) + 2 * m - np.sqrt(1 - 2 * alpha)
denom = 4 * m
return num / denom
return 0.
def int_fm1(min_x0, max_x0, a, m, C):
"""
Assumes min_x0 < max_x0.
Computes the integral of f(x)-1 over min_x0, max_x0 which is
an intermediary step before computing the integral of eta over rectangles.
"""
res = 0
deb_reg = min(max(min_x0, 0.), m)
end_reg = min(max_x0, m)
res += (end_reg - deb_reg) * (2 * C - 1)
# f-1 = 2 \eta - 1
if max_x0 > m:
deb_reg = min(max(min_x0, m), 1. - m)
end_reg = min(max_x0, 1. - m)
res += 2 * (int_eta_a(end_reg, a=a) -
int_eta_a(deb_reg, a=a)) - (end_reg - deb_reg)
if max_x0 > 1. - m:
deb_reg = min(max(min_x0, 1. - m), 1.)
end_reg = min(max_x0, 1.)
res += (end_reg - deb_reg) * (1 - 2 * C)
return res
def square_int(min_x, min_xp, max_x, max_xp, a, m, C):
"""
Computes the integral of eta over rectangles.
"""
intf1 = int_fm1(min_x, max_x, a, m, C)
intf2 = int_fm1(min_xp, max_xp, a, m, C)
return 0.5 * (max_x - min_x) * (max_xp - min_xp) + 0.5 * intf1 * intf2
def simrank_G_aCm(f, a, C=None, m=0.25, alpha=0.15):
"""Computes G for a frontier f in the similarity ranking scheme."""
a = float(a)
if C is None:
C = simrank_C(a, alpha, m=m)
res = 0
if 0. <= f < 0.5:
val_f = min(f, 0.5)
res = 2 * square_int(0., 0., val_f, val_f, a, m, C)
if f >= 0.5:
val_f = min(f, 1.)
# Integ de 0.5 a f
res = square_int(0., 0., 1., 1., a, m, C) - 2 * \
square_int(0., val_f, 1. - f, 1., a, m, C)
return res / 0.5
def simrank_H_aCm(f, a, C=None, m=0.25, alpha=0.15):
"""Computes H for a frontier f in the similarity ranking scheme."""
int_eta = simrank_G_aCm(f, a, C=C, m=m, alpha=alpha)
if f < 0.5:
return 4 * (f**2) - int_eta
return 2 * (1 - 2 * ((1 - f)**2)) - int_eta
def optimum_simrank(x_p, x_n, alpha):
"""Intermediary function to the one below."""
pos_pair_1 = itertools.combinations(x_p, 2)
pos_pair_2 = itertools.combinations(x_n, 2)
neg_pair = itertools.product(x_p, x_n)
def get_val_from_pair(x):
        # Transforms each pair into one minus the minimum of its Chebyshev (max-coordinate) distance to (0,0) or (1,1).
distance_to_lower_corner = max(abs(x[0]), abs(x[1]))
distance_to_upper_corner = max(abs(1. - x[0]), abs(1. - x[1]))
return 1 - min(distance_to_lower_corner, distance_to_upper_corner)
x_p = (np.array(list(map(get_val_from_pair, pos_pair_1))
+ list(map(get_val_from_pair, pos_pair_2))))
x_n = np.array(list(map(get_val_from_pair, neg_pair)))
def opt_fun(i_p, i_n):
if float(i_n) / x_n.shape[0] <= alpha:
return i_p / x_p.shape[0]
return - float("inf")
X = np.hstack([x_p, x_n])
Y = np.array([+1]*len(x_p) + [-1]*len(x_n))
f_opt, crit_opt, _ = ut.bipart_partition(X, Y, opt_fun)
return 1-f_opt, crit_opt
def exp_sim_ranking(n_list, n_exps, a_collection, m=0.25, alpha=0.15):
"""
    Runs the experiments required for pointwise ROC optimization
for similarity ranking.
"""
print("Starting...")
dict_res = dict()
n_tot = np.max(n_list)
c_time = time.time()
for i in range(0, n_exps):
for a in a_collection:
a = float(a)
if m <= 0:
C = 0
alpha = (1 - a**2) / 2
m = 0
else:
C = simrank_C(a, alpha, m)
X, Y = datagen_eta_aCm(n_tot, a, C=C, m=m)
G_theo = simrank_G_aCm(0.5, a, C=C, m=m, alpha=alpha)
H_theo = simrank_H_aCm(0.5, a, C=C, m=m, alpha=alpha)
for n_cur in n_list:
X_cand = X[0:n_cur]
Y_cand = Y[0:n_cur]
x_p = X_cand[Y_cand == +1]
x_n = X_cand[Y_cand == -1]
f_opt, _ = optimum_simrank(x_p, x_n, alpha=alpha)
G_est = simrank_G_aCm(f_opt, a, C=C, m=m, alpha=alpha)
H_est = simrank_H_aCm(f_opt, a, C=C, m=m, alpha=alpha)
phi_nd = (H_est - alpha) / 2
d_update = {"a": a, "n": n_cur, "phi_nd": phi_nd,
"G_theo": G_theo, "G_est": G_est,
"H_theo": H_theo, "H_est": H_est,
"f_opt": f_opt, "gen_error": G_theo - G_est}
dict_res = {k : dict_res.get(k, []) + [d_update[k]] for k in d_update}
if i % 10 == 0:
print("Done {:3}.... {:3.0f} s".format(int(100*(i/n_exps)), time.time() - c_time))
return | pd.DataFrame(dict_res) | pandas.DataFrame |
import nltk
import spacy
from collections import Counter
from textstat import flesch_reading_ease
import regex as re
from nltk import ngrams as make_ngrams
from language_change_methods.utility_functions import get_ll_and_lr
import pandas as pd
from typing import Iterable
from sklearn.feature_extraction import DictVectorizer
from pkg_resources import resource_filename
filepath = resource_filename('language_change_methods', 'word_lists/function_words.txt')
with open(filepath, encoding="utf-8") as func_file:
function_words = [line.strip().lower() for line in func_file.readlines()]
def get_wordcounts_multiple_texts(toks):
count = Counter()
for text in toks:
count.update(text)
return count
def get_normalised_wordcounts(toks, words_to_keep=None):
# Get all wordcounts for window
curr_counts = get_wordcounts_multiple_texts(toks)
# Only keep the word counts for top words
if words_to_keep is not None:
curr_counts = {word: count for word, count in curr_counts.items() if word in words_to_keep}
# Normalise the counts
num_words = toks.apply(len).sum()
curr_counts = {word: count/num_words for word, count in curr_counts.items()}
return curr_counts
def get_binary_wordcounts(toks, words_to_keep=None):
# Get all wordcounts for window
curr_counts = get_wordcounts_multiple_texts(toks)
curr_counts = {word: 1 for word in curr_counts.keys()}
# Get keep only the words to keep
if words_to_keep is not None:
curr_counts = {word: count for word, count in curr_counts.items() if word in words_to_keep}
return curr_counts
def get_tok_counts(toks, words_to_keep=None, binary=False):
"""
Given a list of tokens, does a count, and only keeps words in the optionally provided words_to_keep.
@toks: an iterator/list of tokens making up a text.
@words_to_keep: an iterator/list of words to consider in the Count.
"""
tok_counts = Counter(toks)
if binary:
tok_counts = {tok: 1 for tok in tok_counts.keys()}
if words_to_keep is not None:
tok_counts = Counter({w: c for w, c in tok_counts.items() if w in words_to_keep})
return tok_counts
def get_ttr(toks):
"""
Given a list of tokens, return the type token ratio (# unique / # tokens)
@toks: a list of tokens.
"""
unique = set(toks)
return len(unique) / len(toks)
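# Example: three tokens, two unique types.
#
#     >>> get_ttr(["the", "cat", "the"])
#     0.6666666666666666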
def get_complexity(text):
"""
Define complexity as 1 - (Flesch Reading Ease / 121.22)
This will usually be between 0 and 1 (0 being simple and 1 being complex), but can exceed 1 in special circumstances, with no upper limit.
"""
reading_ease = flesch_reading_ease(text)
readability = reading_ease / 121.22
return 1 - readability
def count_regex_occurances(reg, text):
"""
    Counts the number of matches of a given regex in a given text.
This can be used, for example, for finding the counts of certain patterns (e.g. quotes or URLs).
"""
return len(list(re.findall(reg, text)))
def ngram_okay(phrase):
"""
Checks if an N-gram is okay.
    Primarily, this means rejecting n-grams whose parts are made of punctuation, symbols, or numbers.
"""
okay = True
for phrase_part in phrase.split("_"):
okay = okay and not re.match(r"[\p{P}|\p{S}|\p{N}]+", phrase_part)
return okay
def get_ngram_counts(toks: Iterable, n: int, join_char: str = "_"):
"""
Gets the ngram counts for a given value of N and a given list of tokenised documents.
@param toks: A dict-like structure of tokenised documents.
@param n: The value of n for finding ngrams.
"""
ngrams = pd.Series({i: list(make_ngrams(post, n)) for i, post in toks.items()})
ngrams = ngrams.apply(lambda post: [join_char.join(words) for words in post])
counts = get_wordcounts_multiple_texts(ngrams)
counts = Counter({n: c for n, c in counts.items() if ngram_okay(n)})
return counts
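# Usage sketch: `toks` is expected to be a dict-like / pandas Series of tokenised documents,
# e.g. with toy data:
#
#     toks = pd.Series({0: ["the", "red", "cat"], 1: ["the", "red", "dog"]})
#     get_ngram_counts(toks, 2)
#     # -> Counter({'the_red': 2, 'red_cat': 1, 'red_dog': 1})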
# Function for finding the LL and LR for the ngrams of a given sequence of tokens
def get_ngram_lr_and_ll(toks1, toks2, n, join_char="_"):
"""
For a given value of n, calculates the log-likelihood and log-ratio between two different corpora.
@param toks1: A dict-like structure of tokenised documents.
@param toks2: A dict-like structure of tokenised documents.
@param n: The value of n for finding ngrams.
"""
counts1 = get_ngram_counts(toks1, n, join_char=join_char)
counts2 = get_ngram_counts(toks2, n, join_char=join_char)
key_ngrams = get_ll_and_lr(counts1, counts2)
key_ngrams = | pd.DataFrame(key_ngrams, columns=["ngram", "freq1", "freq2", "LR", "LL", "Len_Corpus1", "Len_Corpus2"]) | pandas.DataFrame |
import time
import requests
import numpy as np
import pandas as pd
from tradingfeatures import apiBase
class bitmexBase(apiBase):
def __init__(self):
super(bitmexBase, self).__init__(
name = 'bitmex',
per_step = 500,
sleep = 1.01,
)
self.base_address = 'https://www.bitmex.com/api/v1'
self.address = '/trade/bucketed'
self.start = 1442227200
self.limit = 500
self.default_columns = ['open', 'high', 'low', 'close', 'trades', 'volume', 'vwap', 'lastSize', 'turnover',
'homeNotional', 'foreignNotional']
self.symbol_dict = {
'btcusd': 'XBT',
'ethusd': 'ETH',
# 'ethbtc': 'ETHBTC',
'ltcusd': 'LTC',
'bchusd': 'BCH',
'eosusd': 'EOS',
'xrpusd': 'XRP',
}
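    # Usage sketch (performs a live network request, shown for illustration only):
    #
    #     api = bitmexBase()
    #     candles = api.get(limit=100, symbol='btcusd', interval='1h')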
def get(self,
limit: int = None,
symbol: str = None,
address: str = None,
query: dict = None,
start: int = None,
end: int = None,
interval: str = '1h',
columns: list = None,
return_r: bool = False,
):
address = address or self.address
address = self.base_address + address
symbol = symbol or 'btcusd'
start, end, out_of_range = self.calc_start(limit, start, end, interval)
if out_of_range:
return self.get_hist(symbol=symbol, start=start, end=end, columns=columns)
symbol = self.symbol_check(symbol) # had to give raw symbol above, this has to be after
if query is None:
limit = self.limit if limit is None else limit
start, end = self.to_date(start), self.to_date(end)
query = {'symbol': symbol, 'binSize': interval, 'count': limit, 'startTime': start, 'endTime': end,
'reverse': 'false'}
r = self.response_handler(address, query)
# Bitmex remaining limit
if 'x-ratelimit-remaining' in r.headers:
if int(r.headers['x-ratelimit-remaining']) <= 2:
print('\nReached the rate limit, bitmex api is sleeping...')
time.sleep(61)
if return_r:
return r
df = pd.DataFrame.from_dict(r.json())
if len(df) == 0:
return None
df['timestamp'] = self.to_ts( | pd.to_datetime(df['timestamp']) | pandas.to_datetime |
# -*- coding: utf-8 -*-
from collections import OrderedDict
from datetime import date, datetime, timedelta
import numpy as np
import pytest
from pandas.compat import product, range
import pandas as pd
from pandas import (
Categorical, DataFrame, Grouper, Index, MultiIndex, Series, concat,
date_range)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.pivot import crosstab, pivot_table
import pandas.util.testing as tm
@pytest.fixture(params=[True, False])
def dropna(request):
return request.param
class TestPivotTable(object):
def setup_method(self, method):
self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_pivot_table(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, values='D',
index=index, columns=columns)
table2 = self.data.pivot_table(
values='D', index=index, columns=columns)
tm.assert_frame_equal(table, table2)
# this works
pivot_table(self.data, values='D', index=index)
if len(index) > 1:
assert table.index.names == tuple(index)
else:
assert table.index.name == index[0]
if len(columns) > 1:
assert table.columns.names == columns
else:
assert table.columns.name == columns[0]
expected = self.data.groupby(
index + [columns])['D'].agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
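    # The identity exercised above, in plain pandas terms (illustrative, not part of the test):
    #
    #     df.pivot_table(values='D', index=['A', 'B'], columns='C')
    #     # is equivalent to
    #     df.groupby(['A', 'B', 'C'])['D'].mean().unstack()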
def test_pivot_table_nocols(self):
df = DataFrame({'rows': ['a', 'b', 'c'],
'cols': ['x', 'y', 'z'],
'values': [1, 2, 3]})
rs = df.pivot_table(columns='cols', aggfunc=np.sum)
xp = df.pivot_table(index='cols', aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns='cols', aggfunc={'values': 'mean'})
xp = df.pivot_table(index='cols', aggfunc={'values': 'mean'}).T
tm.assert_frame_equal(rs, xp)
def test_pivot_table_dropna(self):
df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000},
'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'},
'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310},
'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'},
'quantity': {0: 2000000, 1: 500000,
2: 1000000, 3: 1000000}})
pv_col = df.pivot_table('quantity', 'month', [
'customer', 'product'], dropna=False)
pv_ind = df.pivot_table(
'quantity', ['customer', 'product'], 'month', dropna=False)
m = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('A', 'c'),
('A', 'd'), ('B', 'a'), ('B', 'b'),
('B', 'c'), ('B', 'd'), ('C', 'a'),
('C', 'b'), ('C', 'c'), ('C', 'd')],
names=['customer', 'product'])
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
def test_pivot_table_categorical(self):
cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'],
dropna=True)
exp_index = pd.MultiIndex.from_arrays(
[cat1, cat2],
names=['A', 'B'])
expected = DataFrame(
{'values': [1, 2, 3, 4]},
index=exp_index)
tm.assert_frame_equal(result, expected)
def test_pivot_table_dropna_categoricals(self, dropna):
# GH 15193
categories = ['a', 'b', 'c', 'd']
df = DataFrame({'A': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'],
'B': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'C': range(0, 9)})
df['A'] = df['A'].astype(CDT(categories, ordered=False))
result = df.pivot_table(index='B', columns='A', values='C',
dropna=dropna)
expected_columns = Series(['a', 'b', 'c'], name='A')
expected_columns = expected_columns.astype(
CDT(categories, ordered=False))
expected_index = Series([1, 2, 3], name='B')
expected = DataFrame([[0, 3, 6],
[1, 4, 7],
[2, 5, 8]],
index=expected_index,
columns=expected_columns,)
if not dropna:
# add back the non observed to compare
expected = expected.reindex(
columns=Categorical(categories)).astype('float')
tm.assert_frame_equal(result, expected)
def test_pivot_with_non_observable_dropna(self, dropna):
# gh-21133
df = pd.DataFrame(
{'A': pd.Categorical([np.nan, 'low', 'high', 'low', 'high'],
categories=['low', 'high'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3]},
index=pd.Index(
pd.Categorical.from_codes([0, 1],
categories=['low', 'high'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
# gh-21378
df = pd.DataFrame(
{'A': pd.Categorical(['left', 'low', 'high', 'low', 'high'],
categories=['low', 'high', 'left'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3, 0]},
index=pd.Index(
pd.Categorical.from_codes([0, 1, 2],
categories=['low', 'high', 'left'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
expected = self.data.pivot_table('D', index='A', columns='C')
tm.assert_frame_equal(result, expected)
def test_pass_function(self):
result = self.data.pivot_table('D', index=lambda x: x // 5,
columns=self.data.C)
expected = self.data.pivot_table('D', index=self.data.index // 5,
columns='C')
tm.assert_frame_equal(result, expected)
def test_pivot_table_multiple(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, index=index, columns=columns)
expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_dtypes(self):
# can convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1, 2, 3, 4], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'int64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.sum)
result = z.get_dtype_counts()
expected = Series(dict(int64=2))
tm.assert_series_equal(result, expected)
# cannot convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1.5, 2.5, 3.5, 4.5], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'float64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.mean)
result = z.get_dtype_counts()
expected = Series(dict(float64=2))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns,values',
[('bool1', ['float1', 'float2']),
('bool1', ['float1', 'float2', 'bool1']),
('bool2', ['float1', 'float2', 'bool1'])])
def test_pivot_preserve_dtypes(self, columns, values):
# GH 7142 regression test
v = np.arange(5, dtype=np.float64)
df = DataFrame({'float1': v, 'float2': v + 2.0,
'bool1': v <= 2, 'bool2': v <= 3})
df_res = df.reset_index().pivot_table(
index='index', columns=columns, values=values)
result = dict(df_res.dtypes)
expected = {col: np.dtype('O') if col[0].startswith('b')
else np.dtype('float64') for col in df_res}
assert result == expected
def test_pivot_no_values(self):
# GH 14380
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-01-02',
'2011-01-01', '2011-01-02'])
df = pd.DataFrame({'A': [1, 2, 3, 4, 5]},
index=idx)
res = df.pivot_table(index=df.index.month, columns=df.index.day)
exp_columns = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)])
exp = pd.DataFrame([[2.5, 4.0], [2.0, np.nan]],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
df = pd.DataFrame({'A': [1, 2, 3, 4, 5],
'dt': pd.date_range('2011-01-01', freq='D',
periods=5)},
index=idx)
res = df.pivot_table(index=df.index.month,
columns=pd.Grouper(key='dt', freq='M'))
exp_columns = pd.MultiIndex.from_tuples([('A',
pd.Timestamp('2011-01-31'))])
exp_columns.names = [None, 'dt']
exp = pd.DataFrame([3.25, 2.0],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
res = df.pivot_table(index=pd.Grouper(freq='A'),
columns=pd.Grouper(key='dt', freq='M'))
exp = pd.DataFrame([3],
index=pd.DatetimeIndex(['2011-12-31']),
columns=exp_columns)
tm.assert_frame_equal(res, exp)
def test_pivot_multi_values(self):
result = pivot_table(self.data, values=['D', 'E'],
index='A', columns=['B', 'C'], fill_value=0)
expected = pivot_table(self.data.drop(['F'], axis=1),
index='A', columns=['B', 'C'], fill_value=0)
tm.assert_frame_equal(result, expected)
def test_pivot_multi_functions(self):
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
# margins not supported??
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func, margins=True)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_index_with_nan(self, method):
# GH 3588
nan = np.nan
df = DataFrame({'a': ['R1', 'R2', nan, 'R4'],
'b': ['C1', 'C2', 'C3', 'C4'],
'c': [10, 15, 17, 20]})
if method:
result = df.pivot('a', 'b', 'c')
else:
result = pd.pivot(df, 'a', 'b', 'c')
expected = DataFrame([[nan, nan, 17, nan], [10, nan, nan, nan],
[nan, 15, nan, nan], [nan, nan, nan, 20]],
index=Index([nan, 'R1', 'R2', 'R4'], name='a'),
columns=Index(['C1', 'C2', 'C3', 'C4'], name='b'))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df.pivot('b', 'a', 'c'), expected.T)
# GH9491
df = DataFrame({'a': pd.date_range('2014-02-01', periods=6, freq='D'),
'c': 100 + np.arange(6)})
df['b'] = df['a'] - pd.Timestamp('2014-02-02')
df.loc[1, 'a'] = df.loc[3, 'a'] = nan
df.loc[1, 'b'] = df.loc[4, 'b'] = nan
if method:
pv = df.pivot('a', 'b', 'c')
else:
pv = pd.pivot(df, 'a', 'b', 'c')
assert pv.notna().values.sum() == len(df)
for _, row in df.iterrows():
assert pv.loc[row['a'], row['b']] == row['c']
if method:
result = df.pivot('b', 'a', 'c')
else:
result = pd.pivot(df, 'b', 'a', 'c')
tm.assert_frame_equal(result, pv.T)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tz(self, method):
# GH 5878
df = DataFrame({'dt1': [datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0),
datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0)],
'dt2': [datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 2, 9, 0),
datetime(2014, 1, 2, 9, 0)],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'] * 2,
name='dt2', tz='Asia/Tokyo')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=exp_col)
if method:
pv = df.pivot(index='dt1', columns='dt2')
else:
pv = pd.pivot(df, index='dt1', columns='dt2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'],
name='dt2',
tz='Asia/Tokyo'))
if method:
pv = df.pivot(index='dt1', columns='dt2', values='data1')
else:
pv = pd.pivot(df, index='dt1', columns='dt2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_periods(self, method):
df = DataFrame({'p1': [pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D'),
pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D')],
'p2': [pd.Period('2013-01', 'M'),
pd.Period('2013-01', 'M'),
pd.Period('2013-02', 'M'),
pd.Period('2013-02', 'M')],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.PeriodIndex(['2013-01', '2013-02'] * 2,
name='p2', freq='M')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=exp_col)
if method:
pv = df.pivot(index='p1', columns='p2')
else:
pv = pd.pivot(df, index='p1', columns='p2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=pd.PeriodIndex(['2013-01', '2013-02'],
name='p2', freq='M'))
if method:
pv = df.pivot(index='p1', columns='p2', values='data1')
else:
pv = pd.pivot(df, index='p1', columns='p2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('values', [
['baz', 'zoo'], np.array(['baz', 'zoo']),
pd.Series(['baz', 'zoo']), pd.Index(['baz', 'zoo'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='foo', columns='bar', values=values)
else:
result = pd.pivot(df, index='foo', columns='bar', values=values)
data = [[1, 2, 3, 'x', 'y', 'z'],
[4, 5, 6, 'q', 'w', 't']]
index = Index(data=['one', 'two'], name='foo')
columns = MultiIndex(levels=[['baz', 'zoo'], ['A', 'B', 'C']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
names=[None, 'bar'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('values', [
['bar', 'baz'], np.array(['bar', 'baz']),
pd.Series(['bar', 'baz']), pd.Index(['bar', 'baz'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values_nans(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='zoo', columns='foo', values=values)
else:
result = pd.pivot(df, index='zoo', columns='foo', values=values)
data = [[np.nan, 'A', np.nan, 4],
[np.nan, 'C', np.nan, 6],
[np.nan, 'B', np.nan, 5],
['A', np.nan, 1, np.nan],
['B', np.nan, 2, np.nan],
['C', np.nan, 3, np.nan]]
index = Index(data=['q', 't', 'w', 'x', 'y', 'z'], name='zoo')
columns = MultiIndex(levels=[['bar', 'baz'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[None, 'foo'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason='MultiIndexed unstack with tuple names fails'
'with KeyError GH#19966')
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_multiindex(self, method):
# issue #17160
index = Index(data=[0, 1, 2, 3, 4, 5])
data = [['one', 'A', 1, 'x'],
['one', 'B', 2, 'y'],
['one', 'C', 3, 'z'],
['two', 'A', 4, 'q'],
['two', 'B', 5, 'w'],
['two', 'C', 6, 't']]
columns = MultiIndex(levels=[['bar', 'baz'], ['first', 'second']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(data=data, index=index, columns=columns, dtype='object')
if method:
result = df.pivot(index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
else:
result = pd.pivot(df,
index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
data = {'A': Series([1, 4], index=['one', 'two']),
'B': Series([2, 5], index=['one', 'two']),
'C': Series([3, 6], index=['one', 'two'])}
expected = DataFrame(data)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tuple_of_values(self, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
with pytest.raises(KeyError):
# tuple is seen as a single column name
if method:
df.pivot(index='zoo', columns='foo', values=('bar', 'baz'))
else:
pd.pivot(df, index='zoo', columns='foo', values=('bar', 'baz'))
def test_margins(self):
def _check_output(result, values_col, index=['A', 'B'],
columns=['C'],
margins_col='All'):
col_margins = result.loc[result.index[:-1], margins_col]
expected_col_margins = self.data.groupby(index)[values_col].mean()
tm.assert_series_equal(col_margins, expected_col_margins,
check_names=False)
assert col_margins.name == margins_col
result = result.sort_index()
index_margins = result.loc[(margins_col, '')].iloc[:-1]
expected_ix_margins = self.data.groupby(columns)[values_col].mean()
tm.assert_series_equal(index_margins, expected_ix_margins,
check_names=False)
assert index_margins.name == (margins_col, '')
grand_total_margins = result.loc[(margins_col, ''), margins_col]
expected_total_margins = self.data[values_col].mean()
assert grand_total_margins == expected_total_margins
# column specified
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean)
_check_output(result, 'D')
# Set a different margins_name (not 'All')
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean,
margins_name='Totals')
_check_output(result, 'D', margins_col='Totals')
# no column specified
table = self.data.pivot_table(index=['A', 'B'], columns='C',
margins=True, aggfunc=np.mean)
for value_col in table.columns.levels[0]:
_check_output(table[value_col], value_col)
# no col
# to help with a buglet
self.data.columns = [k * 2 for k in self.data.columns]
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc=np.mean)
for value_col in table.columns:
totals = table.loc[('All', ''), value_col]
assert totals == self.data[value_col].mean()
# no rows
rtable = self.data.pivot_table(columns=['AA', 'BB'], margins=True,
aggfunc=np.mean)
assert isinstance(rtable, Series)
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc='mean')
for item in ['DD', 'EE', 'FF']:
totals = table.loc[('All', ''), item]
assert totals == self.data[item].mean()
def test_margins_dtype(self):
# GH 17013
df = self.data.copy()
df[['D', 'E', 'F']] = np.arange(len(df) * 3).reshape(len(df), 3)
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [12, 21, 3, 9, 45],
'shiny': [33, 0, 36, 51, 120]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = df.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=np.sum, fill_value=0)
tm.assert_frame_equal(expected, result)
@pytest.mark.xfail(reason='GH#17035 (len of floats is casted back to '
'floats)')
def test_margins_dtype_len(self):
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [1, 1, 2, 1, 5],
'shiny': [2, 0, 2, 2, 6]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=len, fill_value=0)
tm.assert_frame_equal(expected, result)
def test_pivot_integer_columns(self):
# caused by upstream bug in unstack
d = date.min
data = list(product(['foo', 'bar'], ['A', 'B', 'C'], ['x1', 'x2'],
[d + timedelta(i)
for i in range(20)], [1.0]))
df = DataFrame(data)
table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2])
df2 = df.rename(columns=str)
table2 = df2.pivot_table(
values='4', index=['0', '1', '3'], columns=['2'])
tm.assert_frame_equal(table, table2, check_names=False)
def test_pivot_no_level_overlap(self):
# GH #1181
data = DataFrame({'a': ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'] * 2,
'b': [0, 0, 0, 0, 1, 1, 1, 1] * 2,
'c': (['foo'] * 4 + ['bar'] * 4) * 2,
'value': np.random.randn(16)})
table = data.pivot_table('value', index='a', columns=['b', 'c'])
grouped = data.groupby(['a', 'b', 'c'])['value'].mean()
expected = grouped.unstack('b').unstack('c').dropna(axis=1, how='all')
tm.assert_frame_equal(table, expected)
def test_pivot_columns_lexsorted(self):
n = 10000
dtype = np.dtype([
("Index", object),
("Symbol", object),
("Year", int),
("Month", int),
("Day", int),
("Quantity", int),
("Price", float),
])
products = np.array([
('SP500', 'ADBE'),
('SP500', 'NVDA'),
('SP500', 'ORCL'),
('NDQ100', 'AAPL'),
('NDQ100', 'MSFT'),
('NDQ100', 'GOOG'),
('FTSE', 'DGE.L'),
('FTSE', 'TSCO.L'),
('FTSE', 'GSK.L'),
], dtype=[('Index', object), ('Symbol', object)])
items = np.empty(n, dtype=dtype)
iproduct = np.random.randint(0, len(products), n)
items['Index'] = products['Index'][iproduct]
items['Symbol'] = products['Symbol'][iproduct]
dr = pd.date_range(date(2000, 1, 1),
date(2010, 12, 31))
dates = dr[np.random.randint(0, len(dr), n)]
items['Year'] = dates.year
items['Month'] = dates.month
items['Day'] = dates.day
items['Price'] = np.random.lognormal(4.0, 2.0, n)
df = DataFrame(items)
pivoted = df.pivot_table('Price', index=['Month', 'Day'],
columns=['Index', 'Symbol', 'Year'],
aggfunc='mean')
assert pivoted.columns.is_monotonic
def test_pivot_complex_aggfunc(self):
f = OrderedDict([('D', ['std']), ('E', ['sum'])])
expected = self.data.groupby(['A', 'B']).agg(f).unstack('B')
result = self.data.pivot_table(index='A', columns='B', aggfunc=f)
tm.assert_frame_equal(result, expected)
def test_margins_no_values_no_cols(self):
# Regression test on pivot table: no values or cols passed.
result = self.data[['A', 'B']].pivot_table(
index=['A', 'B'], aggfunc=len, margins=True)
result_list = result.tolist()
assert sum(result_list[:-1]) == result_list[-1]
def test_margins_no_values_two_rows(self):
# Regression test on pivot table: no values passed but rows are a
# multi-index
result = self.data[['A', 'B', 'C']].pivot_table(
index=['A', 'B'], columns='C', aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_margins_no_values_one_row_one_col(self):
# Regression test on pivot table: no values passed but row and col
# defined
result = self.data[['A', 'B']].pivot_table(
index='A', columns='B', aggfunc=len, margins=True)
assert result.All.tolist() == [4.0, 7.0, 11.0]
def test_margins_no_values_two_row_two_cols(self):
# Regression test on pivot table: no values passed but rows and cols
# are multi-indexed
self.data['D'] = ['a', 'b', 'c', 'd',
'e', 'f', 'g', 'h', 'i', 'j', 'k']
result = self.data[['A', 'B', 'C', 'D']].pivot_table(
index=['A', 'B'], columns=['C', 'D'], aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_pivot_table_with_margins_set_margin_name(self):
# see gh-3335
for margin_name in ['foo', 'one', 666, None, ['a', 'b']]:
with pytest.raises(ValueError):
# multi-index index
pivot_table(self.data, values='D', index=['A', 'B'],
columns=['C'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# multi-index column
pivot_table(self.data, values='D', index=['C'],
columns=['A', 'B'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# non-multi-index index/column
pivot_table(self.data, values='D', index=['A'],
columns=['B'], margins=True,
margins_name=margin_name)
def test_pivot_timegrouper(self):
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
            'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 1, 1),
datetime(2013, 1, 1),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 12, 2),
datetime(2013, 12, 2), ]}).set_index('Date')
expected = DataFrame(np.array([10, 18, 3], dtype='int64')
.reshape(1, 3),
index=[datetime(2013, 12, 31)],
                             columns='Carl Joe Mark'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='A'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='A'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
expected = DataFrame(np.array([1, np.nan, 3, 9, 18, np.nan])
.reshape(2, 3),
index=[datetime(2013, 1, 1),
datetime(2013, 7, 1)],
                             columns='Carl Joe Mark'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='6MS'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='6MS'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
# passing the name
df = df.reset_index()
result = pivot_table(df, index=Grouper(freq='6MS', key='Date'),
columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(KeyError, lambda: pivot_table(
df, index=Grouper(freq='6MS', key='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
pytest.raises(KeyError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', key='foo'),
values='Quantity', aggfunc=np.sum))
# passing the level
df = df.set_index('Date')
result = pivot_table(df, index=Grouper(freq='6MS', level='Date'),
columns='Buyer', values='Quantity',
aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', level='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(ValueError, lambda: pivot_table(
df, index=Grouper(freq='6MS', level='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
pytest.raises(ValueError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', level='foo'),
values='Quantity', aggfunc=np.sum))
# double grouper
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
            'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 11, 1, 13, 0), datetime(2013, 9, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 11, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 10, 2, 12, 0),
datetime(2013, 12, 5, 14, 0)],
'PayDay': [datetime(2013, 10, 4, 0, 0),
datetime(2013, 10, 15, 13, 5),
datetime(2013, 9, 5, 20, 0),
datetime(2013, 11, 2, 10, 0),
datetime(2013, 10, 7, 20, 0),
datetime(2013, 9, 5, 10, 0),
datetime(2013, 12, 30, 12, 0),
datetime(2013, 11, 20, 14, 0), ]})
result = pivot_table(df, index=Grouper(freq='M', key='Date'),
columns=Grouper(freq='M', key='PayDay'),
values='Quantity', aggfunc=np.sum)
expected = DataFrame(np.array([np.nan, 3, np.nan, np.nan,
6, np.nan, 1, 9,
np.nan, 9, np.nan, np.nan, np.nan,
np.nan, 3, np.nan]).reshape(4, 4),
index=[datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31)],
columns=[datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31)])
expected.index.name = 'Date'
expected.columns.name = 'PayDay'
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=Grouper(freq='M', key='PayDay'),
columns=Grouper(freq='M', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
tuples = [(datetime(2013, 9, 30), datetime(2013, 10, 31)),
(datetime(2013, 10, 31),
datetime(2013, 9, 30)),
(datetime(2013, 10, 31),
datetime(2013, 11, 30)),
(datetime(2013, 10, 31),
datetime(2013, 12, 31)),
(datetime(2013, 11, 30),
datetime(2013, 10, 31)),
(datetime(2013, 12, 31), datetime(2013, 11, 30)), ]
idx = MultiIndex.from_tuples(tuples, names=['Date', 'PayDay'])
expected = DataFrame(np.array([3, np.nan, 6, np.nan, 1, np.nan,
9, np.nan, 9, np.nan,
np.nan, 3]).reshape(6, 2),
index=idx, columns=['A', 'B'])
expected.columns.name = 'Branch'
result = pivot_table(
df, index=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')], columns=['Branch'],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=['Branch'],
columns=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
def test_pivot_datetime_tz(self):
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00',
'2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_idx = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'],
tz='US/Pacific', name='dt1')
exp_col1 = Index(['value1', 'value1'])
exp_col2 = Index(['a', 'b'], name='label')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 3], [1, 4], [2, 5]],
index=exp_idx, columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=[
'label'], values=['value1'])
tm.assert_frame_equal(result, expected)
exp_col1 = Index(['sum', 'sum', 'sum', 'sum',
'mean', 'mean', 'mean', 'mean'])
exp_col2 = Index(['value1', 'value1', 'value2', 'value2'] * 2)
exp_col3 = pd.DatetimeIndex(['2013-01-01 15:00:00',
'2013-02-01 15:00:00'] * 4,
tz='Asia/Tokyo', name='dt2')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2, exp_col3])
expected = DataFrame(np.array([[0, 3, 1, 2, 0, 3, 1, 2],
[1, 4, 2, 1, 1, 4, 2, 1],
[2, 5, 1, 2, 2, 5, 1, 2]],
dtype='int64'),
index=exp_idx,
columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=['dt2'],
values=['value1', 'value2'],
aggfunc=[np.sum, np.mean])
tm.assert_frame_equal(result, expected)
def test_pivot_dtaccessor(self):
# GH 8103
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00',
'2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d))
result = pivot_table(df, index='label', columns=df['dt1'].dt.hour,
values='value1')
exp_idx = Index(['a', 'b'], name='label')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=exp_idx,
columns=Index([7, 8, 9], name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.month,
columns=df['dt1'].dt.hour,
values='value1')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=Index([1, 2], name='dt2'),
columns=Index([7, 8, 9], name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.year.values,
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
exp_col = MultiIndex.from_arrays(
[[7, 7, 8, 8, 9, 9], [1, 2] * 3], names=['dt1', 'dt2'])
expected = DataFrame(np.array([[0, 3, 1, 4, 2, 5]], dtype='int64'),
index=[2013], columns=exp_col)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=np.array(['X', 'X', 'X',
'X', 'Y', 'Y']),
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
expected = DataFrame(np.array([[0, 3, 1, np.nan, 2, np.nan],
[np.nan, np.nan, np.nan,
4, np.nan, 5]]),
index=['X', 'Y'], columns=exp_col)
tm.assert_frame_equal(result, expected)
def test_daily(self):
rng = date_range('1/1/2000', '12/31/2004', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_table(DataFrame(ts), index=ts.index.year,
columns=ts.index.dayofyear)
annual.columns = annual.columns.droplevel(0)
doy = np.asarray(ts.index.dayofyear)
for i in range(1, 367):
subset = ts[doy == i]
subset.index = subset.index.year
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
assert result.name == i
def test_monthly(self):
rng = date_range('1/1/2000', '12/31/2004', freq='M')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_table(pd.DataFrame(ts), index=ts.index.year,
columns=ts.index.month)
annual.columns = annual.columns.droplevel(0)
month = ts.index.month
for i in range(1, 13):
subset = ts[month == i]
subset.index = subset.index.year
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
assert result.name == i
def test_pivot_table_with_iterator_values(self):
# GH 12017
aggs = {'D': 'sum', 'E': 'mean'}
pivot_values_list = pd.pivot_table(
self.data, index=['A'], values=list(aggs.keys()), aggfunc=aggs,
)
pivot_values_keys = pd.pivot_table(
self.data, index=['A'], values=aggs.keys(), aggfunc=aggs,
)
tm.assert_frame_equal(pivot_values_keys, pivot_values_list)
agg_values_gen = (value for value in aggs.keys())
pivot_values_gen = pd.pivot_table(
self.data, index=['A'], values=agg_values_gen, aggfunc=aggs,
)
tm.assert_frame_equal(pivot_values_gen, pivot_values_list)
def test_pivot_table_margins_name_with_aggfunc_list(self):
# GH 13354
margins_name = 'Weekly'
costs = pd.DataFrame(
{'item': ['bacon', 'cheese', 'bacon', 'cheese'],
'cost': [2.5, 4.5, 3.2, 3.3],
'day': ['M', 'M', 'T', 'T']}
)
table = costs.pivot_table(
index="item", columns="day", margins=True,
margins_name=margins_name, aggfunc=[np.mean, max]
)
ix = pd.Index(
['bacon', 'cheese', margins_name], dtype='object', name='item'
)
tups = [('mean', 'cost', 'M'), ('mean', 'cost', 'T'),
('mean', 'cost', margins_name), ('max', 'cost', 'M'),
('max', 'cost', 'T'), ('max', 'cost', margins_name)]
cols = pd.MultiIndex.from_tuples(tups, names=[None, None, 'day'])
expected = pd.DataFrame(table.values, index=ix, columns=cols)
tm.assert_frame_equal(table, expected)
@pytest.mark.xfail(reason='GH#17035 (np.mean of ints is casted back to '
'ints)')
def test_categorical_margins(self, observed):
# GH 10989
df = pd.DataFrame({'x': np.arange(8),
'y': np.arange(8) // 4,
'z': np.arange(8) % 2})
expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
expected.index = Index([0, 1, 'All'], name='y')
expected.columns = Index([0, 1, 'All'], name='z')
table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
@pytest.mark.xfail(reason='GH#17035 (np.mean of ints is casted back to '
'ints)')
def test_categorical_margins_category(self, observed):
df = pd.DataFrame({'x': np.arange(8),
'y': np.arange(8) // 4,
'z': np.arange(8) % 2})
expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
expected.index = Index([0, 1, 'All'], name='y')
expected.columns = Index([0, 1, 'All'], name='z')
df.y = df.y.astype('category')
df.z = df.z.astype('category')
table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
def test_categorical_aggfunc(self, observed):
# GH 9534
df = pd.DataFrame({"C1": ["A", "B", "C", "C"],
"C2": ["a", "a", "b", "b"],
"V": [1, 2, 3, 4]})
df["C1"] = df["C1"].astype("category")
result = df.pivot_table("V", index="C1", columns="C2",
dropna=observed, aggfunc="count")
expected_index = pd.CategoricalIndex(['A', 'B', 'C'],
categories=['A', 'B', 'C'],
ordered=False,
name='C1')
expected_columns = pd.Index(['a', 'b'], name='C2')
expected_data = np.array([[1., np.nan],
[1., np.nan],
[np.nan, 2.]])
expected = pd.DataFrame(expected_data,
index=expected_index,
columns=expected_columns)
tm.assert_frame_equal(result, expected)
def test_categorical_pivot_index_ordering(self, observed):
# GH 8731
df = pd.DataFrame({'Sales': [100, 120, 220],
'Month': ['January', 'January', 'January'],
'Year': [2013, 2014, 2013]})
months = ['January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November',
'December']
df['Month'] = df['Month'].astype('category').cat.set_categories(months)
result = df.pivot_table(values='Sales',
index='Month',
columns='Year',
dropna=observed,
aggfunc='sum')
expected_columns = pd.Int64Index([2013, 2014], name='Year')
expected_index = pd.CategoricalIndex(['January'],
categories=months,
ordered=False,
name='Month')
expected = pd.DataFrame([[320, 120]],
index=expected_index,
columns=expected_columns)
if not observed:
result = result.dropna().astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_pivot_table_not_series(self):
# GH 4386
# pivot_table always returns a DataFrame
# when values is not list like and columns is None
# and aggfunc is not instance of list
df = DataFrame({'col1': [3, 4, 5],
'col2': ['C', 'D', 'E'],
'col3': [1, 3, 9]})
result = df.pivot_table('col1', index=['col3', 'col2'], aggfunc=np.sum)
m = MultiIndex.from_arrays([[1, 3, 9],
['C', 'D', 'E']],
names=['col3', 'col2'])
expected = DataFrame([3, 4, 5],
index=m, columns=['col1'])
tm.assert_frame_equal(result, expected)
result = df.pivot_table(
'col1', index='col3', columns='col2', aggfunc=np.sum
)
expected = DataFrame([[3, np.NaN, np.NaN],
[np.NaN, 4, np.NaN],
[np.NaN, np.NaN, 5]],
index=Index([1, 3, 9], name='col3'),
columns=Index(['C', 'D', 'E'], name='col2'))
tm.assert_frame_equal(result, expected)
result = df.pivot_table('col1', index='col3', aggfunc=[np.sum])
m = MultiIndex.from_arrays([['sum'],
['col1']])
expected = DataFrame([3, 4, 5],
index=Index([1, 3, 9], name='col3'),
columns=m)
tm.assert_frame_equal(result, expected)
def test_pivot_margins_name_unicode(self):
# issue #13292
greek = u'\u0394\u03bf\u03ba\u03b9\u03bc\u03ae'
        frame = pd.DataFrame({'foo': [1, 2, 3]})
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import util_global
from conver_by_ast import conver_ast
from file_op import mkdir
from file_op import mkdir_and_copyfile
from file_op import write_report_terminator
from file_op import abs_join
import pandas as pd
from file_op import get_api_statistic
from file_op import adjust_index
def conver():
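    # Set up the output/report paths for this conversion run and initialize
    # an empty per-directory API report before walking the input tree.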
print("Begin conver, input file: " + util_global.get_value('input'))
out_path = util_global.get_value('output')
dst_path = os.path.split(util_global.get_value('input').rstrip('\\/'))[-1]
dst_path_new = dst_path + util_global.get_value('timestap')
conver_path = os.walk(util_global.get_value('input'))
report_dir = util_global.get_value('report')
mkdir(report_dir)
report_xlsx = os.path.join(report_dir, 'api_analysis_report.xlsx')
util_global.set_value('generate_dir_report', | pd.DataFrame() | pandas.DataFrame |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python [conda env:.conda-develop] *
# language: python
# name: conda-env-.conda-develop-py
# ---
# %% [markdown]
# # Description
# %% [markdown]
# # Imports
# %%
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import json
import jsonpickle
import jsonpickle.ext.pandas as jsonpickle_pandas
jsonpickle_pandas.register_handlers()
import logging
import pandas as pd
import helpers.hdbg as hdbg
import helpers.henv as henv
import helpers.hplayback as hplayba
import helpers.hprint as hprint
# %%
hdbg.init_logger(verbosity=logging.INFO)
_LOG = logging.getLogger(__name__)
_LOG.info("%s", henv.get_system_signature()[0])
hprint.config_notebook()
# %%
import pandas as pd
data = {
"Product": ["Desktop Computer", "Tablet", "iPhone", "Laptop"],
"Price": [700, 250, 800, 1200],
}
df = pd.DataFrame(data, columns=["Product", "Price"])
df.index.name = "hello"
print(df)
# %%
# df.to_json(orient="")
df.to_dict(orient="series")
# %%
hplayba.to_python_code(df)
# %%
pd.DataFrame.from_dict(
{
"Product": ["Desktop Computer", "Tablet", "iPhone", "Laptop"],
"Price": [700, 250, 800, 1200],
}
)
# %%
use_playback = True
def F(a, b):
if use_playback:
playback = hplayba.Playback("assert_equal", "F", a, b)
playback.start()
c = a + b
if use_playback:
output = playback.end(c)
res = output
else:
res = c
return res
a = df
b = df
print(F(a, b))
# %%
hplayba.to_python_code(["3", 3])
# %%
hplayba.round_trip_convert(df, logging.INFO)
# %%
hplayba.round_trip_convert("hello", logging.INFO)
# %%
def F(a, b):
return a + b
# %%
# Initialize values for unit test.
dummy_0 = r"3"
dummy_0 = jsonpickle.decode(dummy_0)
dummy_1 = r"2"
dummy_1 = jsonpickle.decode(dummy_1)
# Call function.
act = F(dummy_0, dummy_1)
# Create expected value of function output.
exp = r"5"
exp = jsonpickle.decode(exp)
# Check.
assert act == exp
# %%
class Playback:
# def __init__(self, file_name, mode, *args, **kwargs):
# self.args = args
# self.kwargs = kwargs
def __init__(self, file_name, mode, func_name, a, b):
self.a = a
self.b = b
def start(self):
self.a_json = jsonpickle.encode(self.a)
self.b_json = jsonpickle.encode(self.b)
def end(self, ret):
self.ret_json = jsonpickle.encode(ret)
output = []
output.append("# Initialize values for unit test.")
output.append("a = %s" % jsonpickle.decode(self.a_json))
output.append("b = %s" % jsonpickle.decode(self.b_json))
output.append("# Apply values.")
output.append("act = F(a, b)")
output.append("exp = %s" % jsonpickle.decode(self.ret_json))
# output.append("self.assertEqual(act, exp)")
# output.append("assert act == exp")
output = "\n".join(output)
print("output=", output)
# def F(a: int, b: int):
# c = {}
# c["pavel"] = a + b
# return c
def F(a: int, b: int):
playback = Playback("", "", "F", a, b)
playback.start()
c = {}
c["pavel"] = a + b
playback.end(c)
return c
res = F(3, 4)
print(res)
# %%
class Playback:
# def __init__(self, file_name, mode, *args, **kwargs):
# self.args = args
# self.kwargs = kwargs
def __init__(self, file_name, mode, func_name, a, b):
self.a = a
self.b = b
def start(self):
self.a_json = jsonpickle.encode(self.a)
self.b_json = jsonpickle.encode(self.b)
def end(self, ret):
self.ret_json = jsonpickle.encode(ret)
output = []
output.append("# Initialize values for unit test.")
# output.append("a = %s" % jsonpickle.decode(self.a_json))
# output.append("b = %s" % jsonpickle.decode(self.b_json))
output.append("a = r'%s'" % self.a_json)
output.append("a = jsonpickle.decode(a)")
output.append("b = r'%s'" % self.b_json)
output.append("b = jsonpickle.decode(b)")
output.append("# Apply values.")
# output.append("act = F(a, b)[1]")
output.append("act = F(a, b)")
output.append("exp = r'%s'" % self.ret_json)
output.append("exp = jsonpickle.decode(exp)")
# output.append("self.assertEqual(act, exp)")
output.append("assert act.equals(exp)")
# output.append("assert act == exp")
output = "\n".join(output)
return output
# def F(a: int, b: int):
# c = {}
# c["pavel"] = a + b
# return c
use_playback = True
def F(a: pd.DataFrame, b: pd.DataFrame):
if use_playback:
playback = Playback("", "", "F", a, b)
playback.start()
# c = {}
# c["pavel"] = a + b
c = a + b
if use_playback:
output = playback.end(c)
res = output, c
else:
res = c
return res
a = pd.DataFrame({"Price": [700, 250, 800, 1200]})
b = pd.DataFrame({"Price": [1, 1, 1, 1]})
res = F(a, b)
output = res[0]
print(output)
exec(output)
# %%
# Initialize values for unit test.
a = r'{"py/object": "pandas.core.frame.DataFrame", "values": "Price\n700\n250\n800\n1200\n", "txt": true, "meta": {"dtypes": {"Price": "int64"}, "index": "{\"py/object\": \"pandas.core.indexes.range.RangeIndex\", \"values\": \"[0, 1, 2, 3]\", \"txt\": true, \"meta\": {\"dtype\": \"int64\", \"name\": null}}"}}'
a = jsonpickle.decode(a)
# %%
a = pd.DataFrame({"Price": [700, 250, 800, 1200]})
import numpy as np
import pandas as pd
from scipy import stats
import sys, os, time, json
from pathlib import Path
import pickle as pkl
sys.path.append('../PreProcessing/')
sys.path.append('../Lib/')
sys.path.append('../Analyses/')
import sklearn.linear_model as lm
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.metrics import balanced_accuracy_score as bac
from joblib import Parallel, delayed
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.text import Text
import seaborn as sns
import analyses_table as AT
import TreeMazeFunctions as TMF
sns.set(style="whitegrid",font_scale=1,rc={
'axes.spines.bottom': False,
'axes.spines.left': False,
'axes.spines.right': False,
'axes.spines.top': False,
'axes.edgecolor':'0.5'})
def main(sePaths, doPlots=False, overwrite = False):
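    # Run the cue/decision analyses for one session: univariate firing-rate
    # comparisons and decoder analyses are computed (or reloaded from the
    # cached CSVs unless overwrite=True), with optional figure output.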
try:
dat = AT.loadSessionData(sePaths)
nUnits = dat['fitTable2'].shape[0]
# univariate analyses.
fn = sePaths['CueDesc_SegUniRes']
if ( (not fn.exists()) or overwrite):
CueDescFR_Dat, all_dat_spl = CueDesc_SegUniAnalysis(dat)
CueDescFR_Dat.to_csv(sePaths['CueDesc_SegUniRes'])
if doPlots:
plotCueVDes(CueDescFR_Dat,sePaths)
plotUnitRvL(CueDescFR_Dat,all_dat_spl,sePaths)
else:
CueDescFR_Dat = pd.read_csv(fn)
# decododer analyses
fn = sePaths['CueDesc_SegDecRes']
if ((not fn.exists()) or overwrite):
singCellDec,singCellDecSummary, popDec = CueDesc_SegDecAnalysis(dat)
singCellDec['se'] = sePaths['session']
singCellDecSummary['se'] = sePaths['session']
popDec['se'] = sePaths['session']
singCellDec.to_csv(fn)
singCellDecSummary.to_csv(sePaths['CueDesc_SegDecSumRes'])
popDec.to_csv(sePaths['PopCueDesc_SegDecSumRes'])
if doPlots:
f,_ = plotMultipleDecoderResults(singCellDecSummary)
fn = sePaths['CueDescPlots'] / ('DecResByUnit.jpeg')
f.savefig(str(fn),dpi=150, bbox_inches='tight',pad_inches=0.2)
plt.close(f)
f,_ = plotMultipleDecoderResults(popDec)
fn = sePaths['CueDescPlots'] / ('PopDecRes.jpeg')
f.savefig(str(fn),dpi=150, bbox_inches='tight',pad_inches=0.2)
plt.close(f)
for unit in np.arange(nUnits):
f,_ = plotMultipleDecoderResults(singCellDec[(singCellDec['unit']==unit)])
                    fn = sePaths['CueDescPlots'] / ('DecRes_UnitID-{}.jpeg'.format(unit))
f.savefig(str(fn),dpi=150, bbox_inches='tight',pad_inches=0.2)
plt.close(f)
else:
singCellDec = pd.read_csv(fn)
singCellDecSummary = pd.read_csv(sePaths['CueDesc_SegDecSumRes'])
popDec = pd.read_csv(sePaths['PopCueDesc_SegDecSumRes'])
return CueDescFR_Dat, singCellDec,singCellDecSummary, popDec
except:
print ("Error", sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2].tb_lineno)
return [],[],[],[]
def CueDesc_SegUniAnalysis(dat):
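    # Univariate analysis: for each unit, compare mean outbound firing rates
    # on left vs right trials (split by cue, by decision, and correct vs
    # incorrect within each arm) for each maze segment with independent-samples
    # t-tests. Returns a long-format summary table and the raw splits used
    # for plotting.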
trDat = dat['TrialLongMat']
trConds = dat['TrialConds']
nCells = len(dat['ids']['cells'])
nMua = len(dat['ids']['muas'])
nUnits = nCells+nMua
# fixed variables (don't change with cell)
locs = TMF.ZonesNames
Trials = trConds[trConds['Good']].index.values
nTrials = len(Trials)
FeatIDs = {'A':[1],'Stem':[0,1,2],'Arm': [3,4]}
Segs = FeatIDs.keys()
HA = ['Home','SegA']
Stem = ['Home','SegA','Center']
L_Arm = ['SegE', 'I2', 'SegF', 'G3', 'SegG', 'G4']
R_Arm = ['SegB', 'I1', 'SegC', 'G1', 'SegD', 'G2']
# variable to be stored
#uni_LvR_Analyses = {'Stats':{'Cue':{},'Desc':{},'Cue_Desc':{}},'Mean':{'Cue':{},'Desc':{},'Cue_Desc':{}},'SD':{'Cue':{},'Desc':{},'Cue_Desc':{}} }
uni_LvR_Analyses = {'Cue':{'Stats':{},'Mean':{},'SD':{}},'Desc':{'Stats':{},'Mean':{},'SD':{}},'Cue_Desc':{'Stats':{},'Mean':{},'SD':{}}}
Conds = ['Cue','Desc','Cue_Desc']
dat_meas = ['Stats','Mean','SD']
all_dat_spl = {} # only used for plotting as it has overlapping data points; not necessary to store it.
for unitNum in np.arange(nUnits):
# splits of data per cell
dat_splits = {}
for k in ['Cue','Desc']:
dat_splits[k] = {}
for kk in FeatIDs.keys():
dat_splits[k][kk] = {}
dat_splits['Cue_Desc'] = {'Co_Arm':{},'L_Arm':{},'R_Arm':{}}
if unitNum==0:
for k in Conds:
for ii in dat_meas:
if ii=='Stats':
for jj in ['T','P','S']:
uni_LvR_Analyses[k][ii][jj] = pd.DataFrame(np.zeros((nUnits,3)),columns=dat_splits[k].keys())
else:
for jj in ['L','R']:
uni_LvR_Analyses[k][ii][jj] = pd.DataFrame(np.zeros((nUnits,3)),columns=dat_splits[k].keys())
if unitNum<nCells:
tt = dat['ids']['cells'][str(unitNum)][0]
cl = dat['ids']['cells'][str(unitNum)][1]
fr = dat['TrialFRLongMat']['cell_'+str(unitNum)]
#tR2 = dat['TrialModelFits']['testR2'][unitNum]
#selMod = dat['TrialModelFits']['selMod'][unitNum]
tR2 = dat['fitTable2']['testR2'][unitNum]
selMod = dat['fitTable2']['selMod'][unitNum]
else:
muaID = unitNum-nCells
tt = dat['ids']['muas'][str(muaID)][0]
cl = dat['ids']['muas'][str(muaID)][1]
fr = dat['TrialFRLongMat']['mua_'+str(muaID)]
tR2 = dat['fitTable2']['testR2'][unitNum]
selMod = dat['fitTable2']['selMod'][unitNum]
# get mean fr per trial per partition
mPartFRDat = pd.DataFrame(np.zeros((nTrials,3)),columns=FeatIDs)
cue = trConds.loc[Trials,'Cues'].values
desc = trConds.loc[Trials,'Desc'].values
cnt =0
for tr in Trials:
subset = (trDat['trID']==tr) & (trDat['IO']=='Out')
for k,v in FeatIDs.items():
mPartFRDat.loc[cnt,k]=np.nanmean(fr[subset].values[v])
cnt+=1
        # univariate cue and decision tests by maze part
LvR = {}
l = {}
r = {}
# First & Second analyses: Cue/Desc
k = 'Cue'
l[k] = cue=='L'
r[k] = cue=='R'
k = 'Desc'
l[k]=desc=='L'
r[k]=desc=='R'
for k in ['Cue','Desc']:
LvR[k] = pd.DataFrame(np.zeros((3,3)),index=Segs,columns=['T','P','S'])
for kk in Segs:
lfr = mPartFRDat[kk][l[k]]
rfr = mPartFRDat[kk][r[k]]
temp = stats.ttest_ind(lfr,rfr)
LvR[k].loc[kk,'T'] = temp[0]
LvR[k].loc[kk,'P'] = temp[1]
dat_splits[k][kk]['l'] = lfr.values
dat_splits[k][kk]['r'] = rfr.values
LvR[k]['S'] = getSigLevel(LvR[k]['P'])
        # third analysis: Correct v Incorrect by L/R arm
k = 'Cue_Desc'
LvR[k] = pd.DataFrame(np.zeros((3,3)),index=['Co_Arm','L_Arm','R_Arm'],columns=['T','P','S'])
l = {}
r = {}
kk = 'Co_Arm'
l[kk] = mPartFRDat['Arm'][(cue=='L')&(desc=='L')]
r[kk] = mPartFRDat['Arm'][(cue=='R')&(desc=='R')]
kk = 'L_Arm'
l[kk]=mPartFRDat['Arm'][(desc=='L')&(cue=='L')]
r[kk]=mPartFRDat['Arm'][(desc=='L')&(cue=='R')]
kk = 'R_Arm'
l[kk]=mPartFRDat['Arm'][(desc=='R')&(cue=='L')]
r[kk]=mPartFRDat['Arm'][(desc=='R')&(cue=='R')]
for kk in ['Co_Arm','L_Arm','R_Arm']:
temp = stats.ttest_ind(l[kk],r[kk])
LvR[k].loc[kk,'T'] = temp[0]
LvR[k].loc[kk,'P'] = temp[1]
dat_splits[k][kk]['l'] = l[kk].values
dat_splits[k][kk]['r'] = r[kk].values
LvR[k]['S'] = getSigLevel(LvR[k]['P'])
        # aggregate results.
mlr = {}
slr = {}
for k,v in dat_splits.items():
mlr[k] = pd.DataFrame(np.zeros((3,2)),index=v.keys(),columns=['L','R'])
slr[k] = pd.DataFrame(np.zeros((3,2)),index=v.keys(),columns=['L','R'])
cnt = 0
for kk,vv in v.items():
l = vv['l']
r = vv['r']
mlr[k].loc[kk] = [np.mean(l),np.mean(r)]
slr[k].loc[kk] = [stats.sem(l),stats.sem(r)]
cnt+=1
for k in Conds: # keys : Cue, Desc, Cue_Desc
for ii in dat_meas:
if ii=='Stats':
for jj in ['T','P','S']:
if unitNum == 0:
uni_LvR_Analyses[k][ii][jj] = pd.DataFrame(np.zeros((nUnits,3)),columns=LvR[k].index.values)
uni_LvR_Analyses[k]['Stats'][jj].loc[unitNum] = LvR[k][jj]
else:
for jj in ['L','R']:
if unitNum == 0:
uni_LvR_Analyses[k][ii][jj] = pd.DataFrame(np.zeros((nUnits,3)),columns=LvR[k].index.values)
uni_LvR_Analyses[k]['Mean'][jj].loc[unitNum] = mlr[k][jj]
uni_LvR_Analyses[k]['SD'][jj].loc[unitNum] = slr[k][jj]
all_dat_spl[unitNum] = dat_splits
# reorg LvR to a pandas data frame with all the units
CueDescFR_Dat = pd.DataFrame()
for k in Conds:
cnt = 0
for kk in ['Mean','SD']:
for kkk in ['L','R']:
if kk=='Mean':
valName = 'MzFR_'+ kkk
elif kk == 'SD':
valName = 'SzFR_' + kkk
if cnt==0:
y = uni_LvR_Analyses[k][kk][kkk].copy()
y = y.reset_index()
y = y.melt(value_vars = uni_LvR_Analyses[k][kk][kkk].columns,id_vars='index',var_name='Seg',value_name= valName)
y['Cond'] = k
else:
z = uni_LvR_Analyses[k][kk][kkk].copy()
z = z.reset_index()
z = z.melt(value_vars = uni_LvR_Analyses[k][kk][kkk].columns,id_vars='index',value_name= valName)
y[valName] = z[valName].copy()
cnt+=1
for jj in ['T','P','S']:
z = uni_LvR_Analyses[k]['Stats'][jj].copy()
z = z.reset_index()
z = z.melt(value_vars = uni_LvR_Analyses[k]['Stats'][jj].columns ,id_vars='index', var_name = 'Seg', value_name = jj)
y[jj] = z[jj]
CueDescFR_Dat = pd.concat((CueDescFR_Dat,y))
CueDescFR_Dat['Sig'] = CueDescFR_Dat['P']<0.05
CueDescFR_Dat.rename(columns={'index':'unit'},inplace=True)
return CueDescFR_Dat, all_dat_spl
def CueDesc_SegDecAnalysis(dat):
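    # Decoding analysis: logistic-regression decoders of trial identity (L vs R)
    # from zone firing rates, fit per unit and for the whole population over
    # nested maze-segment feature sets, under three schemes: trained on correct
    # trials, on balanced correct/incorrect samples, and on incorrect trials.
    # Significance is assessed with prediction shuffles.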
nPe = 100
nRepeats = 10
nSh = 50
njobs = 20
trConds = dat['TrialConds']
trDat = dat['TrialLongMat']
nUnits = dat['fitTable2'].shape[0]
gTrialsIDs = trConds['Good']
Trials = trConds[gTrialsIDs].index.values
nTrials = len(Trials)
allZoneFR,unitIDs = reformatFRDat(dat,Trials)
CoTrials = trConds[gTrialsIDs & (trConds['Co']=='Co')].index.values
InCoTrials = trConds[gTrialsIDs & (trConds['Co']=='InCo')].index.values
nInCo = len(InCoTrials)
TrSets = {}
TrSets['all'] = np.arange(nTrials)
_,idx,_=np.intersect1d(np.array(Trials),np.array(CoTrials),return_indices=True)
TrSets['co'] = idx
_,idx,_=np.intersect1d(np.array(Trials),np.array(InCoTrials),return_indices=True)
TrSets['inco'] = idx
cueVec = trConds.loc[gTrialsIDs]['Cues'].values
descVec = trConds.loc[gTrialsIDs]['Desc'].values
predVec = {'Cue':cueVec, 'Desc':descVec}
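    # Cumulative feature sets over the 7 zone columns stored per unit
    # (home through goal); each key adds the next maze segment to the
    # decoder's inputs.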
nFeatures = {'h':np.arange(1),'a':np.arange(2),'center':np.arange(3),'be':np.arange(4),'int':np.arange(5),'cdfg':np.arange(6),'goal':np.arange(7)}
def correctTrials_Decoder(train,test):
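        # Train on one fold of correct trials; score the held-out fold ('Model')
        # and the incorrect trials against both cue and decision labels, with
        # shuffled predictions providing permutation P and Z values.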
res = pd.DataFrame(np.zeros((3,4)),columns=['Test','BAc','P','Z'])
temp = mod.fit(X_train[train],y_train[train])
res.loc[0,'Test'] = 'Model'
y_hat = temp.predict(X_train[test])
res.loc[0,'BAc'] = bac(y_train[test],y_hat)*100
# shuffle for held out train set
mod_sh = np.zeros(nSh)
for sh in np.arange(nSh):
y_perm_hat = np.random.permutation(y_hat)
mod_sh[sh] = bac(y_train[test],y_perm_hat)*100
res.loc[0,'Z'] = getPerm_Z(mod_sh, res.loc[0,'BAc'] )
res.loc[0,'P'] = getPerm_Pval(mod_sh, res.loc[0,'BAc'] )
# predictions on x test
y_hat = temp.predict(X_test)
res.loc[1,'Test'] = 'Cue'
res.loc[1,'BAc'] = bac(y_test_cue,y_hat)*100
res.loc[2,'Test'] = 'Desc'
res.loc[2,'BAc'] = bac(y_test_desc,y_hat)*100
# shuffles for ytest cue/desc
cue_sh = np.zeros(nSh)
desc_sh = np.zeros(nSh)
for sh in np.arange(nSh):
y_perm_hat = np.random.permutation(y_hat)
cue_sh[sh] = bac(y_test_cue,y_perm_hat)*100
desc_sh[sh] = bac(y_test_desc,y_perm_hat)*100
res.loc[1,'Z'] = getPerm_Z(cue_sh, res.loc[1,'BAc'] )
res.loc[1,'P'] = getPerm_Pval(cue_sh, res.loc[1,'BAc'] )
res.loc[2,'Z'] = getPerm_Z(desc_sh, res.loc[2,'BAc'] )
res.loc[2,'P'] = getPerm_Pval(desc_sh, res.loc[2,'BAc'] )
res['nSeUnits'] = nUnits
return res
def balancedCoIncoTrial_Decoder(pe,feats):
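        # Subsample correct trials to match the number of incorrect trials,
        # train one decoder on cue labels and one on decision labels, and test
        # both on the left-out correct trials (where cue and decision agree).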
res = pd.DataFrame(np.zeros((2,4)),columns=['Test','BAc','P','Z'])
# sample correct trials to match the number of incorrect trials.
samp_co_trials = np.random.choice(TrSets['co'],nInCo,replace=False)
train = np.concatenate( (TrSets['inco'], samp_co_trials ))
test = np.setdiff1d(TrSets['co'], samp_co_trials)
X_train = allZoneFR.loc[train,feats].values
X_test = allZoneFR.loc[test,feats].values
Y_cue_train = predVec['Cue'][train]
Y_desc_train = predVec['Desc'][train]
Y_test = predVec['Cue'][test] # cue and desc trials are the on the test set.
# model trained on the cue
res.loc[0,'Test'] = 'Cue'
cue_mod = mod.fit(X_train,Y_cue_train)
y_cue_hat = cue_mod.predict(X_test)
res.loc[0,'BAc'] = bac(Y_test,y_cue_hat)*100
cue_sh = np.zeros(nSh)
for sh in np.arange(nSh):
y_perm = np.random.permutation(Y_test)
cue_sh[sh] = bac(y_perm,y_cue_hat)*100
res.loc[0,'Z'] = getPerm_Z(cue_sh, res.loc[0,'BAc'] )
res.loc[0,'P'] = getPerm_Pval(cue_sh, res.loc[0,'BAc'] )
# model trained on the desc
res.loc[1,'Test'] = 'Desc'
desc_mod = mod.fit(X_train,Y_desc_train)
y_desc_hat = desc_mod.predict(X_test)
res.loc[1,'BAc'] = bac(Y_test,y_desc_hat)*100
desc_sh = np.zeros(nSh)
for sh in np.arange(nSh):
y_perm = np.random.permutation(Y_test)
desc_sh[sh] = bac(y_perm,y_desc_hat)*100
res.loc[1,'Z'] = getPerm_Z(cue_sh, res.loc[1,'BAc'] )
res.loc[1,'P'] = getPerm_Pval(cue_sh, res.loc[1,'BAc'] )
return res
def IncoTrial_Decoder(train,test):
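        # Train on folds of incorrect trials using the cue label; evaluate on
        # the held-out fold and on the correct trials (decision accuracy is
        # taken as the complement of the cue accuracy).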
res = pd.DataFrame(np.zeros((3,4)),columns=['Test','BAc','P','Z'])
temp = mod.fit(X_train[train],y_train[train])
res.loc[0,'Test'] = 'Model'
y_hat = temp.predict(X_train[test])
res.loc[0,'BAc'] = bac(y_train[test],y_hat)*100
# shuffle for held out train set
mod_sh = np.zeros(nSh)
for sh in np.arange(nSh):
y_perm_hat = np.random.permutation(y_hat)
mod_sh[sh] = bac(y_train[test],y_perm_hat)*100
res.loc[0,'Z'] = getPerm_Z(mod_sh, res.loc[0,'BAc'] )
res.loc[0,'P'] = getPerm_Pval(mod_sh, res.loc[0,'BAc'] )
# predictions on x test
y_hat = temp.predict(X_test)
res.loc[1,'Test'] = 'Cue'
res.loc[1,'BAc'] = bac(y_test_cue,y_hat)*100
res.loc[2,'Test'] = 'Desc'
res.loc[2,'BAc'] = 100-res.loc[1,'BAc']
# shuffles for ytest cue/desc
cue_sh = np.zeros(nSh)
for sh in np.arange(nSh):
y_perm_hat = np.random.permutation(y_hat)
cue_sh[sh] = bac(y_test_cue,y_perm_hat)*100
res.loc[1,'Z'] = getPerm_Z(cue_sh, res.loc[1,'BAc'] )
res.loc[1,'P'] = getPerm_Pval(cue_sh, res.loc[1,'BAc'] )
res.loc[2,'Z'] = getPerm_Z(100-cue_sh, res.loc[2,'BAc'] )
res.loc[2,'P'] = getPerm_Pval(100-cue_sh, res.loc[2,'BAc'] )
return res
with Parallel(n_jobs=njobs) as parallel:
# correct trials Model:
coModsDec = pd.DataFrame()
popCoModsDec = pd.DataFrame()
try:
nFolds = 10
y_train = predVec['Cue'][TrSets['co']]
y_test_cue = predVec['Cue'][TrSets['inco']]
y_test_desc = predVec['Desc'][TrSets['inco']]
rskf = RepeatedStratifiedKFold(n_splits=nFolds,n_repeats=nRepeats, random_state=0)
t0=time.time()
for unitNum in np.arange(nUnits):
for p,nF in nFeatures.items():
feats = unitIDs[unitNum][nF]
mod = lm.LogisticRegression(class_weight='balanced',C=1/np.sqrt(len(feats)))
X_train = allZoneFR.loc[TrSets['co'], feats ].values
X_test = allZoneFR.loc[TrSets['inco'], feats ].values
cnt=0
r = parallel(delayed(correctTrials_Decoder)(train,test) for train,test in rskf.split(X_train,y_train))
t1=time.time()
res = pd.DataFrame()
for jj in r:
res = pd.concat((jj,res))
res['Loc'] = p
res['-log(P)'] = -np.log(res['P'])
res['unit'] = unitNum
coModsDec = pd.concat((coModsDec,res))
print(end='.')
coModsDec['Decoder'] = 'Correct'
# -population
for p,nF in nFeatures.items():
feats=np.array([])
for f in nF:
feats=np.concatenate((feats,np.arange(f,nUnits*7,7)))
feats=feats.astype(int)
mod = lm.LogisticRegression(class_weight='balanced',C=1/np.sqrt(len(feats)))
X_train = allZoneFR.loc[TrSets['co'], feats ].values
X_test = allZoneFR.loc[TrSets['inco'], feats ].values
cnt=0
r = parallel(delayed(correctTrials_Decoder)(train,test) for train,test in rskf.split(X_train,y_train))
res = pd.DataFrame()
for jj in r:
res = pd.concat((jj,res))
res['Loc'] = p
res['-log(P)'] = -np.log(res['P'])
popCoModsDec = pd.concat((popCoModsDec,res))
print(end='.')
print('\nDecoding Correct Model Completed. Time = {0:.2f}s \n'.format(time.time()-t0))
popCoModsDec['Decoder'] = 'Correct'
except:
print('CorrectTrials Model Failed.')
print ("Error", sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2].tb_lineno)
# balanced correct/inco model:
baModsDec = pd.DataFrame()
popBaModsDec = pd.DataFrame()
try:
t0=time.time()
for unitNum in np.arange(nUnits):
for p,nF in nFeatures.items():
feats = unitIDs[unitNum][nF]
mod = lm.LogisticRegression(class_weight='balanced',C=1/np.sqrt(len(feats)))
r = parallel(delayed(balancedCoIncoTrial_Decoder)(pe, feats) for pe in np.arange(nPe))
res = pd.DataFrame()
for jj in r:
res = pd.concat((jj,res))
res['Loc'] = p
res['-log(P)'] = -np.log(res['P'])
res['unit'] = unitNum
baModsDec = pd.concat((baModsDec,res))
print(end='.')
baModsDec['Decoder'] = 'Balanced'
# -population
for p,nF in nFeatures.items():
feats=np.array([])
for f in nF:
feats=np.concatenate((feats,np.arange(f,nUnits*7,7)))
feats=feats.astype(int)
mod = lm.LogisticRegression(class_weight='balanced',C=1/np.sqrt(len(feats)))
r = parallel(delayed(balancedCoIncoTrial_Decoder)(pe, feats) for pe in np.arange(nPe))
res = pd.DataFrame()
for jj in r:
res = pd.concat((jj,res))
res['Loc'] = p
res['-log(P)'] = -np.log(res['P'])
popBaModsDec = pd.concat((popBaModsDec,res))
print(end='.')
print('\nDecoding Balanced Model Completed. Time = {0:.2f}s \n'.format(time.time()-t0))
popBaModsDec['Decoder'] = 'Balanced'
except:
print('Balanced Model Failed.')
print ("Error", sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2].tb_lineno)
# incorrect trials model:
InCoModsDec = pd.DataFrame()
popInCoModsDec = pd.DataFrame()
try:
t0=time.time()
nFolds = 5
y_train = predVec['Cue'][TrSets['inco']]
y_test_cue = predVec['Cue'][TrSets['co']]
y_test_desc = predVec['Desc'][TrSets['co']]
rskf = RepeatedStratifiedKFold(n_splits=nFolds,n_repeats=nRepeats, random_state=0)
for unitNum in np.arange(nUnits):
for p,nF in nFeatures.items():
feats = unitIDs[unitNum][nF]
mod = lm.LogisticRegression(class_weight='balanced',C=1/np.sqrt(len(feats)))
X_train = allZoneFR.loc[TrSets['inco'], feats ].values
X_test = allZoneFR.loc[TrSets['co'], feats ].values
cnt=0
r = parallel(delayed(IncoTrial_Decoder)(train,test) for train,test in rskf.split(X_train,y_train))
res = pd.DataFrame()
for jj in r:
res = pd.concat((jj,res))
res['Loc'] = p
res['-log(P)'] = -np.log(res['P'])
res['unit'] = unitNum
InCoModsDec = pd.concat((InCoModsDec,res))
print(end='.')
InCoModsDec['Decoder'] = 'Incorrect'
#-population
for p,nF in nFeatures.items():
feats=np.array([])
for f in nF:
feats=np.concatenate((feats,np.arange(f,nUnits*7,7)))
feats=feats.astype(int)
mod = lm.LogisticRegression(class_weight='balanced',C=1/np.sqrt(len(feats)))
X_train = allZoneFR.loc[TrSets['inco'], feats ].values
X_test = allZoneFR.loc[TrSets['co'], feats ].values
cnt=0
r = parallel(delayed(IncoTrial_Decoder)(train,test) for train,test in rskf.split(X_train,y_train))
res = | pd.DataFrame() | pandas.DataFrame |
import os
import sys
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, compat
from pandas.util import testing as tm
class TestToCSV:
@pytest.mark.xfail((3, 6, 5) > sys.version_info >= (3, 5),
reason=("Python csv library bug "
"(see https://bugs.python.org/issue32255)"))
def test_to_csv_with_single_column(self):
# see gh-18676, https://bugs.python.org/issue32255
#
# Python's CSV library adds an extraneous '""'
# before the newline when the NaN-value is in
# the first row. Otherwise, only the newline
# character is added. This behavior is inconsistent
# and was patched in https://bugs.python.org/pull_request4672.
df1 = DataFrame([None, 1])
expected1 = """\
""
1.0
"""
with tm.ensure_clean('test.csv') as path:
df1.to_csv(path, header=None, index=None)
with open(path, 'r') as f:
assert f.read() == expected1
df2 = DataFrame([1, None])
expected2 = """\
1.0
""
"""
with tm.ensure_clean('test.csv') as path:
df2.to_csv(path, header=None, index=None)
with open(path, 'r') as f:
assert f.read() == expected2
    def test_to_csv_default_encoding(self):
# GH17097
df = DataFrame({'col': ["AAAAA", "ÄÄÄÄÄ", "ßßßßß", "聞聞聞聞聞"]})
with tm.ensure_clean('test.csv') as path:
            # the default to_csv encoding is utf-8.
df.to_csv(path)
tm.assert_frame_equal(pd.read_csv(path, index_col=0), df)
def test_to_csv_quotechar(self):
df = DataFrame({'col': [1, 2]})
expected = """\
"","col"
"0","1"
"1","2"
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1) # 1=QUOTE_ALL
with open(path, 'r') as f:
assert f.read() == expected
expected = """\
$$,$col$
$0$,$1$
$1$,$2$
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, quotechar="$")
with open(path, 'r') as f:
assert f.read() == expected
with tm.ensure_clean('test.csv') as path:
with pytest.raises(TypeError, match='quotechar'):
df.to_csv(path, quoting=1, quotechar=None)
def test_to_csv_doublequote(self):
df = DataFrame({'col': ['a"a', '"bb"']})
expected = '''\
"","col"
"0","a""a"
"1","""bb"""
'''
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, doublequote=True) # QUOTE_ALL
with open(path, 'r') as f:
assert f.read() == expected
from _csv import Error
with tm.ensure_clean('test.csv') as path:
with pytest.raises(Error, match='escapechar'):
df.to_csv(path, doublequote=False) # no escapechar set
def test_to_csv_escapechar(self):
df = DataFrame({'col': ['a"a', '"bb"']})
expected = '''\
"","col"
"0","a\\"a"
"1","\\"bb\\""
'''
with tm.ensure_clean('test.csv') as path: # QUOTE_ALL
df.to_csv(path, quoting=1, doublequote=False, escapechar='\\')
with open(path, 'r') as f:
assert f.read() == expected
df = DataFrame({'col': ['a,a', ',bb,']})
expected = """\
,col
0,a\\,a
1,\\,bb\\,
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=3, escapechar='\\') # QUOTE_NONE
with open(path, 'r') as f:
assert f.read() == expected
def test_csv_to_string(self):
df = DataFrame({'col': [1, 2]})
expected_rows = [',col',
'0,1',
'1,2']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv() == expected
def test_to_csv_decimal(self):
# see gh-781
df = DataFrame({'col1': [1], 'col2': ['a'], 'col3': [10.1]})
expected_rows = [',col1,col2,col3',
'0,1,a,10.1']
expected_default = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv() == expected_default
expected_rows = [';col1;col2;col3',
'0;1;a;10,1']
expected_european_excel = tm.convert_rows_list_to_csv_str(
expected_rows)
assert df.to_csv(decimal=',', sep=';') == expected_european_excel
expected_rows = [',col1,col2,col3',
'0,1,a,10.10']
expected_float_format_default = tm.convert_rows_list_to_csv_str(
expected_rows)
assert df.to_csv(float_format='%.2f') == expected_float_format_default
expected_rows = [';col1;col2;col3',
'0;1;a;10,10']
expected_float_format = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv(decimal=',', sep=';',
float_format='%.2f') == expected_float_format
# see gh-11553: testing if decimal is taken into account for '0.0'
df = pd.DataFrame({'a': [0, 1.1], 'b': [2.2, 3.3], 'c': 1})
expected_rows = ['a,b,c',
'0^0,2^2,1',
'1^1,3^3,1']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv(index=False, decimal='^') == expected
# same but for an index
assert df.set_index('a').to_csv(decimal='^') == expected
# same for a multi-index
assert df.set_index(['a', 'b']).to_csv(decimal="^") == expected
def test_to_csv_float_format(self):
# testing if float_format is taken into account for the index
# GH 11553
df = pd.DataFrame({'a': [0, 1], 'b': [2.2, 3.3], 'c': 1})
expected_rows = ['a,b,c',
'0,2.20,1',
'1,3.30,1']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.set_index('a').to_csv(float_format='%.2f') == expected
# same for a multi-index
assert df.set_index(['a', 'b']).to_csv(
float_format='%.2f') == expected
def test_to_csv_na_rep(self):
# see gh-11553
#
# Testing if NaN values are correctly represented in the index.
df = DataFrame({'a': [0, np.NaN], 'b': [0, 1], 'c': [2, 3]})
expected_rows = ['a,b,c',
'0.0,0,2',
'_,1,3']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
# now with an index containing only NaNs
df = DataFrame({'a': np.NaN, 'b': [0, 1], 'c': [2, 3]})
expected_rows = ['a,b,c',
'_,0,2',
'_,1,3']
expected = | tm.convert_rows_list_to_csv_str(expected_rows) | pandas.util.testing.convert_rows_list_to_csv_str |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 20 09:59:35 2020
This script visualizes differences between the replicates.
@author: Thomsn
"""
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
def standardsort(bigdir, xaxis, sortlist=None, pnw_topo=None):
os.chdir(bigdir)
rep_list = [diro for diro in os.listdir() if 'rep_' in diro]
rep_list.sort()
if sortlist:
rep_list = [co for _, co in sorted(zip(sortlist[:-1], rep_list))]
rep_mat = pd.DataFrame(columns = xaxis)# np.append(xaxis, 'topo'))
rep_arr = np.array([])
topolist = []
tclist = []
shortopolist = []
    # initialise the PhyloNetworks lists unconditionally so they always exist
    pntopolist = []
    pntclist = []
    pnshortopolist = []
for repsdir in rep_list:
rep_ind = pd.read_csv(f'{repsdir}/replicate_data.csv')
rep_topo = pd.read_csv(f'{repsdir}/net_0_summary_seedrep.csv')
rtop = rep_topo['best_topology'].iat[0]
        if pnw_topo is not None:
rep_num = int(repsdir.split('rep_')[1])
ptop = pnw_topo[rep_num-1]
apstop = rearrange_topo(ptop)
pntopolist.append(apstop)
pntclist.append(TOPOCOL[apstop])
pnshortopolist.append(TOPOSHORT[apstop])
abstop = rearrange_topo(rtop)
topolist.append(abstop)
tclist.append(TOPOCOL[abstop])
shortopolist.append(TOPOSHORT[abstop])
ind_ass = []
for ind in xaxis:
if ind in list(rep_ind['ADN-ID'].values):
indpop = rep_ind.loc[rep_ind['ADN-ID'] == ind, 'pop'].values[0]
ind_ass.append(float(COL_DIC[indpop]))
else:
ind_ass.append(float(0))
# ind_ass.append(float(TOPOCOL[abstop]))
rep_mat.loc[repsdir,:] = ind_ass
if len(rep_arr) == 0:
rep_arr = np.array(ind_ass)
else:
rep_arr = np.vstack((rep_arr, np.array(ind_ass)))
reflist = [float(COL_DIC[''.join(''.join(pop.lower().split(' ')).split('.'))])\
for pop in all_ind['Pop-celine']]
rep_mat.loc['reference',:] = reflist
rep_arr = np.vstack((rep_arr, np.array(reflist)))
tclist.append(0)
shortopolist.append('phylonet')
    if pnw_topo is not None:
pntclist.append(0)
pnshortopolist.append('phylonetworks')
plt_rep(rep_mat, rep_arr, rep_list, tclist, shortopolist, pntclist, pnshortopolist)
else:
plt_rep(rep_mat, rep_arr, rep_list, tclist, shortopolist)
return pntclist
## change colormap ##
def make_new_cm(com, percent_black):
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
viridis = cm.get_cmap(com, 256)
newcolors = viridis(np.linspace(0, 1, 256))
wht = np.array([0/256, 0/256, 0/256, percent_black])
newcolors[:5, :] = wht
return ListedColormap(newcolors)
def plt_rep(rep_mat,
rep_arr,
rep_list,
topocollist,
toposhortlist,
popocollist=None,
poposhortlist=None):
from mpl_toolkits.axes_grid1 import make_axes_locatable
newcmp = make_new_cm('hsv', 0.4)
newcmp2 = make_new_cm('Set1', 0.0)
libo = [i for i, collo in enumerate(rep_mat.columns) if sum(rep_mat[collo] != 0) > 1]
fig = plt.figure(figsize=(40, 6))
ax1 = plt.subplot(111)
divider = make_axes_locatable(ax1)
cax = divider.append_axes("right", size="15%", pad=0.15)
c = ax1.pcolor(rep_arr[:,libo], edgecolors='k', linewidths=.1, cmap = newcmp)
p = cax.pcolor(np.transpose(np.array([topocollist,topocollist,topocollist])),
cmap = newcmp2, edgecolors='k', linewidths=0)
    if (popocollist is not None) and (poposhortlist is not None):
wax = divider.append_axes("right", size="15%", pad=0.15)
w = wax.pcolor(np.transpose(np.array([popocollist,popocollist,popocollist])),
cmap = newcmp2, edgecolors='k', linewidths=0)
ax1.set_title('replicates')
ax1.set_xticks(np.arange(len(libo)) + 0.5)
ax1.set_yticks(np.arange(len(rep_mat.index)) + 0.5)
ax1.invert_yaxis()
cax.invert_yaxis()
cax.set_xticks([])
cax.set_yticks([])
for i in range(len(rep_mat.index)):
text = cax.text(1.5, i + 0.5, toposhortlist[i],
ha="center", va="center", color="k")
    if (popocollist is not None) and (poposhortlist is not None):
wax.invert_yaxis()
wax.set_xticks([])
wax.set_yticks([])
for i in range(len(rep_mat.index)):
text = wax.text(1.5, i + 0.5, poposhortlist[i],
ha="center", va="center", color="k")
ax1.set_xticklabels(rep_mat.columns[libo], fontsize=5)
ax1.set_yticklabels(rep_mat.index)
plt.setp(ax1.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
plt.savefig('tree_behaviour_replicates.pdf', bbox_inches='tight')
def taximize(subtr):
TAXA = ['vitisasia',
'vitisusa',
'vveast',
'vvwest',
'vsylveast',
'vsylvwest']
given_tax = [taxon for taxon in TAXA if taxon in subtr]
return given_tax
def rearrange_topo(given_topo):
import re
abs_topo = re.sub(r':?[0-9]*\.[0-9]*E?-?[0-9]*', '', given_topo)
abs_topo = abs_topo.split('\n')[0]
openers = abs_topo.count('(')
for op in range(openers):
jack = '('.join(abs_topo.split('(')[op+1:])
commasplit = jack.split(',')
opi = np.cumsum([el.count('(') for el in commasplit])
clo = np.cumsum([el.count(')') for el in commasplit])
judge = [i for i, (op, cl) in enumerate(zip(opi,clo)) if op <= cl][0]
left = ','.join(commasplit[:judge+1])
right = ','.join(commasplit[judge+1:])
rommasplit = right.split(',')
ropi = np.cumsum([el.count('(') for el in rommasplit])
rclo = np.cumsum([el.count(')') for el in rommasplit])
rjudge = [i for i, (op, cl) in enumerate(zip(ropi,rclo)) if op <= cl][0]
right = ')'.join(right.split(')')[:rjudge+1])
if left.count(',') < right.count(','):
abs_topo = 'SPLIIIIT'.join(abs_topo.split(left))
abs_topo = f'{left}'.join(abs_topo.split(right))
abs_topo = f'{right}'.join(abs_topo.split('SPLIIIIT'))
elif left.count(',') == right.count(','):
let = taximize(left)
rit = taximize(right)
taxsort = list(set(let + rit))
taxsort.sort()
side = ['right' if tax in rit else 'left' for tax in taxsort][0]
if side == 'right':
abs_topo = 'SPLIIIIT'.join(abs_topo.split(left))
abs_topo = f'{left}'.join(abs_topo.split(right))
abs_topo = f'{right}'.join(abs_topo.split('SPLIIIIT'))
return abs_topo
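# A minimal sketch of how rearrange_topo is meant to be used: it strips branch lengths from a
# Newick string and swaps sister clades into a deterministic order, so equivalent topologies
# from different replicates end up as identical strings. The input below is a made-up example
# built from the taxon labels listed in taximize().
def _example_rearrange_topo():
    newick = '(vveast,(vvwest,vsylveast));'
    return rearrange_topo(newick)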
def import_pnwtopo(topofile):
topo_frame = pd.DataFrame(columns = ['rep', 'topo'])
repl = None
with open(topofile, 'r') as tof:
soup = tof.readlines()
for molecule in soup:
if 'rep' in molecule:
repl = int(molecule.split('rep_')[-1].split('\n')[0])
else:
topl = molecule.split('\n')[0]
topo_frame.loc[len(topo_frame),:] = [repl, topl]
return topo_frame
def commasort(topolist):
elnum = [num.count(',')+1 for num in sorted(topolist, reverse=True)]
return [elle for _, elle in sorted(zip(elnum, sorted(topolist, reverse=True)), reverse=True)]
def polytomagic(given_topo):
import re
if given_topo.count(',') == given_topo.count(')'):
print('Topology is already binary.')
return given_topo
else:
abs_topo = re.sub(r':?[0-9]*\.[0-9]*E?-?[0-9]*', '', given_topo)
abs_topo = abs_topo.split('\n')[0]
new_topo = f'{abs_topo}'
openers = abs_topo.count('(')
elements_series = | pd.Series() | pandas.Series |
"""DEM coregistration classes and functions."""
from __future__ import annotations
import copy
import concurrent.futures
import json
import os
import subprocess
import tempfile
import warnings
from enum import Enum
from typing import Any, Callable, Optional, overload, Union, Sequence, TypeVar
try:
import cv2
_has_cv2 = True
except ImportError:
_has_cv2 = False
import fiona
import geoutils as gu
from geoutils.georaster import RasterType
import numpy as np
import rasterio as rio
import rasterio.warp # pylint: disable=unused-import
import rasterio.windows # pylint: disable=unused-import
import scipy
import scipy.interpolate
import scipy.ndimage
import scipy.optimize
import skimage.transform
from rasterio import Affine
from tqdm import trange, tqdm
import pandas as pd
import xdem
try:
import richdem as rd
_has_rd = True
except ImportError:
_has_rd = False
try:
from pytransform3d.transform_manager import TransformManager
import pytransform3d.transformations
_HAS_P3D = True
except ImportError:
_HAS_P3D = False
def filter_by_range(ds: rio.DatasetReader, rangelim: tuple[float, float]):
"""
Function to filter values using a range.
"""
print('Excluding values outside of range: {0:f} to {1:f}'.format(*rangelim))
out = np.ma.masked_outside(ds, *rangelim)
out.set_fill_value(ds.fill_value)
return out
def filtered_slope(ds_slope, slope_lim=(0.1, 40)):
print("Slope filter: %0.2f - %0.2f" % slope_lim)
print("Initial count: %i" % ds_slope.count())
flt_slope = filter_by_range(ds_slope, slope_lim)
print(flt_slope.count())
return flt_slope
def apply_xy_shift(ds: rio.DatasetReader, dx: float, dy: float) -> np.ndarray:
"""
Apply horizontal shift to rio dataset using Transform affine matrix
:param ds: DEM
:param dx: dx shift value
:param dy: dy shift value
Returns:
Rio Dataset with updated transform
"""
print("X shift: ", dx)
print("Y shift: ", dy)
# Update geotransform
ds_meta = ds.meta
gt_orig = ds.transform
gt_align = Affine(gt_orig.a, gt_orig.b, gt_orig.c+dx,
gt_orig.d, gt_orig.e, gt_orig.f+dy)
print("Original transform:", gt_orig)
print("Updated transform:", gt_align)
    # Update ds Geotransform
    meta_update = ds.meta.copy()
    meta_update.update({"driver": "GTiff", "height": ds.height,
                        "width": ds.width, "transform": gt_align, "crs": ds.crs})
    # Write the shifted raster to an in-memory dataset so an updated, readable dataset is returned
    from rasterio.io import MemoryFile
    memfile = MemoryFile()
    with memfile.open(**meta_update) as dest:
        dest.write(ds.read())
    ds_align = memfile.open()
    return ds_align
def apply_z_shift(ds: rio.DatasetReader, dz: float):
    """
    Apply vertical shift to rio dataset using Transform affine matrix
    :param ds: DEM
    :param dz: vertical shift value
    """
    a = ds.read(1)
    ds_shift = a + dz
    return ds_shift
def rio_to_rda(ds: rio.DatasetReader) -> rd.rdarray:
"""
Get georeferenced richDEM array from rasterio dataset
:param ds: DEM
:return: DEM
"""
arr = ds.read(1)
rda = rd.rdarray(arr, no_data=ds.get_nodatavals()[0])
rda.geotransform = ds.get_transform()
    # richDEM expects a projection string (WKT), not ground control points
    rda.projection = ds.crs.to_wkt() if ds.crs is not None else ''
return rda
def get_terrainattr(ds: rio.DatasetReader, attrib='slope_degrees') -> rd.rdarray:
"""
Derive terrain attribute for DEM opened with rasterio. One of "slope_degrees", "slope_percentage", "aspect",
"profile_curvature", "planform_curvature", "curvature" and others (see richDEM documentation)
:param ds: DEM
:param attrib: terrain attribute
:return:
"""
rda = rio_to_rda(ds)
terrattr = rd.TerrainAttribute(rda, attrib=attrib)
return terrattr
def get_horizontal_shift(elevation_difference: np.ndarray, slope: np.ndarray, aspect: np.ndarray,
min_count: int = 20) -> tuple[float, float, float]:
"""
Calculate the horizontal shift between two DEMs using the method presented in Nuth and Kääb (2011).
:param elevation_difference: The elevation difference (reference_dem - aligned_dem).
:param slope: A slope map with the same shape as elevation_difference (units = pixels?).
:param aspect: An aspect map with the same shape as elevation_difference (units = radians).
:param min_count: The minimum allowed bin size to consider valid.
:raises ValueError: If very few finite values exist to analyse.
:returns: The pixel offsets in easting, northing, and the c_parameter (altitude?).
"""
input_x_values = aspect
with np.errstate(divide="ignore", invalid="ignore"):
input_y_values = elevation_difference / slope
# Remove non-finite values
x_values = input_x_values[np.isfinite(input_x_values) & np.isfinite(input_y_values)]
y_values = input_y_values[np.isfinite(input_x_values) & np.isfinite(input_y_values)]
assert y_values.shape[0] > 0
# Remove outliers
lower_percentile = np.percentile(y_values, 1)
upper_percentile = np.percentile(y_values, 99)
valids = np.where((y_values > lower_percentile) & (y_values < upper_percentile) & (np.abs(y_values) < 200))
x_values = x_values[valids]
y_values = y_values[valids]
# Slice the dataset into appropriate aspect bins
step = np.pi / 36
slice_bounds = np.arange(start=0, stop=2 * np.pi, step=step)
y_medians = np.zeros([len(slice_bounds)])
count = y_medians.copy()
for i, bound in enumerate(slice_bounds):
y_slice = y_values[(bound < x_values) & (x_values < (bound + step))]
if y_slice.shape[0] > 0:
y_medians[i] = np.median(y_slice)
count[i] = y_slice.shape[0]
# Filter out bins with counts below threshold
y_medians = y_medians[count > min_count]
slice_bounds = slice_bounds[count > min_count]
if slice_bounds.shape[0] < 10:
raise ValueError("Less than 10 different cells exist.")
# Make an initial guess of the a, b, and c parameters
initial_guess: tuple[float, float, float] = (3 * np.std(y_medians) / (2 ** 0.5), 0.0, np.mean(y_medians))
def estimate_ys(x_values: np.ndarray, parameters: tuple[float, float, float]) -> np.ndarray:
"""
Estimate y-values from x-values and the current parameters.
y(x) = a * cos(b - x) + c
:param x_values: The x-values to feed the above function.
:param parameters: The a, b, and c parameters to feed the above function
:returns: Estimated y-values with the same shape as the given x-values
"""
return parameters[0] * np.cos(parameters[1] - x_values) + parameters[2]
def residuals(parameters: tuple[float, float, float], y_values: np.ndarray, x_values: np.ndarray):
"""
Get the residuals between the estimated and measured values using the given parameters.
err(x, y) = est_y(x) - y
:param parameters: The a, b, and c parameters to use for the estimation.
:param y_values: The measured y-values.
:param x_values: The measured x-values
:returns: An array of residuals with the same shape as the input arrays.
"""
err = estimate_ys(x_values, parameters) - y_values
return err
# Estimate the a, b, and c parameters with least square minimisation
plsq = scipy.optimize.leastsq(func=residuals, x0=initial_guess, args=(y_medians, slice_bounds), full_output=1)
a_parameter, b_parameter, c_parameter = plsq[0]
# Calculate the easting and northing offsets from the above parameters
east_offset = a_parameter * np.sin(b_parameter)
north_offset = a_parameter * np.cos(b_parameter)
return east_offset, north_offset, c_parameter
def calculate_slope_and_aspect(dem: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
"""
Calculate the slope and aspect of a DEM.
:param dem: A numpy array of elevation values.
:returns: The slope (in pixels??) and aspect (in radians) of the DEM.
"""
# TODO: Figure out why slope is called slope_px. What unit is it in?
# TODO: Change accordingly in the get_horizontal_shift docstring.
# Calculate the gradient of the slope
gradient_y, gradient_x = np.gradient(dem)
slope_px = np.sqrt(gradient_x ** 2 + gradient_y ** 2)
aspect = np.arctan2(-gradient_x, gradient_y)
aspect += np.pi
return slope_px, aspect
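# A minimal usage sketch for the two helpers above, assuming co-gridded elevation arrays; the
# synthetic DEMs and the 5 m vertical offset are placeholders.
def _example_horizontal_shift_estimate():
    rng = np.random.RandomState(42)
    reference_dem = rng.rand(100, 100) * 50.0    # placeholder reference elevations
    dem_to_align = reference_dem + 5.0           # placeholder DEM with a vertical bias
    slope, aspect = calculate_slope_and_aspect(reference_dem)
    elevation_difference = reference_dem - dem_to_align
    # Estimate east/north pixel offsets from the aspect-binned dh/slope relationship
    east_px, north_px, c_parameter = get_horizontal_shift(elevation_difference, slope, aspect)
    return east_px, north_px, c_parameter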
def deramping(elevation_difference, x_coordinates: np.ndarray, y_coordinates: np.ndarray,
degree: int, verbose: bool = False,
metadata: Optional[dict[str, Any]] = None) -> Callable[[np.ndarray, np.ndarray], np.ndarray]:
"""
Calculate a deramping function to account for rotational and non-rigid components of the elevation difference.
:param elevation_difference: The elevation difference array to analyse.
:param x_coordinates: x-coordinates of the above array (must have the same shape as elevation_difference)
:param y_coordinates: y-coordinates of the above array (must have the same shape as elevation_difference)
:param degree: The polynomial degree to estimate the ramp.
:param verbose: Print the least squares optimization progress.
:param metadata: Optional. A metadata dictionary that will be updated with the key "deramp".
:returns: A callable function to estimate the ramp.
"""
#warnings.warn("This function is deprecated in favour of the new Coreg class.", DeprecationWarning)
# Extract only the finite values of the elevation difference and corresponding coordinates.
valid_diffs = elevation_difference[np.isfinite(elevation_difference)]
valid_x_coords = x_coordinates[np.isfinite(elevation_difference)]
valid_y_coords = y_coordinates[np.isfinite(elevation_difference)]
# Randomly subsample the values if there are more than 500,000 of them.
if valid_x_coords.shape[0] > 500_000:
random_indices = np.random.randint(0, valid_x_coords.shape[0] - 1, 500_000)
valid_diffs = valid_diffs[random_indices]
valid_x_coords = valid_x_coords[random_indices]
valid_y_coords = valid_y_coords[random_indices]
# Create a function whose residuals will be attempted to minimise
def estimate_values(x_coordinates: np.ndarray, y_coordinates: np.ndarray,
coefficients: np.ndarray, degree: int) -> np.ndarray:
"""
Estimate values from a 2D-polynomial.
:param x_coordinates: x-coordinates of the difference array (must have the same shape as elevation_difference).
:param y_coordinates: y-coordinates of the difference array (must have the same shape as elevation_difference).
:param coefficients: The coefficients (a, b, c, etc.) of the polynomial.
:param degree: The degree of the polynomial.
:raises ValueError: If the length of the coefficients list is not compatible with the degree.
:returns: The values estimated by the polynomial.
"""
# Check that the coefficient size is correct.
coefficient_size = (degree + 1) * (degree + 2) / 2
if len(coefficients) != coefficient_size:
raise ValueError()
# Do Amaury's black magic to estimate the values.
estimated_values = np.sum([coefficients[k * (k + 1) // 2 + j] * x_coordinates ** (k - j) *
y_coordinates ** j for k in range(degree + 1) for j in range(k + 1)], axis=0)
return estimated_values # type: ignore
    # Create the error function
def residuals(coefficients: np.ndarray, values: np.ndarray, x_coordinates: np.ndarray,
y_coordinates: np.ndarray, degree: int) -> np.ndarray:
"""
Calculate the difference between the estimated and measured values.
:param coefficients: Coefficients for the estimation.
:param values: The measured values.
:param x_coordinates: The x-coordinates of the values.
:param y_coordinates: The y-coordinates of the values.
:param degree: The degree of the polynomial to estimate.
:returns: An array of residuals.
"""
error = estimate_values(x_coordinates, y_coordinates, coefficients, degree) - values
error = error[np.isfinite(error)]
return error
# Run a least-squares minimisation to estimate the correct coefficients.
# TODO: Maybe remove the full_output?
initial_guess = np.zeros(shape=((degree + 1) * (degree + 2) // 2))
if verbose:
print("Deramping...")
coefficients = scipy.optimize.least_squares(
fun=residuals,
x0=initial_guess,
args=(valid_diffs, valid_x_coords, valid_y_coords, degree),
verbose=2 if verbose and degree > 1 else 0
).x
# Generate the return-function which can correctly estimate the ramp
def ramp(x_coordinates: np.ndarray, y_coordinates: np.ndarray) -> np.ndarray:
"""
Get the values of the ramp that corresponds to given coordinates.
:param x_coordinates: x-coordinates of interest.
:param y_coordinates: y-coordinates of interest.
:returns: The estimated ramp offsets.
"""
return estimate_values(x_coordinates, y_coordinates, coefficients, degree)
if metadata is not None:
metadata["deramp"] = {
"coefficients": coefficients,
"nmad": xdem.spatialstats.nmad(residuals(coefficients, valid_diffs, valid_x_coords, valid_y_coords, degree))
}
# Return the function which can be used later.
return ramp
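# A minimal usage sketch for deramping(), assuming a dDEM with a simple planar trend; the tilted
# synthetic difference below is a placeholder for a real elevation difference.
def _example_deramp_fit():
    x_coords, y_coords = np.meshgrid(np.arange(50, dtype=float), np.arange(50, dtype=float))
    elevation_difference = 0.1 * x_coords - 0.05 * y_coords + 2.0   # placeholder planar ramp
    ramp = deramping(elevation_difference, x_coords, y_coords, degree=1)
    corrected = elevation_difference - ramp(x_coords, y_coords)     # residuals close to zero
    return corrected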
def mask_as_array(reference_raster: gu.georaster.Raster, mask: Union[str, gu.geovector.Vector, gu.georaster.Raster]) -> np.ndarray:
"""
Convert a given mask into an array.
:param reference_raster: The raster to use for rasterizing the mask if the mask is a vector.
:param mask: A valid Vector, Raster or a respective filepath to a mask.
:raises: ValueError: If the mask path is invalid.
:raises: TypeError: If the wrong mask type was given.
:returns: The mask as a squeezed array.
"""
# Try to load the mask file if it's a filepath
if isinstance(mask, str):
# First try to load it as a Vector
try:
mask = gu.geovector.Vector(mask)
        # If the format is unsupported, try loading as a Raster
except fiona.errors.DriverError:
try:
mask = gu.georaster.Raster(mask)
# If that fails, raise an error
except rio.errors.RasterioIOError:
raise ValueError(f"Mask path not in a supported Raster or Vector format: {mask}")
# At this point, the mask variable is either a Raster or a Vector
# Now, convert the mask into an array by either rasterizing a Vector or by fetching a Raster's data
if isinstance(mask, gu.geovector.Vector):
mask_array = mask.create_mask(reference_raster)
elif isinstance(mask, gu.georaster.Raster):
# The true value is the maximum value in the raster, unless the maximum value is 0 or False
true_value = np.nanmax(mask.data) if not np.nanmax(mask.data) in [0, False] else True
mask_array = (mask.data == true_value).squeeze()
else:
raise TypeError(
f"Mask has invalid type: {type(mask)}. Expected one of: "
f"{[gu.georaster.Raster, gu.geovector.Vector, str, type(None)]}"
)
return mask_array
def _transform_to_bounds_and_res(shape: tuple[int, ...],
transform: rio.transform.Affine) -> tuple[rio.coords.BoundingBox, float]:
"""Get the bounding box and (horizontal) resolution from a transform and the shape of a DEM."""
bounds = rio.coords.BoundingBox(
*rio.transform.array_bounds(shape[0], shape[1], transform=transform))
resolution = (bounds.right - bounds.left) / shape[1]
return bounds, resolution
def _get_x_and_y_coords(shape: tuple[int, ...], transform: rio.transform.Affine):
"""Generate center coordinates from a transform and the shape of a DEM."""
bounds, resolution = _transform_to_bounds_and_res(shape, transform)
x_coords, y_coords = np.meshgrid(
np.linspace(bounds.left + resolution / 2, bounds.right - resolution / 2, num=shape[1]),
np.linspace(bounds.bottom + resolution / 2, bounds.top - resolution / 2, num=shape[0])[::-1]
)
return x_coords, y_coords
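# A minimal usage sketch for the private helpers above, assuming a made-up 1 m resolution
# transform; they return pixel-centre coordinate grids for a raster of the given shape.
def _example_coordinate_grids():
    transform = rio.transform.from_origin(0.0, 100.0, 1.0, 1.0)   # west, north, xsize, ysize
    return _get_x_and_y_coords(shape=(100, 100), transform=transform)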
CoregType = TypeVar("CoregType", bound="Coreg")
class Coreg:
"""
Generic Coreg class.
Made to be subclassed.
"""
_fit_called: bool = False # Flag to check if the .fit() method has been called.
_is_affine: Optional[bool] = None
def __init__(self, meta: Optional[dict[str, Any]] = None, matrix: Optional[np.ndarray] = None):
"""Instantiate a generic Coreg method."""
self._meta: dict[str, Any] = meta or {} # All __init__ functions should instantiate an empty dict.
if matrix is not None:
with warnings.catch_warnings():
# This error is fixed in the upcoming 1.8
warnings.filterwarnings("ignore", message="`np.float` is a deprecated alias for the builtin `float`")
valid_matrix = pytransform3d.transformations.check_transform(matrix)
self._meta["matrix"] = valid_matrix
def fit(self: CoregType, reference_dem: np.ndarray | np.ma.masked_array | RasterType,
dem_to_be_aligned: np.ndarray | np.ma.masked_array | RasterType,
inlier_mask: Optional[np.ndarray] = None,
transform: Optional[rio.transform.Affine] = None,
weights: Optional[np.ndarray] = None,
subsample: Union[float, int] = 1.0,
verbose: bool = False) -> CoregType:
"""
Estimate the coregistration transform on the given DEMs.
        :param reference_dem: 2D array of elevation values acting as reference.
:param dem_to_be_aligned: 2D array of elevation values to be aligned.
:param inlier_mask: Optional. 2D boolean array of areas to include in the analysis (inliers=True).
:param transform: Optional. Transform of the reference_dem. Mandatory in some cases.
:param weights: Optional. Per-pixel weights for the coregistration.
:param subsample: Subsample the input to increase performance. <1 is parsed as a fraction. >1 is a pixel count.
:param verbose: Print progress messages to stdout.
"""
if weights is not None:
raise NotImplementedError("Weights have not yet been implemented")
# Validate that both inputs are valid array-like (or Raster) types.
if not all(hasattr(dem, "__array_interface__") for dem in (reference_dem, dem_to_be_aligned)):
raise ValueError(
"Both DEMs need to be array-like (implement a numpy array interface)."
f"'reference_dem': {reference_dem}, 'dem_to_be_aligned': {dem_to_be_aligned}"
)
# If both DEMs are Rasters, validate that 'dem_to_be_aligned' is in the right grid. Then extract its data.
if isinstance(dem_to_be_aligned, gu.Raster) and isinstance(reference_dem, gu.Raster):
dem_to_be_aligned = dem_to_be_aligned.reproject(reference_dem, silent=True).data
# If any input is a Raster, use its transform if 'transform is None'.
# If 'transform' was given and any input is a Raster, trigger a warning.
# Finally, extract only the data of the raster.
for name, dem in [("reference_dem", reference_dem), ("dem_to_be_aligned", dem_to_be_aligned)]:
if hasattr(dem, "transform"):
if transform is None:
transform = getattr(dem, "transform")
elif transform is not None:
warnings.warn(f"'{name}' of type {type(dem)} overrides the given 'transform'")
"""
if name == "reference_dem":
reference_dem = dem.data
else:
dem_to_be_aligned = dem.data
"""
if transform is None:
raise ValueError("'transform' must be given if both DEMs are array-like.")
ref_dem, ref_mask = xdem.spatial_tools.get_array_and_mask(reference_dem)
tba_dem, tba_mask = xdem.spatial_tools.get_array_and_mask(dem_to_be_aligned)
# Make sure that the mask has an expected format.
if inlier_mask is not None:
inlier_mask = np.asarray(inlier_mask).squeeze()
assert inlier_mask.dtype == bool, f"Invalid mask dtype: '{inlier_mask.dtype}'. Expected 'bool'"
if np.all(~inlier_mask):
raise ValueError("'inlier_mask' had no inliers.")
ref_dem[~inlier_mask] = np.nan
tba_dem[~inlier_mask] = np.nan
if np.all(ref_mask):
raise ValueError("'reference_dem' had only NaNs")
if np.all(tba_mask):
raise ValueError("'dem_to_be_aligned' had only NaNs")
# If subsample is not equal to one, subsampling should be performed.
if subsample != 1.0:
# The full mask (inliers=True) is the inverse of the above masks and the provided mask.
full_mask = (~ref_mask & ~tba_mask & (np.asarray(inlier_mask) if inlier_mask is not None else True)).squeeze()
# If subsample is less than one, it is parsed as a fraction (e.g. 0.8 => retain 80% of the values)
if subsample < 1.0:
subsample = int(np.count_nonzero(full_mask) * (1 - subsample))
# Randomly pick N inliers in the full_mask where N=subsample
random_falses = np.random.choice(np.argwhere(full_mask.flatten()).squeeze(), int(subsample), replace=False)
# Convert the 1D indices to 2D indices
cols = (random_falses // full_mask.shape[0]).astype(int)
rows = random_falses % full_mask.shape[0]
# Set the N random inliers to be parsed as outliers instead.
full_mask[rows, cols] = False
# Run the associated fitting function
self._fit_func(ref_dem=ref_dem, tba_dem=tba_dem, transform=transform, weights=weights, verbose=verbose)
# Flag that the fitting function has been called.
self._fit_called = True
return self
@overload
def apply(self, dem: RasterType, transform: rio.transform.Affine | None) -> RasterType: ...
@overload
def apply(self, dem: np.ndarray, transform: rio.transform.Affine | None) -> np.ndarray: ...
@overload
def apply(self, dem: np.ma.masked_array, transform: rio.transform.Affine | None) -> np.ma.masked_array: ...
def apply(self, dem: np.ndarray | np.ma.masked_array | RasterType,
transform: rio.transform.Affine | None = None) -> RasterType | np.ndarray | np.ma.masked_array:
"""
Apply the estimated transform to a DEM.
:param dem: A DEM array or Raster to apply the transform on.
:param transform: The transform object of the DEM. Required if 'dem' is an array and not a Raster.
:returns: The transformed DEM.
"""
if not self._fit_called and self._meta.get("matrix") is None:
raise AssertionError(".fit() does not seem to have been called yet")
if isinstance(dem, gu.Raster):
if transform is None:
transform = dem.transform
else:
warnings.warn(f"DEM of type {type(dem)} overrides the given 'transform'")
else:
if transform is None:
raise ValueError("'transform' must be given if DEM is array-like.")
# The array to provide the functions will be an ndarray with NaNs for masked out areas.
dem_array, dem_mask = xdem.spatial_tools.get_array_and_mask(dem)
if np.all(dem_mask):
raise ValueError("'dem' had only NaNs")
# See if a _apply_func exists
try:
# Run the associated apply function
applied_dem = self._apply_func(dem_array, transform) # pylint: disable=assignment-from-no-return
# If it doesn't exist, use apply_matrix()
except NotImplementedError:
            if self.is_affine:  # This only works if it's affine, however.
# Apply the matrix around the centroid (if defined, otherwise just from the center).
applied_dem = apply_matrix(
dem_array,
transform=transform,
matrix=self.to_matrix(),
centroid=self._meta.get("centroid"),
dilate_mask=True
)
else:
raise ValueError("Coreg method is non-rigid but has no implemented _apply_func")
# If the DEM was a masked_array, copy the mask to the new DEM
if hasattr(dem, "mask"):
applied_dem = np.ma.masked_array(applied_dem, mask=dem.mask) # type: ignore
# If the DEM was a Raster with a mask, copy the mask to the new DEM
elif hasattr(dem, "data") and hasattr(dem.data, "mask"):
applied_dem = np.ma.masked_array(applied_dem, mask=dem.data.mask) # type: ignore
# If the input was a Raster, return a Raster as well.
if isinstance(dem, gu.Raster):
return dem.from_array(applied_dem, transform, dem.crs, nodata=dem.nodata)
return applied_dem
def apply_pts(self, coords: np.ndarray) -> np.ndarray:
"""
Apply the estimated transform to a set of 3D points.
:param coords: A (N, 3) array of X/Y/Z coordinates or one coordinate of shape (3,).
:returns: The transformed coordinates.
"""
if not self._fit_called and self._meta.get("matrix") is None:
raise AssertionError(".fit() does not seem to have been called yet")
# If the coordinates represent just one coordinate
if np.shape(coords) == (3,):
coords = np.reshape(coords, (1, 3))
assert len(np.shape(coords)) == 2 and np.shape(coords)[1] == 3, f"'coords' shape must be (N, 3). Given shape: {np.shape(coords)}"
coords_c = coords.copy()
# See if an _apply_pts_func exists
try:
transformed_points = self._apply_pts_func(coords)
# If it doesn't exist, use opencv's perspectiveTransform
except NotImplementedError:
            if self.is_affine:  # This only works if it's rigid, however.
# Transform the points (around the centroid if it exists).
if self._meta.get("centroid") is not None:
coords_c -= self._meta["centroid"]
transformed_points = cv2.perspectiveTransform(coords_c.reshape(1, -1, 3), self.to_matrix()).squeeze()
if self._meta.get("centroid") is not None:
transformed_points += self._meta["centroid"]
else:
raise ValueError("Coreg method is non-rigid but has not implemented _apply_pts_func")
return transformed_points
@property
def is_affine(self) -> bool:
"""Check if the transform be explained by a 3D affine transform."""
# _is_affine is found by seeing if to_matrix() raises an error.
# If this hasn't been done yet, it will be None
if self._is_affine is None:
try: # See if to_matrix() raises an error.
self.to_matrix()
self._is_affine = True
except (ValueError, NotImplementedError):
self._is_affine = False
return self._is_affine
def to_matrix(self) -> np.ndarray:
"""Convert the transform to a 4x4 transformation matrix."""
return self._to_matrix_func()
def centroid(self) -> Optional[tuple[float, float, float]]:
"""Get the centroid of the coregistration, if defined."""
meta_centroid = self._meta.get("centroid")
if meta_centroid is None:
return None
# Unpack the centroid in case it is in an unexpected format (an array, list or something else).
return (meta_centroid[0], meta_centroid[1], meta_centroid[2])
def residuals(self, reference_dem: Union[np.ndarray, np.ma.masked_array],
dem_to_be_aligned: Union[np.ndarray, np.ma.masked_array],
inlier_mask: Optional[np.ndarray] = None,
transform: Optional[rio.transform.Affine] = None) -> np.ndarray:
"""
Calculate the residual offsets (the difference) between two DEMs after applying the transformation.
        :param reference_dem: 2D array of elevation values acting as reference.
:param dem_to_be_aligned: 2D array of elevation values to be aligned.
:param inlier_mask: Optional. 2D boolean array of areas to include in the analysis (inliers=True).
:param transform: Optional. Transform of the reference_dem. Mandatory in some cases.
:returns: A 1D array of finite residuals.
"""
# Use the transform to correct the DEM to be aligned.
aligned_dem = self.apply(dem_to_be_aligned, transform=transform)
# Format the reference DEM
ref_arr, ref_mask = xdem.spatial_tools.get_array_and_mask(reference_dem)
if inlier_mask is None:
inlier_mask = np.ones(ref_arr.shape, dtype=bool)
# Create the full inlier mask (manual inliers plus non-nans)
full_mask = (~ref_mask) & np.isfinite(aligned_dem) & inlier_mask
# Calculate the DEM difference
diff = ref_arr - aligned_dem
# Sometimes, the float minimum (for float32 = -3.4028235e+38) is returned. This and inf should be excluded.
if "float" in str(diff.dtype):
full_mask[(diff == np.finfo(diff.dtype).min) | np.isinf(diff)] = False
# Return the difference values within the full inlier mask
return diff[full_mask]
def error(self, reference_dem: Union[np.ndarray, np.ma.masked_array],
dem_to_be_aligned: Union[np.ndarray, np.ma.masked_array],
error_type: str | list[str] = "nmad",
inlier_mask: Optional[np.ndarray] = None,
transform: Optional[rio.transform.Affine] = None) -> float | list[float]:
"""
Calculate the error of a coregistration approach.
Choices:
- "nmad": Default. The Normalized Median Absolute Deviation of the residuals.
- "median": The median of the residuals.
- "mean": The mean/average of the residuals
- "std": The standard deviation of the residuals.
- "rms": The root mean square of the residuals.
- "mae": The mean absolute error of the residuals.
- "count": The residual count.
        :param reference_dem: 2D array of elevation values acting as reference.
:param dem_to_be_aligned: 2D array of elevation values to be aligned.
        :param error_type: The type of error measure to calculate. May be a list of error types.
:param inlier_mask: Optional. 2D boolean array of areas to include in the analysis (inliers=True).
:param transform: Optional. Transform of the reference_dem. Mandatory in some cases.
:returns: The error measure of choice for the residuals.
"""
if isinstance(error_type, str):
error_type = [error_type]
residuals = self.residuals(reference_dem=reference_dem, dem_to_be_aligned=dem_to_be_aligned,
inlier_mask=inlier_mask, transform=transform)
error_functions = {
"nmad": xdem.spatialstats.nmad,
"median": np.median,
"mean": np.mean,
"std": np.std,
"rms": lambda residuals: np.sqrt(np.mean(np.square(residuals))),
"mae": lambda residuals: np.mean(np.abs(residuals)),
"count": lambda residuals: residuals.size
}
try:
errors = [error_functions[err_type](residuals) for err_type in error_type]
except KeyError as exception:
raise ValueError(
f"Invalid 'error_type'{'s' if len(error_type) > 1 else ''}: "
f"'{error_type}'. Choices: {list(error_functions.keys())}"
) from exception
return errors if len(errors) > 1 else errors[0]
@classmethod
def from_matrix(cls, matrix: np.ndarray):
"""
Instantiate a generic Coreg class from a transformation matrix.
:param matrix: A 4x4 transformation matrix. Shape must be (4,4).
:raises ValueError: If the matrix is incorrectly formatted.
:returns: The instantiated generic Coreg class.
"""
if np.any(~np.isfinite(matrix)):
raise ValueError(f"Matrix has non-finite values:\n{matrix}")
with warnings.catch_warnings():
# This error is fixed in the upcoming 1.8
warnings.filterwarnings("ignore", message="`np.float` is a deprecated alias for the builtin `float`")
valid_matrix = pytransform3d.transformations.check_transform(matrix)
return cls(matrix=valid_matrix)
@classmethod
def from_translation(cls, x_off: float = 0.0, y_off: float = 0.0, z_off: float = 0.0):
"""
Instantiate a generic Coreg class from a X/Y/Z translation.
:param x_off: The offset to apply in the X (west-east) direction.
:param y_off: The offset to apply in the Y (south-north) direction.
:param z_off: The offset to apply in the Z (vertical) direction.
:raises ValueError: If the given translation contained invalid values.
:returns: An instantiated generic Coreg class.
"""
matrix = np.diag(np.ones(4, dtype=float))
matrix[0, 3] = x_off
matrix[1, 3] = y_off
matrix[2, 3] = z_off
return cls.from_matrix(matrix)
def copy(self: CoregType) -> CoregType:
"""Return an identical copy of the class."""
new_coreg = self.__new__(type(self))
new_coreg.__dict__ = {key: copy.copy(value) for key, value in self.__dict__.items()}
return new_coreg
def __add__(self, other: Coreg) -> CoregPipeline:
"""Return a pipeline consisting of self and the other coreg function."""
if not isinstance(other, Coreg):
raise ValueError(f"Incompatible add type: {type(other)}. Expected 'Coreg' subclass")
return CoregPipeline([self, other])
def _fit_func(self, ref_dem: np.ndarray, tba_dem: np.ndarray, transform: Optional[rio.transform.Affine],
weights: Optional[np.ndarray], verbose: bool = False):
# FOR DEVELOPERS: This function needs to be implemented.
raise NotImplementedError("This should have been implemented by subclassing")
def _to_matrix_func(self) -> np.ndarray:
# FOR DEVELOPERS: This function needs to be implemented if the `self._meta['matrix']` keyword is not None.
# Try to see if a matrix exists.
meta_matrix = self._meta.get("matrix")
if meta_matrix is not None:
assert meta_matrix.shape == (4, 4), f"Invalid _meta matrix shape. Expected: (4, 4), got {meta_matrix.shape}"
return meta_matrix
raise NotImplementedError("This should be implemented by subclassing")
def _apply_func(self, dem: np.ndarray, transform: rio.transform.Affine) -> np.ndarray:
# FOR DEVELOPERS: This function is only needed for non-rigid transforms.
raise NotImplementedError("This should have been implemented by subclassing")
def _apply_pts_func(self, coords: np.ndarray) -> np.ndarray:
# FOR DEVELOPERS: This function is only needed for non-rigid transforms.
raise NotImplementedError("This should have been implemented by subclassing")
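# A minimal usage sketch for the Coreg base class above: a transform built from a pure
# translation can be recovered as a 4x4 matrix. from_matrix() validates the matrix with the
# optional pytransform3d dependency, which is assumed to be installed here.
def _example_translation_matrix():
    coreg = Coreg.from_translation(x_off=10.0, y_off=-5.0, z_off=2.0)
    matrix = coreg.to_matrix()
    # matrix[:3, 3] holds the (x, y, z) translation again
    return matrix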
class BiasCorr(Coreg):
"""
DEM bias correction.
Estimates the mean (or median, weighted avg., etc.) offset between two DEMs.
"""
def __init__(self, bias_func=np.average): # pylint: disable=super-init-not-called
"""
Instantiate a bias correction object.
:param bias_func: The function to use for calculating the bias. Default: (weighted) average.
"""
super().__init__(meta={"bias_func": bias_func})
def _fit_func(self, ref_dem: np.ndarray, tba_dem: np.ndarray, transform: Optional[rio.transform.Affine],
weights: Optional[np.ndarray], verbose: bool = False):
"""Estimate the bias using the bias_func."""
if verbose:
print("Estimating bias...")
diff = ref_dem - tba_dem
diff = diff[np.isfinite(diff)]
if np.count_nonzero(np.isfinite(diff)) == 0:
raise ValueError("No finite values in bias comparison.")
# Use weights if those were provided.
bias = self._meta["bias_func"](diff) if weights is None \
else self._meta["bias_func"](diff, weights=weights)
if verbose:
print("Bias estimated")
self._meta["bias"] = bias
def _to_matrix_func(self) -> np.ndarray:
"""Convert the bias to a transform matrix."""
empty_matrix = np.diag(np.ones(4, dtype=float))
empty_matrix[2, 3] += self._meta["bias"]
return empty_matrix
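# A minimal usage sketch for BiasCorr, assuming two co-gridded synthetic DEMs; the 1 m transform
# and the 3 m vertical offset are placeholder values.
def _example_bias_corr_fit():
    transform = rio.transform.from_origin(0.0, 50.0, 1.0, 1.0)   # west, north, xsize, ysize
    reference_dem = np.random.rand(50, 50) * 100.0
    dem_to_be_aligned = reference_dem - 3.0      # placeholder DEM sitting ~3 m too low
    bias_corr = BiasCorr()
    bias_corr.fit(reference_dem, dem_to_be_aligned, transform=transform)
    return bias_corr.to_matrix()                 # the [2, 3] entry holds the estimated bias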
class ICP(Coreg):
"""
Iterative Closest Point DEM coregistration.
Estimates a rigid transform (rotation + translation) between two DEMs.
Requires 'opencv'
See opencv docs for more info: https://docs.opencv.org/master/dc/d9b/classcv_1_1ppf__match__3d_1_1ICP.html
"""
def __init__(self, max_iterations=100, tolerance=0.05, rejection_scale=2.5, num_levels=6):
"""
Instantiate an ICP coregistration object.
:param max_iterations: The maximum allowed iterations before stopping.
:param tolerance: The residual change threshold after which to stop the iterations.
:param rejection_scale: The threshold (std * rejection_scale) to consider points as outliers.
:param num_levels: Number of octree levels to consider. A higher number is faster but may be more inaccurate.
"""
if not _has_cv2:
raise ValueError("Optional dependency needed. Install 'opencv'")
self.max_iterations = max_iterations
self.tolerance = tolerance
self.rejection_scale = rejection_scale
self.num_levels = num_levels
super().__init__()
def _fit_func(self, ref_dem: np.ndarray, tba_dem: np.ndarray, transform: Optional[rio.transform.Affine],
weights: Optional[np.ndarray], verbose: bool = False):
"""Estimate the rigid transform from tba_dem to ref_dem."""
if weights is not None:
warnings.warn("ICP was given weights, but does not support it.")
bounds, resolution = _transform_to_bounds_and_res(ref_dem.shape, transform)
points: dict[str, np.ndarray] = {}
# Generate the x and y coordinates for the reference_dem
x_coords, y_coords = _get_x_and_y_coords(ref_dem.shape, transform)
centroid = np.array([np.mean([bounds.left, bounds.right]), np.mean([bounds.bottom, bounds.top]), 0.0])
# Subtract by the bounding coordinates to avoid float32 rounding errors.
x_coords -= centroid[0]
y_coords -= centroid[1]
for key, dem in zip(["ref", "tba"], [ref_dem, tba_dem]):
gradient_x, gradient_y = np.gradient(dem)
normal_east = np.sin(np.arctan(gradient_y / resolution)) * -1
normal_north = np.sin(np.arctan(gradient_x / resolution))
normal_up = 1 - np.linalg.norm([normal_east, normal_north], axis=0)
valid_mask = ~np.isnan(dem) & ~np.isnan(normal_east) & ~np.isnan(normal_north)
point_cloud = np.dstack([
x_coords[valid_mask],
y_coords[valid_mask],
dem[valid_mask],
normal_east[valid_mask],
normal_north[valid_mask],
normal_up[valid_mask]
]).squeeze()
points[key] = point_cloud[~np.any(np.isnan(point_cloud), axis=1)].astype("float32")
icp = cv2.ppf_match_3d_ICP(self.max_iterations, self.tolerance, self.rejection_scale, self.num_levels)
if verbose:
print("Running ICP...")
try:
_, residual, matrix = icp.registerModelToScene(points["tba"], points["ref"])
except cv2.error as exception:
if "(expected: 'n > 0'), where" not in str(exception):
raise exception
raise ValueError(
"Not enough valid points in input data."
f"'reference_dem' had {points['ref'].size} valid points."
f"'dem_to_be_aligned' had {points['tba'].size} valid points."
)
if verbose:
print("ICP finished")
assert residual < 1000, f"ICP coregistration failed: residual={residual}, threshold: 1000"
self._meta["centroid"] = centroid
self._meta["matrix"] = matrix
class Deramp(Coreg):
"""
Polynomial DEM deramping.
Estimates an n-D polynomial between the difference of two DEMs.
"""
def __init__(self, degree: int = 1, subsample: Union[int, float] = 5e5):
"""
Instantiate a deramping correction object.
:param degree: The polynomial degree to estimate. degree=0 is a simple bias correction.
:param subsample: Factor for subsampling the input raster for speed-up.
If <= 1, will be considered a fraction of valid pixels to extract.
If > 1 will be considered the number of pixels to extract.
"""
self.degree = degree
self.subsample = subsample
super().__init__()
def _fit_func(self, ref_dem: np.ndarray, tba_dem: np.ndarray, transform: Optional[rio.transform.Affine],
weights: Optional[np.ndarray], verbose: bool = False):
"""Fit the dDEM between the DEMs to a least squares polynomial equation."""
x_coords, y_coords = _get_x_and_y_coords(ref_dem.shape, transform)
ddem = ref_dem - tba_dem
valid_mask = np.isfinite(ddem)
ddem = ddem[valid_mask]
x_coords = x_coords[valid_mask]
y_coords = y_coords[valid_mask]
# Formulate the 2D polynomial whose coefficients will be solved for.
def poly2d(x_coordinates: np.ndarray, y_coordinates: np.ndarray,
coefficients: np.ndarray) -> np.ndarray:
"""
Estimate values from a 2D-polynomial.
:param x_coordinates: x-coordinates of the difference array (must have the same shape as elevation_difference).
:param y_coordinates: y-coordinates of the difference array (must have the same shape as elevation_difference).
:param coefficients: The coefficients (a, b, c, etc.) of the polynomial.
:param degree: The degree of the polynomial.
:raises ValueError: If the length of the coefficients list is not compatible with the degree.
:returns: The values estimated by the polynomial.
"""
# Check that the coefficient size is correct.
coefficient_size = (self.degree + 1) * (self.degree + 2) / 2
if len(coefficients) != coefficient_size:
raise ValueError()
# Do Amaury's black magic to formulate and calculate the polynomial equation.
estimated_values = np.sum([coefficients[k * (k + 1) // 2 + j] * x_coordinates ** (k - j) *
y_coordinates ** j for k in range(self.degree + 1) for j in range(k + 1)], axis=0)
return estimated_values # type: ignore
def residuals(coefs: np.ndarray, x_coords: np.ndarray, y_coords: np.ndarray, targets: np.ndarray):
res = targets - poly2d(x_coords, y_coords, coefs)
return res[np.isfinite(res)]
if verbose:
print("Estimating deramp function...")
# reduce number of elements for speed
# Get number of points to extract
max_points = np.size(x_coords)
if (self.subsample <= 1) & (self.subsample >= 0):
npoints = int(self.subsample * max_points)
elif self.subsample > 1:
npoints = int(self.subsample)
else:
raise ValueError("`subsample` must be >= 0")
if max_points > npoints:
indices = np.random.choice(max_points, npoints, replace=False)
x_coords = x_coords[indices]
y_coords = y_coords[indices]
ddem = ddem[indices]
# Optimize polynomial parameters
coefs = scipy.optimize.leastsq(
func=residuals,
x0=np.zeros(shape=((self.degree + 1) * (self.degree + 2) // 2)),
args=(x_coords, y_coords, ddem)
)
self._meta["coefficients"] = coefs[0]
self._meta["func"] = lambda x, y: poly2d(x, y, coefs[0])
def _apply_func(self, dem: np.ndarray, transform: rio.transform.Affine) -> np.ndarray:
"""Apply the deramp function to a DEM."""
x_coords, y_coords = _get_x_and_y_coords(dem.shape, transform)
ramp = self._meta["func"](x_coords, y_coords)
return dem + ramp
def _apply_pts_func(self, coords: np.ndarray) -> np.ndarray:
"""Apply the deramp function to a set of points."""
new_coords = coords.copy()
new_coords[:, 2] += self._meta["func"](new_coords[:, 0], new_coords[:, 1])
return new_coords
def _to_matrix_func(self) -> np.ndarray:
"""Return a transform matrix if possible."""
if self.degree > 1:
raise ValueError(
"Nonlinear deramping degrees cannot be represented as transformation matrices."
f" (max 1, given: {self.degree})")
if self.degree == 1:
raise NotImplementedError("Vertical shift, rotation and horizontal scaling has to be implemented.")
# If degree==0, it's just a bias correction
empty_matrix = np.diag(np.ones(4, dtype=float))
empty_matrix[2, 3] += self._meta["coefficients"][0]
return empty_matrix
class CoregPipeline(Coreg):
"""
A sequential set of coregistration steps.
"""
def __init__(self, pipeline: list[Coreg]):
"""
Instantiate a new coregistration pipeline.
        :param pipeline: Coregistration steps to run in the sequence they are given.
"""
self.pipeline = pipeline
super().__init__()
def __repr__(self):
return f"CoregPipeline: {self.pipeline}"
def copy(self: CoregType) -> CoregType:
"""Return an identical copy of the class."""
new_coreg = self.__new__(type(self))
new_coreg.__dict__ = {key: copy.copy(value) for key, value in self.__dict__.items() if key != "pipeline"}
new_coreg.pipeline = [step.copy() for step in self.pipeline]
return new_coreg
def _fit_func(self, ref_dem: np.ndarray, tba_dem: np.ndarray, transform: Optional[rio.transform.Affine],
weights: Optional[np.ndarray], verbose: bool = False):
"""Fit each coregistration step with the previously transformed DEM."""
tba_dem_mod = tba_dem.copy()
for i, coreg in enumerate(self.pipeline):
if verbose:
print(f"Running pipeline step: {i + 1} / {len(self.pipeline)}")
coreg._fit_func(ref_dem, tba_dem_mod, transform=transform, weights=weights, verbose=verbose)
coreg._fit_called = True
tba_dem_mod = coreg.apply(tba_dem_mod, transform)
def _apply_func(self, dem: np.ndarray, transform: rio.transform.Affine) -> np.ndarray:
"""Apply the coregistration steps sequentially to a DEM."""
dem_mod = dem.copy()
for coreg in self.pipeline:
dem_mod = coreg.apply(dem_mod, transform)
return dem_mod
def _apply_pts_func(self, coords: np.ndarray) -> np.ndarray:
"""Apply the coregistration steps sequentially to a set of points."""
coords_mod = coords.copy()
for coreg in self.pipeline:
coords_mod = coreg.apply_pts(coords_mod).reshape(coords_mod.shape)
return coords_mod
def _to_matrix_func(self) -> np.ndarray:
"""Try to join the coregistration steps to a single transformation matrix."""
if not _HAS_P3D:
raise ValueError("Optional dependency needed. Install 'pytransform3d'")
transform_mgr = TransformManager()
with warnings.catch_warnings():
# Deprecation warning from pytransform3d. Let's hope that is fixed in the near future.
warnings.filterwarnings("ignore", message="`np.float` is a deprecated alias for the builtin `float`")
for i, coreg in enumerate(self.pipeline):
new_matrix = coreg.to_matrix()
transform_mgr.add_transform(i, i + 1, new_matrix)
return transform_mgr.get_transform(0, len(self.pipeline))
def __iter__(self):
"""Iterate over the pipeline steps."""
for coreg in self.pipeline:
yield coreg
def __add__(self, other: Union[list[Coreg], Coreg, CoregPipeline]) -> CoregPipeline:
"""Append Coreg(s) or a CoregPipeline to the pipeline."""
if not isinstance(other, Coreg):
other = list(other)
else:
other = [other]
pipelines = self.pipeline + other
return CoregPipeline(pipelines)
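# A minimal usage sketch for CoregPipeline: steps can be combined with "+" (or passed as a list)
# and collapsed to a single 4x4 matrix when every step is affine. Collapsing relies on the
# optional pytransform3d dependency, assumed to be installed here.
def _example_pipeline_to_matrix():
    pipeline = Coreg.from_translation(z_off=1.0) + Coreg.from_translation(x_off=2.0)
    return pipeline.to_matrix()   # single matrix combining both translations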
class NuthKaab(Coreg):
"""
Nuth and Kääb (2011) DEM coregistration.
Implemented after the paper:
https://doi.org/10.5194/tc-5-271-2011
"""
def __init__(self, max_iterations: int = 10, offset_threshold: float = 0.05):
"""
Instantiate a new Nuth and Kääb (2011) coregistration object.
:param max_iterations: The maximum allowed iterations before stopping.
:param offset_threshold: The residual offset threshold after which to stop the iterations.
"""
self.max_iterations = max_iterations
self.offset_threshold = offset_threshold
super().__init__()
def _fit_func(self, ref_dem: np.ndarray, tba_dem: np.ndarray, transform: Optional[rio.transform.Affine],
weights: Optional[np.ndarray], verbose: bool = False):
"""Estimate the x/y/z offset between two DEMs."""
if verbose:
print("Running Nuth and Kääb (2011) coregistration")
bounds, resolution = _transform_to_bounds_and_res(ref_dem.shape, transform)
# Make a new DEM which will be modified inplace
aligned_dem = tba_dem.copy()
# Calculate slope and aspect maps from the reference DEM
if verbose:
print(" Calculate slope and aspect")
slope, aspect = calculate_slope_and_aspect(ref_dem)
# Make index grids for the east and north dimensions
east_grid = np.arange(ref_dem.shape[1])
north_grid = np.arange(ref_dem.shape[0])
# Make a function to estimate the aligned DEM (used to construct an offset DEM)
elevation_function = scipy.interpolate.RectBivariateSpline(
x=north_grid, y=east_grid, z=np.where(np.isnan(aligned_dem), -9999, aligned_dem), kx=1, ky=1
)
# Make a function to estimate nodata gaps in the aligned DEM (used to fix the estimated offset DEM)
# Use spline degree 1, as higher degrees will create instabilities around 1 and mess up the nodata mask
nodata_function = scipy.interpolate.RectBivariateSpline(
x=north_grid, y=east_grid, z=np.isnan(aligned_dem), kx=1, ky=1
)
# Initialise east and north pixel offset variables (these will be incremented up and down)
offset_east, offset_north, bias = 0.0, 0.0, 0.0
# Calculate initial dDEM statistics
elevation_difference = ref_dem - aligned_dem
bias = np.nanmedian(elevation_difference)
nmad_old = xdem.spatialstats.nmad(elevation_difference)
if verbose:
print(" Statistics on initial dh:")
print(" Median = {:.2f} - NMAD = {:.2f}".format(bias, nmad_old))
# Iteratively run the analysis until the maximum iterations or until the error gets low enough
if verbose:
print(" Iteratively estimating horizontal shit:")
# If verbose is True, will use progressbar and print additional statements
pbar = trange(self.max_iterations, disable=not verbose, desc=" Progress")
for i in pbar:
# Calculate the elevation difference and the residual (NMAD) between them.
elevation_difference = ref_dem - aligned_dem
bias = np.nanmedian(elevation_difference)
# Correct potential biases
elevation_difference -= bias
# Estimate the horizontal shift from the implementation by Nuth and Kääb (2011)
east_diff, north_diff, _ = get_horizontal_shift( # type: ignore
elevation_difference=elevation_difference,
slope=slope,
aspect=aspect
)
if verbose:
pbar.write(" #{:d} - Offset in pixels : ({:.2f}, {:.2f})".format(i + 1, east_diff, north_diff))
# Increment the offsets with the overall offset
offset_east += east_diff
offset_north += north_diff
# Calculate new elevations from the offset x- and y-coordinates
new_elevation = elevation_function(y=east_grid + offset_east, x=north_grid - offset_north)
# Set NaNs where NaNs were in the original data
new_nans = nodata_function(y=east_grid + offset_east, x=north_grid - offset_north)
new_elevation[new_nans >= 1] = np.nan
# Assign the newly calculated elevations to the aligned_dem
aligned_dem = new_elevation
# Update statistics
elevation_difference = ref_dem - aligned_dem
bias = np.nanmedian(elevation_difference)
nmad_new = xdem.spatialstats.nmad(elevation_difference)
nmad_gain = (nmad_new - nmad_old) / nmad_old*100
if verbose:
pbar.write(" Median = {:.2f} - NMAD = {:.2f} ==> Gain = {:.2f}%".format(bias, nmad_new, nmad_gain))
# Stop if the NMAD is low and a few iterations have been made
assert ~np.isnan(nmad_new), (offset_east, offset_north)
offset = np.sqrt(east_diff**2 + north_diff**2)
if i > 1 and offset < self.offset_threshold:
if verbose:
pbar.write(f" Last offset was below the residual offset threshold of {self.offset_threshold} -> stopping")
break
nmad_old = nmad_new
# Print final results
if verbose:
print("\n Final offset in pixels (east, north) : ({:f}, {:f})".format(offset_east, offset_north))
print(" Statistics on coregistered dh:")
print(" Median = {:.2f} - NMAD = {:.2f}".format(bias, nmad_new))
self._meta["offset_east_px"] = offset_east
self._meta["offset_north_px"] = offset_north
self._meta["bias"] = bias
self._meta["resolution"] = resolution
def _to_matrix_func(self) -> np.ndarray:
"""Return a transformation matrix from the estimated offsets."""
offset_east = self._meta["offset_east_px"] * self._meta["resolution"]
offset_north = self._meta["offset_north_px"] * self._meta["resolution"]
matrix = np.diag(np.ones(4, dtype=float))
matrix[0, 3] += offset_east
matrix[1, 3] += offset_north
matrix[2, 3] += self._meta["bias"]
return matrix
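# Worked example of the matrix built above (numbers are made up for illustration): with
# offset_east_px=1.5, offset_north_px=-0.5, bias=0.2 and a 10 m resolution, the translation is
# +15 m east, -5 m north and +0.2 m up, i.e.
#     [[1, 0, 0, 15.0],
#      [0, 1, 0, -5.0],
#      [0, 0, 1,  0.2],
#      [0, 0, 0,  1.0]]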
def invert_matrix(matrix: np.ndarray) -> np.ndarray:
"""Invert a transformation matrix."""
with warnings.catch_warnings():
# Deprecation warning from pytransform3d. Let's hope that is fixed in the near future.
warnings.filterwarnings("ignore", message="`np.float` is a deprecated alias for the builtin `float`")
checked_matrix = pytransform3d.transformations.check_matrix(matrix)
# Invert the transform if wanted.
return pytransform3d.transformations.invert_transform(checked_matrix)
def apply_matrix(dem: np.ndarray, transform: rio.transform.Affine, matrix: np.ndarray, invert: bool = False,
centroid: Optional[tuple[float, float, float]] = None,
resampling: Union[int, str] = "bilinear",
dilate_mask: bool = False) -> np.ndarray:
"""
Apply a 3D transformation matrix to a 2.5D DEM.
The transformation is applied as a value correction using linear deramping, and 2D image warping.
1. Convert the DEM into a point cloud (not for gridding; for estimating the DEM shifts).
2. Transform the point cloud in 3D using the 4x4 matrix.
3. Measure the difference in elevation between the original and transformed points.
4. Estimate a linear deramp from the elevation difference, and apply the correction to the DEM values.
5. Convert the horizontal coordinates of the transformed points to pixel index coordinates.
6. Apply the pixel-wise displacement in 2D using the new pixel coordinates.
7. Apply the same displacement to a nodata-mask to exclude previous and/or new nans.
:param dem: The DEM to transform.
:param transform: The Affine transform object (georeferencing) of the DEM.
:param matrix: A 4x4 transformation matrix to apply to the DEM.
:param invert: Invert the transformation matrix.
:param centroid: The X/Y/Z transformation centroid. Irrelevant for pure translations. Defaults to the midpoint (Z=0)
:param resampling: The resampling method to use. Can be `nearest`, `bilinear`, `cubic` or an integer from 0-5.
:param dilate_mask: Dilate the nan mask to exclude edge pixels that could be wrong.
:returns: The transformed DEM with NaNs as nodata values (replaces a potential mask of the input `dem`).
"""
# Parse the resampling argument given.
if isinstance(resampling, int):
resampling_order = resampling
elif resampling == "cubic":
resampling_order = 3
elif resampling == "bilinear":
resampling_order = 1
elif resampling == "nearest":
resampling_order = 0
else:
raise ValueError(
f"`{resampling}` is not a valid resampling mode."
" Choices: [`nearest`, `bilinear`, `cubic`] or an integer."
)
# Copy the DEM to make sure the original is not modified, and convert it into an ndarray
demc = np.array(dem)
# Check if the matrix only contains a Z correction. In that case, only shift the DEM values by the bias.
empty_matrix = np.diag(np.ones(4, float))
empty_matrix[2, 3] = matrix[2, 3]
if np.mean(np.abs(empty_matrix - matrix)) == 0.0:
return demc + matrix[2, 3]
# Opencv is required down from here
if not _has_cv2:
raise ValueError("Optional dependency needed. Install 'opencv'")
nan_mask = xdem.spatial_tools.get_mask(dem)
assert np.count_nonzero(~nan_mask) > 0, "Given DEM had all nans."
# Create a filled version of the DEM. (skimage doesn't like nans)
filled_dem = np.where(~nan_mask, demc, np.nan)
# Get the centre coordinates of the DEM pixels.
x_coords, y_coords = _get_x_and_y_coords(demc.shape, transform)
bounds, resolution = _transform_to_bounds_and_res(dem.shape, transform)
# If a centroid was not given, default to the center of the DEM (at Z=0).
if centroid is None:
centroid = (np.mean([bounds.left, bounds.right]), np.mean([bounds.bottom, bounds.top]), 0.0)
else:
assert len(centroid) == 3, f"Expected centroid to be 3D X/Y/Z coordinate. Got shape of {len(centroid)}"
# Shift the coordinates to centre around the centroid.
x_coords -= centroid[0]
y_coords -= centroid[1]
# Create a point cloud of X/Y/Z coordinates
point_cloud = np.dstack((x_coords, y_coords, filled_dem))
# Shift the Z components by the centroid.
point_cloud[:, 2] -= centroid[2]
if invert:
matrix = invert_matrix(matrix)
# Transform the point cloud using the matrix.
transformed_points = cv2.perspectiveTransform(
point_cloud.reshape((1, -1, 3)),
matrix,
).reshape(point_cloud.shape)
# Estimate the vertical difference of old and new point cloud elevations.
deramp = deramping(
(point_cloud[:, :, 2] - transformed_points[:, :, 2])[~nan_mask].flatten(),
point_cloud[:, :, 0][~nan_mask].flatten(),
point_cloud[:, :, 1][~nan_mask].flatten(),
degree=1
)
# Shift the elevation values of the soon-to-be-warped DEM.
filled_dem -= deramp(x_coords, y_coords)
# Create gap-free arrays of x and y coordinates to be converted into index coordinates.
x_inds = rio.fill.fillnodata(transformed_points[:, :, 0].copy(), mask=(~nan_mask).astype("uint8"))
y_inds = rio.fill.fillnodata(transformed_points[:, :, 1].copy(), mask=(~nan_mask).astype("uint8"))
# Divide the coordinates by the resolution to create index coordinates.
x_inds /= resolution
y_inds /= resolution
# Shift the x coords so that bounds.left is equivalent to xindex -0.5
x_inds -= x_coords.min() / resolution
# Shift the y coords so that bounds.top is equivalent to yindex -0.5
y_inds = (y_coords.max() / resolution) - y_inds
# Create a skimage-compatible array of the new index coordinates that the pixels shall have after warping.
inds = np.vstack((y_inds.reshape((1,) + y_inds.shape), x_inds.reshape((1,) + x_inds.shape)))
with warnings.catch_warnings():
# An skimage warning that will hopefully be fixed soon. (2021-07-30)
warnings.filterwarnings("ignore", message="Passing `np.nan` to mean no clipping in np.clip")
# Warp the DEM
transformed_dem = skimage.transform.warp(
filled_dem,
inds,
order=resampling_order,
mode="constant",
cval=np.nan,
preserve_range=True
)
# Warp the NaN mask, setting true to all values outside the new frame.
tr_nan_mask = skimage.transform.warp(
nan_mask.astype("uint8"),
inds,
order=resampling_order,
mode="constant",
cval=1,
preserve_range=True
) > 0.1 # Due to different interpolation approaches, everything above 0.1 is assumed to be 1 (True)
if dilate_mask:
tr_nan_mask = scipy.ndimage.morphology.binary_dilation(tr_nan_mask, iterations=resampling_order)
# Apply the transformed nan_mask
transformed_dem[tr_nan_mask] = np.nan
assert np.count_nonzero(~np.isnan(transformed_dem)) > 0, "Transformed DEM has all nans."
return transformed_dem
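# Minimal usage sketch for apply_matrix (comment only; `dem_arr` and `dem_transform` are assumed
# to be a loaded DEM array and its affine transform, and the shift values are arbitrary):
#     shift = np.diag(np.ones(4))
#     shift[0, 3], shift[1, 3], shift[2, 3] = 2.0, 1.0, 0.5   # 2 m east, 1 m north, 0.5 m up
#     shifted_dem = apply_matrix(dem_arr, transform=dem_transform, matrix=shift)
# A matrix containing only a Z offset takes the early exit above and simply adds the bias to the values.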
class ZScaleCorr(Coreg):
"""
Correct linear or nonlinear elevation scale errors.
Often useful for nadir image DEM correction, where the focal length is slightly miscalculated.
DISCLAIMER: This function may introduce error when correcting non-photogrammetric biases.
See Gardelle et al. (2012) (Figure 2), http://dx.doi.org/10.3189/2012jog11j175, for curvature-related biases.
"""
def __init__(self, degree=1, bin_count=100):
"""
        Instantiate an elevation scale correction object.
:param degree: The polynomial degree to estimate.
:param bin_count: The amount of bins to divide the elevation change in.
"""
self.degree = degree
self.bin_count = bin_count
super().__init__()
def _fit_func(self, ref_dem: np.ndarray, tba_dem: np.ndarray, transform: Optional[rio.transform.Affine],
weights: Optional[np.ndarray], verbose: bool = False):
"""Estimate the scale difference between the two DEMs."""
ddem = ref_dem - tba_dem
medians = xdem.volume.hypsometric_binning(
ddem=ddem,
ref_dem=tba_dem,
bins=self.bin_count,
kind="count"
)["value"]
coefficients = np.polyfit(medians.index.mid, medians.values, deg=self.degree)
self._meta["coefficients"] = coefficients
def _apply_func(self, dem: np.ndarray, transform: rio.transform.Affine) -> np.ndarray:
"""Apply the scaling model to a DEM."""
model = np.poly1d(self._meta["coefficients"])
return dem + model(dem)
def _apply_pts_func(self, coords: np.ndarray) -> np.ndarray:
"""Apply the scaling model to a set of points."""
model = np.poly1d(self._meta["coefficients"])
new_coords = coords.copy()
new_coords[:, 2] += model(new_coords[:, 2])
return new_coords
def _to_matrix_func(self) -> np.ndarray:
"""Convert the transform to a matrix, if possible."""
if self.degree == 0: # If it's just a bias correction.
return self._meta["coefficients"][-1]
elif self.degree < 2:
raise NotImplementedError
else:
raise ValueError("A 2nd degree or higher ZScaleCorr cannot be described as a 4x4 matrix!")
class BlockwiseCoreg(Coreg):
"""
Block-wise coreg class for nonlinear estimations.
A coreg class of choice is run on an arbitrary subdivision of the raster. When later applying the coregistration,\
the optimal warping is interpolated based on X/Y/Z shifts from the coreg algorithm at the grid points.
E.g. a subdivision of 4 means to divide the DEM in four equally sized parts. These parts are then coregistered\
separately, creating four Coreg.fit results. If the subdivision is not divisible by the raster shape,\
subdivision is made as best as possible to have approximately equal pixel counts.
"""
def __init__(self, coreg: Coreg | CoregPipeline, subdivision: int, success_threshold: float = 0.8, n_threads: int | None = None, warn_failures: bool = False):
"""
Instantiate a blockwise coreg object.
:param coreg: An instantiated coreg object to fit in the subdivided DEMs.
:param subdivision: The number of chunks to divide the DEMs in. E.g. 4 means four different transforms.
        :param success_threshold: Raise an error if the fraction of chunks that succeed is below this threshold.
:param n_threads: The maximum amount of threads to use. Default=auto
:param warn_failures: Trigger or ignore warnings for each exception/warning in each block.
"""
if isinstance(coreg, type):
raise ValueError(
"The 'coreg' argument must be an instantiated Coreg subclass. "
"Hint: write e.g. ICP() instead of ICP"
)
self.coreg = coreg
self.subdivision = subdivision
self.success_threshold = success_threshold
self.n_threads = n_threads
self.warn_failures = warn_failures
super().__init__()
self._meta["coreg_meta"] = []
def _fit_func(self, ref_dem: np.ndarray, tba_dem: np.ndarray, transform: rio.transform.Affine | None,
weights: np.ndarray | None, verbose: bool = False):
"""Fit the coreg approach for each subdivision."""
groups = self.subdivide_array(tba_dem.shape)
indices = np.unique(groups)
progress_bar = tqdm(total=indices.size, desc="Coregistering chunks", disable=(not verbose))
def coregister(i: int) -> dict[str, Any] | BaseException | None:
"""
Coregister a chunk in a thread-safe way.
:returns:
* If it succeeds: A dictionary of the fitting metadata.
* If it fails: The associated exception.
* If the block is empty: None
"""
inlier_mask = groups == i
# Find the corresponding slice of the inlier_mask to subset the data
rows, cols = np.where(inlier_mask)
arrayslice = np.s_[rows.min():rows.max() + 1, cols.min(): cols.max() + 1]
# Copy a subset of the two DEMs, the mask, the coreg instance, and make a new subset transform
ref_subset = ref_dem[arrayslice].copy()
tba_subset = tba_dem[arrayslice].copy()
if any(np.all(~np.isfinite(dem)) for dem in (ref_subset, tba_subset)):
return None
mask_subset = inlier_mask[arrayslice].copy()
west, top = rio.transform.xy(transform, min(rows), min(cols), offset="ul")
transform_subset = rio.transform.from_origin(west, top, transform.a, -transform.e)
coreg = self.coreg.copy()
# Try to run the coregistration. If it fails for any reason, skip it and save the exception.
try:
coreg.fit(
reference_dem=ref_subset,
dem_to_be_aligned=tba_subset,
transform=transform_subset,
inlier_mask=mask_subset
)
except Exception as exception:
return exception
nmad, median = coreg.error(
reference_dem=ref_subset,
dem_to_be_aligned=tba_subset,
error_type=["nmad", "median"],
inlier_mask=mask_subset,
transform=transform_subset
)
meta: dict[str, Any] = {
"i": i,
"transform": transform_subset,
"inlier_count": np.count_nonzero(mask_subset & np.isfinite(ref_subset) & np.isfinite(tba_subset)),
"nmad": nmad,
"median": median
}
# Find the center of the inliers.
inlier_positions = np.argwhere(mask_subset)
mid_row = np.mean(inlier_positions[:, 0]).astype(int)
mid_col = np.mean(inlier_positions[:, 1]).astype(int)
# Find the indices of all finites within the mask
finites = np.argwhere(np.isfinite(tba_subset) & mask_subset)
# Calculate the distance between the approximate center and all finite indices
distances = np.linalg.norm(finites - np.array([mid_row, mid_col]), axis=1)
# Find the index representing the closest finite value to the center.
closest = np.argwhere(distances == distances.min())
# Assign the closest finite value as the representative point
representative_row, representative_col = finites[closest][0][0]
meta["representative_x"], meta["representative_y"] = rio.transform.xy(transform_subset, representative_row, representative_col)
meta["representative_val"] = ref_subset[representative_row, representative_col]
# If the coreg is a pipeline, copy its metadatas to the output meta
if hasattr(coreg, "pipeline"):
meta["pipeline"] = [step._meta.copy() for step in coreg.pipeline]
            # Copy all current metadata (except for the already existing keys like "i", "min_row", etc, and the "coreg_meta" key)
# This can then be iteratively restored when the apply function should be called.
meta.update({key: value for key, value in coreg._meta.items() if key not in ["coreg_meta"] + list(meta.keys())})
progress_bar.update()
return meta.copy()
        # Catch warnings and exceptions; only show them afterwards if 'warn_failures' is True.
exceptions: list[BaseException | warnings.WarningMessage] = []
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("default")
with concurrent.futures.ThreadPoolExecutor(max_workers=None) as executor:
results = executor.map(coregister, indices)
exceptions += list(caught_warnings)
empty_blocks = 0
for result in results:
if isinstance(result, BaseException):
exceptions.append(result)
elif result is None:
empty_blocks += 1
continue
else:
self._meta["coreg_meta"].append(result)
progress_bar.close()
# Stop if the success rate was below the threshold
if ((len(self._meta["coreg_meta"]) + empty_blocks) / self.subdivision) <= self.success_threshold:
raise ValueError(
f"Fitting failed for {len(exceptions)} chunks:\n" +
"\n".join(map(str, exceptions[:5])) +
f"\n... and {len(exceptions) - 5} more" if len(exceptions) > 5 else ""
)
if self.warn_failures:
for exception in exceptions:
warnings.warn(str(exception))
# Set the _fit_called parameters (only identical copies of self.coreg have actually been called)
self.coreg._fit_called = True
if hasattr(self.coreg, "pipeline"):
for step in self.coreg.pipeline:
step._fit_called = True
def _restore_metadata(self, meta: dict[str, Any]) -> None:
"""
Given some metadata, set it in the right place.
:param meta: A metadata file to update self._meta
"""
self.coreg._meta.update(meta)
if hasattr(self.coreg, "pipeline") and "pipeline" in meta:
for i, step in enumerate(self.coreg.pipeline):
step._meta.update(meta["pipeline"][i])
def to_points(self) -> np.ndarray:
"""
Convert the blockwise coregistration matrices to 3D (source -> destination) points.
The returned shape is (N, 3, 2) where the dimensions represent:
0. The point index where N is equal to the amount of subdivisions.
1. The X/Y/Z coordinate of the point.
2. The old/new position of the point.
To acquire the first point's original position: points[0, :, 0]
To acquire the first point's new position: points[0, :, 1]
To acquire the first point's Z difference: points[0, 2, 1] - points[0, 2, 0]
        :returns: An array of 3D source -> destination points.
"""
if len(self._meta["coreg_meta"]) == 0:
raise AssertionError("No coreg results exist. Has '.fit()' been called?")
points = np.empty(shape=(0, 3, 2))
for meta in self._meta["coreg_meta"]:
self._restore_metadata(meta)
#x_coord, y_coord = rio.transform.xy(meta["transform"], meta["representative_row"], meta["representative_col"])
x_coord, y_coord = meta["representative_x"], meta["representative_y"]
old_position = np.reshape([x_coord, y_coord, meta["representative_val"]], (1, 3))
new_position = self.coreg.apply_pts(old_position)
points = np.append(points, np.dstack((old_position, new_position)), axis=0)
return points
def stats(self) -> pd.DataFrame:
"""
Return statistics for each chunk in the blockwise coregistration.
* center_{x,y,z}: The center coordinate of the chunk in georeferenced units.
* {x,y,z}_off: The calculated offset in georeferenced units.
* inlier_count: The number of pixels that were inliers in the chunk.
* nmad: The NMAD after coregistration.
* median: The bias after coregistration.
:raises ValueError: If no coregistration results exist yet.
:returns: A dataframe of statistics for each chunk.
"""
points = self.to_points()
chunk_meta = {meta["i"]: meta for meta in self._meta["coreg_meta"]}
statistics: list[dict[str, Any]] = []
for i in range(points.shape[0]):
if i not in chunk_meta:
continue
statistics.append(
{
"center_x": points[i, 0, 0],
"center_y": points[i, 1, 0],
"center_z": points[i, 2, 0],
"x_off": points[i, 0, 1] - points[i, 0, 0],
"y_off": points[i, 1, 1] - points[i, 1, 0],
"z_off": points[i, 2, 1] - points[i, 2, 0],
"inlier_count": chunk_meta[i]["inlier_count"],
"nmad": chunk_meta[i]["nmad"],
"median": chunk_meta[i]["median"]
}
)
        stats_df = pd.DataFrame(statistics)
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def mssql_url() -> str:
conn = os.environ["MSSQL_URL"]
return conn
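# The connection string is read from the environment; an illustrative (made-up) value would be
# e.g. MSSQL_URL="mssql://username:password@localhost:1433/database".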
@pytest.mark.xfail
def test_on_non_select(mssql_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(mssql_url, query)
def test_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_float) as sum FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_aggregation2(mssql_url: str) -> None:
query = "select DISTINCT(test_bool) from test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation2(mssql_url: str) -> None:
query = "select MAX(test_int) as max, MIN(test_int) as min from test_table"
df = read_sql(mssql_url, query, partition_on="max", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"max": pd.Series([1314], dtype="Int64"),
"min": pd.Series([0], dtype="Int64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_udf(mssql_url: str) -> None:
query = (
"SELECT dbo.increment(test_int) AS test_int FROM test_table ORDER BY test_int"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_manual_partition(mssql_url: str) -> None:
queries = [
"SELECT * FROM test_table WHERE test_int < 2",
"SELECT * FROM test_table WHERE test_int >= 2",
]
df = read_sql(mssql_url, query=queries)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_without_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_without_partition(mssql_url: str) -> None:
query = "SELECT top 3 * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([1, 2, 0], dtype="int64"),
"test_nullint": pd.Series([3, None, 5], dtype="Int64"),
"test_str": pd.Series(["str1", "str2", "a"], dtype="object"),
"test_float": pd.Series([None, 2.2, 3.1], dtype="float64"),
"test_bool": pd.Series([True, False, None], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_large_without_partition(mssql_url: str) -> None:
query = "SELECT top 10 * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_with_partition(mssql_url: str) -> None:
query = "SELECT top 3 * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([0, 1, 2], dtype="int64"),
"test_nullint": pd.Series([5, 3, None], dtype="Int64"),
"test_str": pd.Series(["a", "str1", "str2"], dtype="object"),
"test_float": pd.Series([3.1, None, 2.20], dtype="float64"),
"test_bool": pd.Series([None, True, False], dtype="boolean"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_large_with_partition(mssql_url: str) -> None:
query = "SELECT top 10 * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": | pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64") | pandas.Series |
import pandas as pd
import glob
import os
import time
date = time.strftime('%Y-%m-%d')
# attendance in (entry)
path = r'D:\SEI-Mask-FaceAttendance\attendancein' # path to read save entry
all_files = glob.glob(os.path.join(path, "*.csv")) # advisable to use os.path.join as this makes concatenation OS independent
df_file = (pd.read_csv(f) for f in all_files)
conc_df1 = pd.concat(df_file, ignore_index=True)
import pandas as pd
import pytest
from mortgage_scenarios.core import group_by_year
@pytest.fixture
def df_month_fixture():
"""fixture of monthy payment data with three columns"""
periods = 24
index = pd.RangeIndex(periods)
columns = ['amount', 'repayment', 'amount_end']
df_months = pd.DataFrame(index=index, columns=columns)
    df_months.amount = pd.RangeIndex(periods, 0, -1)
import pandas as pd
import numpy as np
import random as rd
from pathlib import Path
from datetime import datetime as dt
import tensorflow as tf
import tensorflow.keras.optimizers as opt
from tensorflow.keras import Input
from tensorflow.keras import backend as K
from tensorflow.keras.models import Sequential, load_model, clone_model, Model
from tensorflow.keras.layers import Dense, Dropout, BatchNormalization, concatenate
from tensorflow.keras.callbacks import EarlyStopping, LearningRateScheduler, ModelCheckpoint
def initGPU():
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
def getPredifinedPatterns():
patterns = {
('pythagorean','all'):([
'Pythagorean expectation versus ratio diffrence',
'Pythagorean expectation ratio diffrence',
'Team - Pythagorean expectation diffrence'],
['Versus','Ratio','Average']),
('performance','versus'):([
'Fielding performance versus ratio diffrence',
'Pitching performance versus ratio diffrence',
'Batting performance versus ratio diffrence',
'Pythagorean expectation versus ratio diffrence'],
['Fielding','Pitching','Batting','Pythagorean']),
('performance','ratio'):([
'Fielding performance ratio diffrence',
'Pitching performance ratio diffrence',
'Batting performance ratio diffrence',
'Pythagorean expectation ratio diffrence'],
['Fielding','Pitching','Batting','Pythagorean']),
('performance','average'):([
'Average Fielding performance diffrence',
'Pitcher - Pitching performance diffrence',
'Average Batting performance diffrence',
'Team - Pythagorean expectation diffrence'],
['Fielding','Pitching','Batting','Pythagorean']),
('score','ratio'):([
'Score ratio diffrence',
'Odd ratio diffrence'],
['Score','Odd']),
('score','versus'):([
'Score versus ratio diffrence',
'Odd versus ratio diffrence'],
['Score','Odd']),
('people','average'):([
'Average age diffrence',
'Batting side diffrence',
'Throwing side diffrence',
'Average BMI diffrence'],
['Age','Batting','Throwing','BMI']),
('pitcher','average'):([
'Pitcher - Strikeouts per walk diffrence',
'Pitcher - Homeruns per game diffrence',
'Pitcher - Shutouts per game diffrence',
'Pitcher - Saves per game diffrence',
'Pitcher - ERA diffrence'],
['Strikeouts','Homeruns','Shutouts','Saves','ERA'])
}
return patterns
def getBiasFreeIndex(boolSeries, size, seed=1337):
rd.seed(seed)
def getCenteredIndex(onSize=True):
def flatter(a, b):
c = []
for i in range(len(a)):
c.append(a[i])
c.append(b[i])
return c
positive = boolSeries[boolSeries==True]
negative = boolSeries[boolSeries==False]
if onSize:
positive = rd.sample(list(positive.index), size//2)
negative = rd.sample(list(negative.index), size//2)
else:
if len(positive) > len(negative):
positive = rd.sample(list(positive.index), len(negative))
negative = negative.index.tolist()
else:
positive = positive.index.tolist()
negative = rd.sample(list(negative.index), len(positive))
return flatter(positive, negative)
training = getCenteredIndex()
boolSeries = boolSeries.loc[list(set(boolSeries.index)-set(training))]
validation = getCenteredIndex(False)
return training, validation
def divideDataByIndex(data, index):
if isinstance(data, pd.DataFrame):
return data.loc[index[0]], data.loc[index[1]]
if isinstance(data, dict):
for frame in data:
data[frame] = (data[frame].loc[index[0]],data[frame].loc[index[1]])
return data
def getDataAsDictonary(data, patterns, index=None, savePath=None):
def load():
dataDictionary = {}
for frame, pattern in patterns.items():
entry = data[pattern[0]]
if pattern[1]!=None:
entry = entry.rename(columns=dict(zip(pattern[0],pattern[1])))
dataDictionary[frame] = entry
return dataDictionary
data = load()
if index!=None:
data = divideDataByIndex(data, index)
if savePath!=None:
for frame in data:
name = frame[0]+'-'+frame[1]
if index!=None:
data[frame][0].to_csv(savePath/(name+'_training.csv'))
data[frame][1].to_csv(savePath/(name+'_validation.csv'))
else:
data[frame].to_csv(savePath/(name+'.csv'))
return data
def getModel(blueprint, predictors, targets, metric):
def getOutput():
if bool==targets[0].dtypes[0]:
activation = 'sigmoid'
loss = 'binary_crossentropy'
else:
activation = None
loss = 'MSE'
model.add(Dense(targets[0].columns.size, activation=activation, kernel_initializer='ones', name=("T_"+str(hash(name))[-4:]+"_"+str(len(model.layers)+2))))
model.compile(optimizer=blueprint['optimizer'], loss=loss, metrics=[metric])
return model
name = blueprint['predictor']+"_"+blueprint['description']
model = Sequential(name=name)
model.add(Input(shape=(predictors[0].columns.size,), name=("I_"+str(hash(name))[-8:]+"_"+str(0))))
for index, nodes in enumerate(blueprint['layers']):
model.add(Dense(nodes, blueprint['activations'][index], kernel_initializer='ones', name=("D_"+str(hash(name))[-8:]+"_"+str(index+1))))
if blueprint['dropouts'][index]>0:
model.add(Dropout(blueprint['dropouts'][index]/nodes, name=("O_"+str(hash(name))[-8:]+"_"+str(index+1))))
model.add(BatchNormalization(name=("B_"+str(hash(name))[-8:]+"_"+str(len(model.layers)+1))))
return getOutput()
def getBatchSize(size, minimum=1000):
sizes = []
for i in range((size//2)+1, 2, -1):
        if (size % i) == 0 and (size//i > minimum) and (size//i < size//6):
sizes.append(size//i)
return sizes[len(sizes)//2]
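# Worked example (illustrative): getBatchSize(12000) collects the divisors of 12000 that give a
# batch size above `minimum` and below a sixth of the data, here [1200, 1500], and returns the
# candidate at index len(sizes)//2, i.e. 1500.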
def metrics2row(blueprint, metrics):
def getCopy(blueprint):
copy = {}
for key, item in blueprint.items():
if isinstance(item, str):
copy[key] = item
else:
copy[key] = item.copy()
return copy
row = {}
row['timestamp'] = dt.now()
row.update(getCopy(blueprint.copy()))
row['length'] = len(blueprint['layers'])
row['nodes'] = sum(blueprint['layers'])
row.update(metrics)
return row
def training(path, blueprint, predictors, targets, metric, epochs=100, start=0.1, stop=0.01, output='metric'):
stepping = round(epochs/(start/stop)**0.7)
epochRange = range(epochs, 0, -stepping)
decrease = (stop/start)**(1/(len(epochRange)-1))
model = getModel(blueprint, predictors, targets, metric)
model.optimizer.lr = start
lr = start
modelPath = path/(blueprint['predictor']+"-"+blueprint['description']+'.h5')
model.save(modelPath)
trained = 0
start = dt.now()
for epoch in epochRange:
print("epoche:", epoch,"learning rate:", round(model.optimizer.lr.numpy(), 16))
monitor = EarlyStopping(monitor=('val_'+metric),restore_best_weights=True, patience=epoch)
history = model.fit(predictors[0], targets[0], getBatchSize(len(targets[0])), epoch, 0, [monitor], validation_data=(predictors[1], targets[1]))
image = load_model(modelPath)
imageMetric = image.evaluate(predictors[1], targets[1], return_dict=True, verbose=0)[metric]
modelMetric = model.evaluate(predictors[1], targets[1], return_dict=True, verbose=0)[metric]
print("Image:", imageMetric, "Model:", modelMetric)
if imageMetric>modelMetric:
model = image
else:
trained = trained+len(history.history[metric])
model.save(modelPath)
lr = lr*decrease
model.optimizer.lr = lr
    time = round((dt.now()-start).total_seconds(), 2)
metrics = model.evaluate(predictors[1], targets[1], return_dict=True)
metrics['time'] = time
metrics['epochs'] = trained
if output=='metric':
return metrics
elif output=='row':
return metrics2row(blueprint, metrics)
elif output=='ensemble':
return metrics2row(blueprint, metrics),(
pd.DataFrame(model.predict(predictors[0]), columns=targets[0].columns, index=targets[0].index),
pd.DataFrame(model.predict(predictors[1]), columns=targets[1].columns, index=targets[1].index))
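# Hedged usage sketch for training() (comment only; the data splits, model path and metric name
# are placeholders, and the blueprint values are arbitrary examples of the expected keys):
#     blueprint = {'predictor': 'performance', 'description': 'ratio', 'optimizer': 'adam',
#                  'layers': [8, 8], 'activations': ['relu', 'relu'], 'dropouts': [0, 1]}
#     metrics = training(Path('models'), blueprint, (train_X, val_X), (train_Y, val_Y),
#                        'binary_accuracy', epochs=100, start=0.1, stop=0.01, output='metric')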
def trainingRoutine(path, predictors, targets, metric, description, log=[], minimise=False, minDuration=50, maxDuration=250, epochs=1000, start=0.1, stop=0.001):
def parameterTraining(output='ensemble'):
def logModel(blueprint, metrics, update=True):
row = metrics2row(blueprint, metrics)
if update:
log.append(row)
frame = pd.DataFrame(log)
frame.to_csv(path/(str(dt.now().date())+"_log.csv"), index=False)
return row
def getBest(log):
frame = pd.DataFrame(log)
frame = frame[frame['predictor']==blueprint['predictor']]
frame = frame[frame['description']==blueprint['description']]
if minimise:
frame = frame[frame[metric]==frame[metric].min()]
else:
frame = frame[frame[metric]==frame[metric].max()]
            # Break ties successively by loss, epoch count, node count and training time.
            for column in ['loss', 'epochs', 'nodes', 'time']:
                if len(frame) > 1:
                    frame = frame[frame[column]==frame[column].min()]
            return frame[list(blueprint.keys())].to_dict('records')[0]
def evaluating(model, patience=minDuration, epochs=maxDuration):
monitor = EarlyStopping(monitor=('val_'+metric),restore_best_weights=True, patience=patience)
start = dt.now()
history = model.fit(predictors[0], targets[0], getBatchSize(len(targets[0])), epochs, 0, [monitor], validation_data=(predictors[1], targets[1]))
time = (dt.now()-start).total_seconds()
metrics = model.evaluate(predictors[1], targets[1], return_dict=True)
metrics['time'] = time
metrics['epochs'] = len(history.history[metric])
return metrics
def getLength(blueprint):
minLength = 1
maxLenght = predictors[0].columns.size*2
def getDuration():
nodes = sum(blueprint['layers'])
minNodes = blueprint['layers'][0]*minLength
maxNodes = blueprint['layers'][0]*maxLenght
return minDuration+round((maxDuration-minDuration)*(nodes-minNodes)/maxNodes)
tempLog = []
for length in range(minLength, maxLenght+1):
blueprint['layers'] = [blueprint['layers'][0]]*length
blueprint['activations'] = [blueprint['activations'][0]]*length
blueprint['dropouts'] = [blueprint['dropouts'][0]]*length
model = getModel(blueprint, predictors, targets, metric)
metrics = evaluating(model, patience=getDuration()//4, epochs=getDuration())
tempLog.append(logModel(blueprint, metrics))
return getBest(tempLog)
def getSize(blueprint):
minWidth = 1
maxWidth = predictors[0].columns.size*2
def getDuration():
nodes = sum(blueprint['layers'])
minNodes = len(blueprint['layers'])*minWidth
maxNodes = len(blueprint['layers'])*maxWidth
return minDuration+round((maxDuration-minDuration)*(nodes-minNodes)/maxNodes)
tempLog = []
for index in range(len(blueprint['layers'])):
for width in range(minWidth, maxWidth+1):
blueprint['layers'][index] = width
model = getModel(blueprint, predictors, targets, metric)
metrics = evaluating(model, patience=getDuration()//4, epochs=getDuration())
tempLog.append(logModel(blueprint, metrics))
blueprint = getBest(tempLog)
return blueprint
def getActivations(blueprint):
tempLog = []
possibilities = [None,'relu','selu','elu','tanh','softsign','softplus']
for index in range(len(blueprint['layers'])):
for activation in possibilities:
blueprint['activations'][index] = activation
model = getModel(blueprint, predictors, targets, metric)
metrics = evaluating(model)
tempLog.append(logModel(blueprint, metrics))
blueprint = getBest(tempLog)
return blueprint
def getDropouts(blueprint):
tempLog = []
for index, nodes in enumerate(blueprint['layers']):
for drop in range(nodes):
blueprint['dropouts'][index] = drop
model = getModel(blueprint, predictors, targets, metric)
metrics = evaluating(model)
tempLog.append(logModel(blueprint, metrics))
blueprint = getBest(tempLog)
return blueprint
def getOptimizer(blueprint):
tempLog = []
possibilities = ['sgd','rmsprop','adam','adadelta','adagrad','adamax','nadam']
for optimizer in possibilities:
blueprint['optimizer'] = optimizer
model = getModel(blueprint, predictors, targets, metric)
metrics = evaluating(model, patience=2*minDuration, epochs=2*maxDuration)
tempLog.append(logModel(blueprint, metrics))
return getBest(tempLog)
blueprint = {
'predictor':description[0],'description':description[1],'optimizer':'adam',
'layers':[predictors[0].columns.size],'activations':['relu'],'dropouts':[0]}
blueprint = getActivations(blueprint)
blueprint = getSize(blueprint)
blueprint = getLength(blueprint)
blueprint = getSize(blueprint)
blueprint = getActivations(blueprint)
blueprint = getDropouts(blueprint)
blueprint = getOptimizer(blueprint)
return training(path, blueprint, predictors, targets, metric, epochs, start, stop, output)
if isinstance(predictors, pd.DataFrame):
return parameterTraining(output='ensemble')
elif isinstance(predictors, dict):
metrics = []
predictions = [targets[0], targets[1]]
for description, predictors in predictors.items():
row, prediction = parameterTraining()
            predictions[0] = pd.merge(predictions[0], prediction[0], how="left", left_index=True, right_index=True, suffixes=(None, "_"+row['predictor']+"-"+row['description']))
import pymongo
import time
from sqlalchemy import create_engine
import pandas as pd
import logging
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
# wait until tweets were all collected and written in MongoDB before starting the ETL job
time.sleep(120) # seconds
#----- extract data from MongoDB ------
def extract():
""" extract data from MongoDB """
# connect mongodb container of the same composer from python
client = pymongo.MongoClient("mongodb")
# get database
db = client.tweets_stream_db
# get collection
collection = db.tweet_stream_json
return collection
#----- transform data : sentiment analysis
def transform(collection):
"""
transform mongodb cursor into dataframe
perform sentiment analysis
return dataframe with 'tweet' and 'sentiment' column
"""
logging.warning('----------The datatype of collection.find() is ------------')
logging.warning(type(collection.find())) # collection.find() is of type <class 'pymongo.cursor.Cursor'>
logging.warning('-----------------------')
# pointer into dataframe
df = pd.DataFrame(list(collection.find()))
# allocate dataframe to return
    tweet_df = pd.DataFrame()
from random import randrange
from pandas import Series
from matplotlib import pyplot
from statsmodels.tsa.seasonal import seasonal_decompose
import datetime
import pandas as pd
from pandas import read_csv
from pandas import DataFrame
import statsmodels
from matplotlib import pyplot
from statsmodels.tsa.stattools import adfuller
from numpy import log
from statsmodels.tsa.stattools import acf
from statsmodels.tsa.arima_model import ARIMA
import numpy as np
def project_arima(addr, train_list_of_med_val, test_list_of_med_val):
X=train_list_of_med_val[0]+test_list_of_med_val[0]
    df = pd.DataFrame(X, columns=['value'])
# Preppin' Data 2021 Week 20
import pandas as pd
import numpy as np
# Load data
complaints = pd.read_csv('unprepped_data\\PD 2021 Wk 20 Input - Prep Air Complaints - Complaints per Day.csv')
# Create the mean and standard deviation for each Week
weekly_stats = complaints.groupby(['Week'],as_index=False).apply(lambda s: pd.Series({
"mean": s['Complaints'].mean(),
"std": s['Complaints'].std()}))
# Create the following calculations for each of 1, 2 and 3 standard deviations:
# - The Upper Control Limit (mean+(n*standard deviation))
# - The Lower Control Limit (mean-(n*standard deviation))
# - Variation (Upper Control Limit - Lower Control Limit)
# 1 Standard Deviation
weekly_stats['Upper Control Limit (1SD)'] = weekly_stats['mean']+(1*weekly_stats['std'])
weekly_stats['Lower Control Limit (1SD)'] = weekly_stats['mean']-(1*weekly_stats['std'])
weekly_stats['Variation (1SD)'] = weekly_stats['Upper Control Limit (1SD)'] - weekly_stats['Lower Control Limit (1SD)']
# 2 Standard Deviations
weekly_stats['Upper Control Limit (2SD)'] = weekly_stats['mean']+(2*weekly_stats['std'])
weekly_stats['Lower Control Limit (2SD)'] = weekly_stats['mean']-(2*weekly_stats['std'])
weekly_stats['Variation (2SD)'] = weekly_stats['Upper Control Limit (2SD)'] - weekly_stats['Lower Control Limit (2SD)']
# 3 Standard Deviations
weekly_stats['Upper Control Limit (3SD)'] = weekly_stats['mean']+(3*weekly_stats['std'])
weekly_stats['Lower Control Limit (3SD)'] = weekly_stats['mean']-(3*weekly_stats['std'])
weekly_stats['Variation (3SD)'] = weekly_stats['Upper Control Limit (3SD)'] - weekly_stats['Lower Control Limit (3SD)']
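# Worked example (made-up numbers): a week with mean = 10 and std = 2 gives, at 2 standard
# deviations, an upper control limit of 14, a lower control limit of 6 and a variation of 8.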
# Join the original data set back on to these results
complaints_df = pd.merge(complaints, weekly_stats, on=['Week'], how='inner')
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 11 13:38:28 2021
@author: <NAME>
"""
# # IP 2020-21: DEVELOPING A VPP FOR A NACRA 17
# ## 1) Aerodynamic module
# Import necssary packages to run the code:
# In[2]:
import math
import numpy as np
import pandas as pd
import time
from math import degrees as deg
from math import radians as rad
from math import cos as cos
from math import sin as sin
from math import atan as atan
from math import sqrt as sqrt
from scipy.optimize import minimize
from scipy.interpolate import interp1d
import scipy.interpolate as spi
import matplotlib.pyplot as plt
from matplotlib.pyplot import polar
from IPython.display import display
pd.set_option('display.max_rows', 40)
pd.set_option('display.max_columns', 20)
# ### Initial data (boat, environment...)
# In[3]:
LWL = 5.25 #m
rho_water = 1025 #Kg/m3
rho_air = 1.225 #Kg/m3
A_main = 14.45 #m²
A_jib = 4 #m²
A_upwind = A_main + A_jib
A_spi = 18.5 #m²
A_downwind = A_main + A_jib + A_spi
AR = 4.85 #aspect ratio of mainsail
k = 1/(math.pi*AR)
nu_air = 1.802e-5 #kg/m-s
nu_water = 1.1892e-6
v = 1.48e-5 #m²/s
RM_max = 7397.24 #N.m
PM_max = 4550 #N.m
g = 9.81 #kg/s²
boat_weight = 163*g
crew_weight = 120*g
hull_form = 1.22
Aw_1hull = 1.914 #m²
Aw_2hulls = 3.828 #m²
# ### Aerodynamic sail coefficients, based on ORC VPP data
# In[4]:
Cl_main = [0,0.86207,1.05172,1.16379,1.34698,1.35345,1.26724,0.93103,0.38793,-0.11207]
Cd_main = [0.0431,0.02586,0.02328,0.02328,0.03259,0.11302,0.3825,0.96888,1.31578,1.34483]
Beta_main = [0,7,9,12,28,60,90,120,150,180]
a = np.array([Beta_main, Cl_main, Cd_main])
Cl_jib = [0,1,1.375,1.45,1.45,1.25,0.4,0,-0.1]
Cd_jib = [0.05,0.032,0.031,0.037,0.25,0.35,0.73,0.95,0.9]
Beta_jib = [7,15,20,27,50,60,100,150,180]
b = np.array([Beta_jib, Cl_jib, Cd_jib])
df_spi = pd.read_excel("Copy of Database.xlsx", "Asymmetric spinnaker on pole", engine = 'openpyxl')
Cl_spi = np.array(df_spi['Cl'])
Cd_spi = np.array(df_spi['Cd'])
Beta_spi = np.array(df_spi['beta'])
# ### Interpolated values for a greater set of AWA (Main, jib, Spinnaker)
# In[5]:
beta_vals = np.linspace(0,180,40) ##increment of 4.5°
z = list(beta_vals) +Beta_main + Beta_jib
set(z)
z.sort()
z = list(dict.fromkeys(z))
beta_vals_spi = list(dict.fromkeys(list(np.linspace(28, 180, 40))+list(Beta_spi)))
beta_vals_spi.sort()
Cl_main_interp = np.interp(z, Beta_main, Cl_main)
Cd_main_interp = np.interp(z, Beta_main, Cd_main)
Cl_jib_interp = np.interp(z, Beta_jib, Cl_jib)
Cd_jib_interp = np.interp(z, Beta_jib, Cd_jib)
Cl_spi_interp = np.interp(beta_vals_spi, Beta_spi, Cl_spi)
Cd_spi_interp = np.interp(beta_vals_spi, Beta_spi, Cd_spi)
#plt.plot(z, Cl_main_interp, '-k', marker = 'x', markevery = 5, markersize = 4, label = '$C_L$ Mainsail')
#plt.plot(z, Cd_main_interp, '--k', marker = 'x', markevery = 5, markersize = 4, label = '$C_D$ Mainsail')
#plt.plot(z, Cl_jib_interp, '-k', marker = '^', markevery = 5, markersize = 4, label = '$C_L$ Jib')
#plt.plot(z, Cd_jib_interp, '--k', marker = '^', markevery = 5, markersize = 4, label= '$C_D$ Jib')
#plt.plot(beta_vals_spi, Cl_spi_interp, '-k', marker = 's', markevery = 5, markersize = 4, label = '$C_L$ Spinnaker')
#plt.plot(beta_vals_spi, Cd_spi_interp, '--k', marker = 's', markevery = 5, markersize = 4, label = '$C_D$ Spinnaker')
#plt.xlabel('Apparent wind angle')
#plt.ylabel('Coefficient')
#plt.legend(bbox_to_anchor=(1, 1))
#
#plt.subplot(3, 2, 1)
#plt.plot(z, Cl_main_interp, 'x')
#plt.title("Cl mainsail vs. wind angle")
#plt.subplot(3, 2, 2)
#plt.plot(z, Cd_main_interp, 'x')
#plt.title("Cd mainsail vs. wind angle")
#plt.subplot(3, 2, 3)
#plt.plot(z, Cl_jib_interp, 'x')
#plt.title("Cl jib vs. wind angle")
#plt.subplot(3, 2, 4)
#plt.plot(z, Cd_jib_interp, 'x')
#plt.title("Cd jib vs. wind angle")
#plt.subplot(3,2,5)
#plt.plot(beta_vals_spi, Cl_spi_interp, 'x')
#plt.title("Cl spinnaker vs wind angle")
#plt.subplot(3,2,6)
#plt.plot(beta_vals_spi, Cd_spi_interp, 'x')
#plt.title("Cd spinnaker vs. wind angle")
#plt.rcParams["figure.figsize"] = (15,10)
#plt.tight_layout()
#plt.show()
# ### Combined Interpolated values for main+jib sails
# In[6]:
Cl_upwind = [x*A_main/A_upwind + y*A_jib/A_upwind for x, y in zip(Cl_main_interp, Cl_jib_interp)]
Cl_upwind[0] = 0.0001 #avoids math error
Cdp_upwind = [x*A_main/A_upwind + y*A_jib/A_upwind for x, y in zip(Cd_main_interp, Cd_jib_interp)]
Cdi = [x**2/(AR*math.pi) for x in Cl_upwind]
Cd_upwind = [x + y for x, y in zip(Cdp_upwind, Cdi)]
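# The induced-drag term follows Cdi = Cl^2 / (pi * AR); e.g. with AR = 4.85 and Cl = 1.0 this
# gives roughly 1 / (pi * 4.85) ≈ 0.066, which is added to the parasitic drag above.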
#
#plt.figure(1)
#plt.plot(z, Cl_upwind, 'x', label="Cl")
#plt.title("Combined Main + jib Cl & Cd coeffs vs. AWA")
#plt.plot(z, Cd_upwind, '^', label="Cd")
#plt.rcParams["figure.figsize"] = (12,5)
#plt.tight_layout()
#plt.show()
#
#
##In[7]:
#
#plt.figure(4)
#plt.plot(Cd_upwind, Cl_upwind, '-x')
#plt.title("Cl vs Cd (Main+jib combined)")
#plt.xlabel("Cd")
#plt.ylabel("Cl")
#plt.rcParams["figure.figsize"] = (6,5)
#plt.show()
# ### Combined interpolated values for Main + jib + Spinnaker (induced drag not included)
# In[8]:
Cl_downwind = [(x*A_main+y*A_jib+z*A_spi)/A_downwind for x, y, z in zip(Cl_main_interp, Cl_jib_interp, Cl_spi_interp)]
Cd_downwind = [(x*A_main+ y*A_jib+z*A_spi)/A_downwind for x, y, z in zip(Cd_main_interp, Cd_jib_interp, Cd_spi_interp)]
Cdi_downwind = [x**2/(AR*math.pi) for x in Cl_downwind]
Cd_downwind = [x + y for x, y in zip(Cd_downwind, Cdi_downwind)]
#plt.figure(1)
#plt.plot(beta_vals_spi, Cl_downwind, 'x', label="Cl Spi")
#plt.title("Combined Main + jib + spinnaker Cl coeff. vs wind angles")
#plt.plot(beta_vals_spi, Cd_downwind, '^', label="Cd Spi")
#plt.figure(3)
#plt.plot(Cd_downwind, Cl_downwind, 'x')
#plt.title("Cl vs Cd (Main+jib+spinnaker combined)")
#plt.xlabel("Cd")
#plt.ylabel("Cl")
#plt.rcParams["figure.figsize"] = (12,5)
#plt.tight_layout()
#plt.show()
# ## 2) Maxsurf temporary data
# ### Hull resistance dataframe and Stability
# In[12]:
df = pd.read_excel("Copy of Database.xlsx", "120kg crew hull resistance", engine = 'openpyxl')
speed = np.array(df['Speed (m/s)'])
def get_hull_R(Vs, crew_weight, heel_angle):
if crew_weight == 120*g and heel_angle > 4:
return np.interp(Vs, speed, np.array(pd.read_excel("Copy of Database.xlsx", "120kg crew hull resistance", engine = 'openpyxl')['Slender body resistance 1 hull (N)'], dtype = np.float))
elif crew_weight == 120*g and heel_angle <= 4:
return np.interp(Vs, speed, np.array(pd.read_excel("Copy of Database.xlsx", "120kg crew hull resistance", engine = 'openpyxl')['Slender body resistance 2 hulls (N)'], dtype = np.float))
elif crew_weight == 150*g and heel_angle > 4:
return np.interp(Vs, speed, np.array(pd.read_excel("Copy of Database.xlsx", "150kg crew hull resistance", engine = 'openpyxl')['Slender body resistance 1 hull (N)'], dtype = np.float))
elif crew_weight == 150*g and heel_angle <= 4:
return np.interp(Vs, speed, np.array(pd.read_excel("Copy of Database.xlsx", "150kg crew hull resistance", engine = 'openpyxl')['Slender body resistance 2 hulls (N)'], dtype = np.float))
elif crew_weight == 180*g and heel_angle > 4:
return np.interp(Vs, speed, np.array(pd.read_excel("Copy of Database.xlsx", "180kg crew hull resistance", engine = 'openpyxl')['Slender body resistance 1 hull (N)'], dtype = np.float))
elif crew_weight == 180*g and heel_angle <= 4:
return np.interp(Vs, speed, np.array(pd.read_excel("Copy of Database.xlsx", "180kg crew hull resistance", engine = 'openpyxl')['Slender body resistance 2 hulls (N)'], dtype = np.float))
df_RM = pd.read_excel("Copy of Database.xlsx", "Stability case 120kg crew", engine = 'openpyxl')
heel = np.array(df_RM['Heel (deg)'])[:15]
Fn_list = np.array(pd.read_excel("Copy of Database.xlsx", "Resid Resistance Molland-Soton", engine = 'openpyxl')['Fn'], dtype = np.float)
Rr_1hull = np.array(pd.read_excel("Copy of Database.xlsx", "Resid Resistance Molland-Soton", engine = 'openpyxl')['Total Rr 1hull [N]'], dtype = np.float)
Rr_2hulls = np.array(pd.read_excel("Copy of Database.xlsx", "Resid Resistance Molland-Soton", engine = 'openpyxl')['Total Rr 2hulls [N]'], dtype=np.float)
# In[15]:
#RM_tot_0trap = np.array(df_RM['Total RM 0 trap (N.m)'])
#RM_tot_1trap = np.array(df_RM['Total RM 1 trap (N.m)'])
#RM_tot_2trap = np.array(df_RM['Total RM 2 trap (N.m)'])
heel_gz = np.array(pd.read_excel("Copy of Database.xlsx", "Stability case 120kg crew", engine = 'openpyxl')['Heel (deg)'])[:15]
GZ_list = np.array(pd.read_excel("Copy of Database.xlsx", "Stability case 120kg crew", engine = 'openpyxl')['GZ (m)'])
if crew_weight == 120*g:
RM_tot_0trap = np.array(pd.read_excel("Copy of Database.xlsx", "Stability case 120kg crew", engine = 'openpyxl')['Total RM 0 trap (N.m)'], dtype = np.float)[:15] #N.m
RM_tot_1trap = np.array(pd.read_excel("Copy of Database.xlsx", "Stability case 120kg crew", engine = 'openpyxl')['Total RM 1 trap (N.m)'], dtype = np.float)[:15] #N
RM_tot_2trap = np.array(pd.read_excel("Copy of Database.xlsx", "Stability case 120kg crew", engine = 'openpyxl')['Total RM 2 trap (N.m)'], dtype = np.float)[:15] #N
RM_max = max(np.array(pd.read_excel("Copy of Database.xlsx", "Stability case 120kg crew", engine = 'openpyxl')['Total RM 2 trap (N.m)'], dtype = np.float)) #N.m
if crew_weight == 150*g:
RM_tot_0trap = np.array(pd.read_excel("Copy of Database.xlsx", "Stability case 150kg crew", engine = 'openpyxl')['Total RM 0 trap (N.m)'], dtype = np.float)[:15] #N.m
RM_tot_1trap = np.array(pd.read_excel("Copy of Database.xlsx", "Stability case 150kg crew", engine = 'openpyxl')['Total RM 1 trap (N.m)'], dtype = np.float)[:15] #N
RM_tot_2trap = np.array(pd.read_excel("Copy of Database.xlsx", "Stability case 150kg crew", engine = 'openpyxl')['Total RM 2 trap (N.m)'], dtype = np.float)[:15] #N
RM_max = max(np.array(pd.read_excel("Copy of Database.xlsx", "Stability case 150kg crew", engine = 'openpyxl')['Total RM 2 trap (N.m)'], dtype = np.float)) #N.m
if crew_weight == 180*g:
RM_tot_0trap = np.array(pd.read_excel("Copy of Database.xlsx", "Stability case 180kg crew", engine = 'openpyxl')['Total RM 0 trap (N.m)'], dtype = np.float)[:15] #N.m
RM_tot_1trap = np.array(pd.read_excel("Copy of Database.xlsx", "Stability case 180kg crew", engine = 'openpyxl')['Total RM 1 trap (N.m)'], dtype = np.float)[:15] #N
RM_tot_2trap = np.array(pd.read_excel("Copy of Database.xlsx", "Stability case 180kg crew", engine = 'openpyxl')['Total RM 2 trap (N.m)'], dtype = np.float)[:15] #N
    RM_max = max(np.array(pd.read_excel("Copy of Database.xlsx", "Stability case 180kg crew", engine = 'openpyxl')['Total RM 2 trap (N.m)'], dtype = np.float)) #N.m
import pandas as pd
from npi.npi import NPI, convert_practitioner_data_to_long, provider_taxonomies
from npi.pecos import PECOS
from npi.samhsa import SAMHSA
from npi.utils.utils import isid
from utils.loaders import pickle_read
def getcol(df, src, idvar, col, newname):
return (df.merge(src[[idvar, col]].drop_duplicates())
.rename(columns={col: newname}))
def conform_data_sources(source, cols, **kwargs):
'''by default includes name, then adds
other variables in a systematic fashion
can use npi_source="ploc2" for secondary practice locations
need to pass kwargs=practypes
'''
if isinstance(source, NPI):
return conform_NPI(source, cols, **kwargs)
elif isinstance(source, SAMHSA):
return conform_SAMHSA(source, cols, **kwargs)
elif isinstance(source, PECOS):
return conform_PECOS(source, cols, **kwargs)
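# Hedged usage sketch for the dispatcher above (illustrative only; `npi` is an NPI
# instance prepared as in match_samhsa_npi() below, and the column list / practitioner
# types are example values):
# practypes = ['MD/DO', 'NP', 'PA', 'CRNA', 'CNM', 'CNS']
# df_npi = conform_data_sources(npi, ['practitioner_type', 'state', 'zip5', 'tel'],
#                               practypes=practypes)
# df_npi_ploc2 = conform_data_sources(npi, ['state', 'zip5'],
#                                     practypes=practypes, npi_source='ploc2')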
def conform_NPI(source, cols, **kwargs):
df = source.expanded_fullnames.copy()
idvar = 'npi'
if 'practitioner_type' in cols:
src = (source.practitioner_type
.pipe(convert_practitioner_data_to_long,
types=kwargs['practypes']))
df = df.pipe(
getcol, src, idvar, 'PractitionerType', 'practitioner_type')
if 'state' in cols:
if not kwargs or 'npi_source' not in kwargs.keys():
src = (source
.plocstatename.drop(columns='month')
.drop_duplicates())
df = df.pipe(getcol, src, idvar, 'plocstatename', 'state')
elif 'npi_source' in kwargs.keys() and kwargs['npi_source'] == 'ploc2':
src = (source
.secondary_practice_locations[[idvar, 'ploc2statename']]
.drop_duplicates())
df = df.pipe(getcol, src, idvar, 'ploc2statename', 'state')
if 'zip5' in cols:
if not kwargs or 'npi_source' not in kwargs.keys():
src = (source.ploczip
.assign(zip5=lambda x: x['ploczip'].str[:5])
.drop(columns=['month', 'ploczip'])
.drop_duplicates())
elif 'npi_source' in kwargs.keys() and kwargs['npi_source'] == 'ploc2':
src = (source
.secondary_practice_locations
.assign(
zip5=lambda x: x['ploc2zip'].str[:5])[[idvar, 'zip5']]
.drop_duplicates())
df = df.pipe(getcol, src, idvar, 'zip5', 'zip5')
if 'tel' in cols:
if not kwargs or 'npi_source' not in kwargs.keys():
src = (source
.ploctel.drop(columns='month')
.drop_duplicates())
df = df.pipe(getcol, src, idvar, 'ploctel', 'tel')
elif 'npi_source' in kwargs.keys() and kwargs['npi_source'] == 'ploc2':
src = (source
.secondary_practice_locations[[idvar, 'ploc2tel']]
.drop_duplicates())
src['tel'] = (src.ploc2tel
.astype('str')
.str.split('.', expand=True)[0])
src['tel'] = (src.tel.str.replace('-', '')
.str.replace('(', '')
.str.replace(')', '')
.str.replace(' ', ''))
df = df.pipe(getcol, src, idvar, 'tel', 'tel')
return df.drop_duplicates()
def conform_SAMHSA(source, cols, **kwargs):
df = source.names.copy()
idvar = 'samhsa_id'
if 'practitioner_type' in cols:
df = df.pipe(getcol, source.samhsa, idvar, 'PractitionerType',
'practitioner_type')
if 'state' in cols:
df = df.pipe(getcol, source.samhsa, idvar, 'State', 'state')
if 'zip5' in cols:
src = (source
.samhsa
.assign(zip5=lambda df: df['Zip'].str[:5])[[idvar, 'zip5']]
.drop_duplicates())
df = df.pipe(getcol, src, idvar, 'zip5', 'zip5')
if 'tel' in cols:
src = source.samhsa['samhsa_id']
src2 = (pd.DataFrame(source.samhsa['Phone']
.str.replace('-', '')
.str.replace('(', '')
.str.replace(')', '')
.str.replace(' ', '')))
src = pd.concat([src, src2], axis=1)
df = df.pipe(getcol, src, idvar, 'Phone', 'tel')
return df.drop_duplicates()
def conform_PECOS(source, cols, **kwargs):
df = source.names.copy()
idvar = 'NPI'
if 'practitioner_type' in cols:
df = df.pipe(getcol, source.practitioner_type, idvar, 'Credential',
'practitioner_type')
df.loc[df.practitioner_type.isin(['MD', 'DO']),
'practitioner_type'] = 'MD/DO'
df.loc[df.practitioner_type.isin(['CNA']),
'practitioner_type'] = 'CRNA'
df = df[df.practitioner_type.isin(kwargs['practypes'])]
if 'state' in cols:
df = df.pipe(
getcol, source.physician_compare, idvar, 'State', 'state')
if 'zip5' in cols:
src = (source
.physician_compare
.assign(zip5=lambda df: df['Zip Code'].astype(str).str[:5]))
src = src[[idvar, 'zip5']].drop_duplicates()
df = df.pipe(getcol, src, idvar, 'zip5', 'zip5')
if 'tel' in cols:
src = source.physician_compare['NPI']
src2 = (source.physician_compare['Phone Number']
.astype('string')
.apply(lambda x: str(x).replace('.0', '')))
src = pd.concat([src, src2], axis=1)
df = df.pipe(getcol, pd.DataFrame(src), idvar, 'Phone Number', 'tel')
return df.drop_duplicates()
def make_clean_matches(df1, df2, id_use, id_target,
blocklist=pd.DataFrame()):
'''merges on common columns'''
# DELETE IF NAME CONFLICTS IN MATCHES
if not blocklist.empty:
df1 = (df1.merge(blocklist, how='left', indicator=True)
.query('_merge=="left_only"'))[df1.columns]
df2 = (df2.merge(blocklist, how='left', indicator=True)
.query('_merge=="left_only"'))[df2.columns]
m = df1.merge(df2)[[id_use, id_target]].drop_duplicates()
m = m[~m[id_use].duplicated(keep=False)]
m = m[~m[id_target].duplicated(keep=False)]
assert m[id_use].is_unique
assert m[id_target].is_unique
return m
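# Hedged toy example of the one-to-one rule above (values are illustrative):
# a = pd.DataFrame({'samhsa_id': [1, 2, 3], 'zip5': ['02139', '02139', '97331']})
# b = pd.DataFrame({'npi': [10, 20, 30], 'zip5': ['02139', '02139', '97331']})
# make_clean_matches(a, b, id_use='samhsa_id', id_target='npi')
# # -> only (3, 30) survives: the '02139' rows merge many-to-many, so neither ID
# #    is unique there and both candidate pairs are dropped.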
def make_clean_matches_iterate(df1, idvar1, ordervar, df2, idvar2, blocklist):
orders = sorted((df1[ordervar].value_counts().index.tolist()))
for o in orders:
m = make_clean_matches(
df1.query(f'order=={o}'),
df2,
id_use=idvar1, id_target=idvar2,
blocklist=blocklist[[x for x in blocklist.columns
if x != 'order']])
blocklist = blocklist.append(m.assign(order=o))
return blocklist
def reconcat_names(df, firstname, middlename, lastname):
n = (df.assign(
n=lambda x: x[firstname] + ' ' + x[middlename] + ' ' + x[lastname])
.n)
df[f'{firstname}_r'] = n.apply(lambda y: y.split()[0])
df[f'{middlename}_r'] = n.apply(lambda y: ' '.join(y.split()[1:-1]))
df[f'{lastname}_r'] = n.apply(lambda y: y.split()[-1])
return df
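# Hedged toy example (values are illustrative): the concatenated name is re-split so the
# first token becomes the first name, the last token the last name, and anything in
# between the middle name.
# d = pd.DataFrame({'fn': ['mary jo'], 'mn': [''], 'ln': ['smith']})
# reconcat_names(d, 'fn', 'mn', 'ln')   # fn_r='mary', mn_r='jo', ln_r='smith'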
def generate_matches(s, npi, pecos, varlist, practypes, final_crosswalk):
from npi.utils.globalcache import c
df1 = conform_data_sources(s, varlist)
df2 = conform_data_sources(npi, varlist, practypes=practypes)
final_crosswalk = c.make_clean_matches_iterate(df1, 'samhsa_id', 'order',
df2, 'npi',
final_crosswalk)
print('(1) Found %s matches' % final_crosswalk.shape[0])
df3 = conform_data_sources(pecos, varlist, practypes=practypes)
df3 = df3.rename(columns={'NPI': 'npi'})
final_crosswalk = c.make_clean_matches_iterate(df1, 'samhsa_id', 'order',
df3, 'npi',
final_crosswalk)
print('(2) Found %s matches' % final_crosswalk.shape[0])
df4 = conform_data_sources(npi, varlist,
practypes=practypes, npi_source="ploc2")
final_crosswalk = c.make_clean_matches_iterate(df1, 'samhsa_id', 'order',
df4, 'npi',
final_crosswalk)
print('(3) Found %s matches' % final_crosswalk.shape[0])
return final_crosswalk
# out = make_clean_matches_iterate(df1, 'samhsa_id', 'order', df2, 'npi', pd.DataFrame())
#
# priority_names = out[['samhsa_id']].assign(order=1).merge(df1)
# priority_names['new_firstname'] = priority_names.assign(n=lambda df: df['firstname'] + ' ' + df['middlename'] + ' ' + df['lastname']).n.apply(lambda x: x.split()[0])
# priority_names['new_middlename'] = priority_names.assign(n=lambda df: df['firstname'] + ' ' + df['middlename'] + ' ' + df['lastname']).n.apply(lambda x: ' '.join(x.split()[1:-1]))
# priority_names['new_lastname'] = priority_names.assign(n=lambda df: df['firstname'] + ' ' + df['middlename'] + ' ' + df['lastname']).n.apply(lambda x: x.split()[-1])
# priority_names = priority_names.assign(new_suffix=lambda df: df.Suffix)
# priority_names = priority_names[['samhsa_id','new_firstname','new_middlename','new_lastname','new_suffix','practitioner_type','state','zip5']].drop_duplicates()
#
# # USE RECONCAT NAMES
# priority_names2 = out[['npi']].merge(df2)
# priority_names2['new_firstname'] = priority_names2.assign(n=lambda df: df['pfname'] + ' ' + df['pmname'] + ' ' + df['plname']).n.apply(lambda x: x.split()[0])
# priority_names2['new_middlename'] = priority_names2.assign(n=lambda df: df['pfname'] + ' ' + df['pmname'] + ' ' + df['plname']).n.apply(lambda x: ' '.join(x.split()[1:-1]))
# priority_names2['new_lastname'] = priority_names2.assign(n=lambda df: df['pfname'] + ' ' + df['pmname'] + ' ' + df['plname']).n.apply(lambda x: x.split()[-1])
# priority_names2 = priority_names2.assign(new_suffix=lambda df: df.pnamesuffix)
# priority_names2 = priority_names2[['npi','new_firstname','new_middlename','new_lastname','new_suffix','practitioner_type','state','zip5']].drop_duplicates()
#
# expand_matches = out[['samhsa_id','npi']].merge(priority_names).merge(out[['samhsa_id','npi']].merge(priority_names2), how='outer', indicator=True)
# all_good = expand_matches.query('_merge=="both"')[['samhsa_id','npi']].drop_duplicates()
# expand_matches = expand_matches.merge(all_good, how='left', indicator='_merge2').query('_merge2!="both"').drop(columns='_merge2')
#
# o1 = out.merge(df1[['samhsa_id', 'middlename','Suffix']].dropna().query('middlename!="" or Suffix!=""').drop_duplicates())
# o2 = out.merge(df2[['npi', 'pmname', 'pnamesuffix']].dropna().query('pmname!="" or pnamesuffix!=""').drop_duplicates())
# lo1 = o1.merge(o2, left_on=o1.columns.tolist(), right_on=o2.columns.tolist(), how='outer', indicator=True).query('_merge=="left_only"')[['samhsa_id','npi']].drop_duplicates()
# ro1 = o1.merge(o2, left_on=o1.columns.tolist(), right_on=o2.columns.tolist(), how='outer', indicator=True).query('_merge=="right_only"')[['samhsa_id','npi']].drop_duplicates()
# lo1.merge(ro1)
def match_samhsa_npi():
# I don't exploit timing here
s = SAMHSA()
s.retrieve('names')
npi = NPI(entities=1)
npi.retrieve('fullnames')
npi.retrieve('expanded_fullnames')
npi.retrieve('credentials')
npi.retrieve('ptaxcode')
npi.retrieve('practitioner_type')
npi.retrieve('plocstatename')
npi.retrieve('ploczip')
npi.retrieve('ploctel')
npi.retrieve('secondary_practice_locations')
pecos = PECOS(['NPI', 'Last Name', 'First Name', 'Middle Name',
'Suffix', 'State', 'Zip Code', 'Phone Number'])
pecos.retrieve('names')
pecos.retrieve('practitioner_type')
# matching data to generate a crosswalk
final_crosswalk = pd.DataFrame()
practypes = ['MD/DO', 'NP', 'PA', 'CRNA', 'CNM', 'CNS']
# 0. TELEPHONE
final_crosswalk = generate_matches(
s, npi, pecos,
['practitioner_type', 'state', 'zip5', 'tel'],
practypes, final_crosswalk)
final_crosswalk = generate_matches(
s, npi, pecos,
['practitioner_type', 'state', 'tel'],
practypes, final_crosswalk)
final_crosswalk = generate_matches(
s, npi, pecos,
['practitioner_type', 'state', 'zip5'],
practypes, final_crosswalk)
final_crosswalk = generate_matches(
s, npi, pecos,
['practitioner_type', 'state'],
practypes, final_crosswalk)
final_crosswalk = generate_matches(
s, npi, pecos,
['state', 'zip5', 'tel'],
practypes, final_crosswalk)
final_crosswalk1 = generate_matches(
s, npi, pecos,
['state', 'tel'],
practypes, final_crosswalk)
final_crosswalk2 = generate_matches(
s, npi, pecos,
['state', 'zip5'],
practypes, final_crosswalk)
final_crosswalk3 = generate_matches(
s, npi, pecos,
['state'],
practypes, final_crosswalk)
final_crosswalk4 = generate_matches(
s, npi, pecos,
['practitioner_type'],
practypes, final_crosswalk)
final_crosswalk5 = generate_matches(
s, npi, pecos,
[],
practypes, final_crosswalk)
fin = (final_crosswalk1.merge(final_crosswalk, how='left', indicator=True)
.query('_merge=="left_only"'))
fin = (fin.append(
final_crosswalk2
.merge(final_crosswalk, how='left', indicator=True)
.query('_merge=="left_only"')))
fin = fin.append(
final_crosswalk3.query('order==1')
.merge(s.names).query('middlename!=""')[['samhsa_id', 'npi']]
.drop_duplicates()
.merge(final_crosswalk, how='left', indicator=True)
.query('_merge=="left_only"'))
fin = fin.append(
final_crosswalk4.query('order==1')
.merge(s.names).query('middlename!=""')[['samhsa_id', 'npi']]
.drop_duplicates()
.merge(final_crosswalk, how='left', indicator=True)
.query('_merge=="left_only"'))
fin = fin.append(
final_crosswalk5.query('order==1')
.merge(s.names).query('middlename!=""')[['samhsa_id', 'npi']]
.drop_duplicates()
.merge(final_crosswalk, how='left', indicator=True)
.query('_merge=="left_only"'))
fin = fin[['samhsa_id', 'npi']].drop_duplicates()
fin = fin[~fin['samhsa_id'].duplicated(keep=False)]
fin = fin[~fin['npi'].duplicated(keep=False)]
fin = final_crosswalk.append(fin).drop(columns='order').drop_duplicates()
fin = fin.append(pd.DataFrame(dict(samhsa_id=[42325, 34010, 80, 62, 42387,
42333, 42339],
npi=[1558332031, 1154652295,
1871718890, 1275599524, 1457360588,
1609002799, 1346518842]
)))
nopunct1 = (npi
.expanded_fullnames
.assign(nopunct=npi.expanded_fullnames['name']
.str.replace("'", "")
.str.replace('-', '')
.str.replace(' ', ''))[['npi', 'nopunct']])
remainders = (fin.merge(s.samhsa.drop_duplicates(),
how='right', on='samhsa_id', indicator=True)
.query('_merge=="right_only"'))
nopunct2 = (remainders[['samhsa_id']]
.merge(s.names)
.assign(nopunct=lambda df: (df['name']
.str.replace("'", "")
.str.replace('-', '')
.str.replace(' ', ''))))
nopunct2 = nopunct2[['samhsa_id', 'nopunct']]
matches = nopunct2.merge(nopunct1)
matches2 = matches[['npi', 'samhsa_id']].drop_duplicates()
matches2 = matches2[~matches2['samhsa_id'].duplicated(keep=False)]
matches2 = matches2[~matches2['npi'].duplicated(keep=False)]
newmatches = (matches2.merge(nopunct1)
.merge(nopunct2)[
matches2.merge(nopunct1)
.merge(nopunct2)
.nopunct.str.len() >= 10][['npi', 'samhsa_id']]
.drop_duplicates())
newmatches = newmatches[~newmatches.samhsa_id.isin(fin.samhsa_id)]
newmatches = newmatches[~newmatches.npi.isin(fin.npi)]
fin = fin.append(newmatches)
assert fin['samhsa_id'].is_unique
assert fin['npi'].is_unique
fin.reset_index(inplace=True, drop=True)
return fin
def analysis_dataset():
# some of this should get added to the PECOS class
# including also the name match
# Get matches of NPI to SAMHSA
# matches = (pd.read_csv('/work/akilby/npi/final_matches.csv')
# .drop(columns='Unnamed: 0'))
# from npi.utils.globalcache import c
# matches = c.match_samhsa_npi()
npi = NPI(entities=1)
npi.retrieve('practitioner_type')
npi_practype = (npi.practitioner_type
.pipe(convert_practitioner_data_to_long,
types=['MD/DO', 'NP', 'PA',
'CRNA', 'CNM', 'CNS']))
npi.retrieve('pgender')
pecos = PECOS(['NPI', 'Last Name', 'First Name', 'Middle Name',
'Suffix', 'State', 'Zip Code', 'Phone Number'])
pecos.retrieve('practitioner_type')
# 1. Select MD/DO and NPs from either NPI or PECOS
practitioners = (pecos.practitioner_type.merge(npi_practype,
how='left',
left_on="NPI",
right_on='npi'))
mddo = (practitioners
.query('Credential=="MD/DO" or Credential=="MD" or Credential=="DO'
'" or PractitionerType=="MD/DO"')
.NPI.drop_duplicates())
nps = practitioners.loc[(practitioners['Primary specialty']
== "NURSE PRACTITIONER")
| (practitioners['Credential'] == 'NP')
| (practitioners['PractitionerType'] == "NP")]
nps = nps.NPI.drop_duplicates()
# pecos_groups = PECOS(['NPI', 'Organization legal name',
# 'Group Practice PAC ID',
# 'Number of Group Practice members',
# 'Hospital affiliation CCN 1',
# 'Hospital affiliation LBN 1',
# 'Hospital affiliation CCN 2',
# 'Hospital affiliation LBN 2',
# 'Hospital affiliation CCN 3',
# 'Hospital affiliation LBN 3',
# 'Hospital affiliation CCN 4',
# 'Hospital affiliation LBN 4',
# 'Hospital affiliation CCN 5',
# 'Hospital affiliation LBN 5'],
# drop_duplicates=False, date_var=True)
# 2. Get group practice information. most sole practitioners
# are missing a group practice ID
pecos_groups_loc = PECOS(['NPI', 'Organization legal name',
'Group Practice PAC ID',
'Number of Group Practice members',
'State', 'Zip Code', 'Phone Number'],
drop_duplicates=False, date_var=True)
groups = pecos_groups_loc.physician_compare.drop_duplicates()
groups = groups.reset_index(drop=True).reset_index()
# A bunch of sole practitioners (groupsize =1 ) are missing
# give them a single-period group practice ID (not constant over
# time even though other IDs are)
groups.loc[
groups['Group Practice PAC ID'].isnull(),
'Group Practice PAC ID'] = (groups['index'] + 100000000000)
groups = groups.drop(columns='index')
groups = groups.merge(
groups[['NPI', 'Group Practice PAC ID', 'date']]
.drop_duplicates()
.groupby(['Group Practice PAC ID', 'date'])
.size()
.reset_index())
groups.loc[
groups['Number of Group Practice members'].isnull(),
'Number of Group Practice members'] = groups[0]
groups.drop(columns=[0], inplace=True)
coprac = (groups[['Group Practice PAC ID',
'Number of Group Practice members',
'State',
'Zip Code', 'date']]
.drop_duplicates())
coprac_ids = coprac.reset_index(drop=True).reset_index().rename(
columns={'index': 'group_prac_zip_date_id'})
coprac_np_counts = (groups
.merge(nps)
.merge(coprac_ids))
idvars = ['group_prac_zip_date_id', 'date', 'NPI']
coprac_np_counts = coprac_np_counts[idvars].drop_duplicates()
coprac_np_counts = (coprac_np_counts
.groupby(['group_prac_zip_date_id', 'date'])
.size()
.reset_index()
.rename(columns={0: 'np_count'}))
coprac_mds = (groups
.merge(mddo)
.merge(coprac_ids))
coprac_mds = coprac_mds[idvars].drop_duplicates()
coprac_mds = coprac_mds.merge(coprac_np_counts, how='left')
coprac_mds['np_count'] = coprac_mds.np_count.fillna(0)
preproc = (coprac_mds
.sort_values(['NPI', 'date', 'np_count',
'group_prac_zip_date_id'])
.groupby(['NPI', 'date']))
mins = preproc.first()
maxes = preproc.last()
mins = (mins
.reset_index()
.merge(coprac_ids)
.sort_values(['NPI', 'date'])
.reset_index(drop=True))
maxes = (maxes
.reset_index()
.merge(coprac_ids)
.sort_values(['NPI', 'date'])
.reset_index(drop=True))
copracs = mins.merge(maxes, on=['NPI', 'date'], suffixes=['_min', '_max'])
# mins = (coprac_mds
# .drop(columns='group_prac_zip_date_id')
# .groupby(['NPI', 'date'], as_index=False).min())
# maxes = (coprac_mds.drop(columns='group_prac_zip_date_id')
# .groupby(['NPI', 'date'], as_index=False).max())
# copracs = mins.merge(maxes.rename(columns={'np_count': 'np_count_max'}))
assert (copracs[['NPI', 'date']].drop_duplicates().shape[0]
== copracs.shape[0])
# Specialties. time varying?
pecos_specs = PECOS(['NPI', 'Primary specialty',
'Secondary specialty 1',
'Secondary specialty 2',
'Secondary specialty 3',
'Secondary specialty 4'],
drop_duplicates=False, date_var=True)
mddo = pecos_specs.physician_compare.merge(mddo)
prim_spec = mddo[['NPI', 'date', 'Primary specialty']].drop_duplicates()
prim_spec = prim_spec.groupby(['NPI', 'date']).first().reset_index()
# prim_spec = pd.concat([m[['NPI', 'date']],
# pd.get_dummies(
# m['Primary specialty'])],
# axis=1).groupby(['NPI', 'date']).sum()
# prim_spec = 1*(prim_spec > 0)
sec_spec = (mddo.drop(columns=['Primary specialty'])
.drop_duplicates()[mddo.drop(columns=['Primary specialty'])
.drop_duplicates()
.isnull().sum(1) < 4]
.set_index(['NPI', 'date'])
.stack()
.reset_index()
.drop(columns='level_2')
.dropna()
.drop_duplicates()
.rename(columns={0: 'secondary_spec'})
.query('secondary_spec!=" "'))
sec_spec = pd.concat([sec_spec[['NPI', 'date']],
pd.get_dummies(
sec_spec['secondary_spec'])],
axis=1).groupby(['NPI', 'date']).sum()
sec_spec = 1*(sec_spec > 0)
copracs = copracs.merge(prim_spec)
# copracs = copracs.merge(sec_spec, how='left')
copracs = copracs.merge(sec_spec.reset_index(), how='left')
copracs = copracs.fillna({x: 0 for x in sec_spec.columns})
# copracs = copracs.merge(mddo[['NPI', 'Primary specialty']])
pecos_education = PECOS(['NPI', 'Medical school name', 'Graduation year'])
copracs = (copracs
.merge(pecos_education
.physician_compare[['NPI', 'Graduation year']]
.groupby('NPI', as_index=False)
.first()))
copracs['gradyear'] = pd.qcut(copracs['Graduation year'], 20)
copracs = copracs.merge(npi.pgender, left_on='NPI', right_on='npi')
# waiver dates from new file
matches = pickle_read(
'/work/akilby/npi/Cache/Caches/output_1588990540883395.pkl')
s = SAMHSA()
samhsa_match = (s.samhsa[['WaiverType', 'samhsa_id', 'Date']]
.drop_duplicates())
samhsa_match = samhsa_match.merge(matches)
sam = (samhsa_match[['npi', 'Date', 'WaiverType']]
.groupby(['npi', 'WaiverType'])
.min()
.unstack(1)
.reset_index())
sam.columns = ['npi', 'Date30', 'Date100', 'Date275']
copracs = copracs.merge(sam, how='left')
copracs = copracs.drop(columns=['NPI', 'Graduation year'])
for variable in ['Group Practice PAC ID_min',
'Group Practice PAC ID_max',
'Number of Group Practice members_min',
'Number of Group Practice members_max']:
copracs[variable] = copracs[variable].astype(int)
copracs['State_min'] = copracs['State_min'].astype(str)
copracs['State_max'] = copracs['State_max'].astype(str)
copracs['Zip Code_min'] = copracs['Zip Code_min'].astype(str)
copracs['Zip Code_max'] = copracs['Zip Code_max'].astype(str)
copracs['Primary specialty'] = copracs['Primary specialty'].astype(str)
isid(copracs, ['npi', 'date'])
return copracs
def final_analysis_dataset(final):
npi = NPI(entities=1)
# gender
npi.retrieve('pgender')
# education
educ = (PECOS(['NPI', 'Medical school name', 'Graduation year'])
.physician_compare)
educ = educ.groupby('NPI', as_index=False).first()
    educ['gradyear'] = pd.qcut(educ['Graduation year'], 20)
import os
import sys
os.getcwd()
sys.path.append(os.getcwd() + '/src')
from NeuralNetwork import NeuralNetwork
import pandas as pd
import numpy as np
training_file = "/Users/amirmukeri/Downloads/mnist_train.csv"
test_file = "/Users/amirmukeri/Downloads/mnist_test.csv"
# training_file = "mnist_dataset/mnist_train_100.csv"
# test_file = "mnist_dataset/mnist_test_10.csv"
# training_file = "mnist_dataset/process_data_test.csv"
# test_file = "mnist_dataset/process_data_test.csv"
input_nodes = 784
hidden_nodes = 200
output_nodes = 10
learning_rate = 0.1
n = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)
training_data_file = pd.read_csv(training_file, header=None, index_col=None)
epochs = 5
for e in range(epochs):
for i,row in training_data_file.iterrows():
        # Scale the raw pixel values (0-255) into the range 0.01-1.00 and build the target vector.
        inputs = pd.Series(row[1:])
        inputs = inputs.apply(lambda x: (x / 255.0 * 0.99) + 0.01)
        targets = pd.Series(0.0, index=np.arange(10))
        targets[int(row[0])] = 0.99
        n.train(inputs.values.tolist(), targets.values.tolist())
pass
test_data_file = pd.read_csv(test_file, header=None, index_col=None)
test_data_file.head()
scorecard = []
for i,row in test_data_file.iterrows():
correct_label = int(row[0])
    inputs = pd.Series(row[1:])
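    # The original file ends here; below is a minimal, assumed completion of the scoring
    # loop. It presumes the NeuralNetwork class exposes a query() method that returns the
    # output-layer activations (an assumption - query() is not shown in this file).
    inputs = inputs.apply(lambda x: (x / 255.0 * 0.99) + 0.01)
    outputs = n.query(inputs.values.tolist())
    predicted_label = int(np.argmax(outputs))
    scorecard.append(1 if predicted_label == correct_label else 0)
    pass
# Fraction of correctly classified test digits:
# print("performance =", sum(scorecard) / len(scorecard))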
import pandas as pd
import bioGRID as bg
import traversalHelper as tr
import numpy as np
import os
from collections import defaultdict
from statistics import mean
from scipy import stats
def parse_uniProt_map(uniProtMapF):
df = pd.read_csv(uniProtMapF, sep='\t')
df.dropna(inplace=True)
uniProtMapping = dict(zip([i.split(";")[0] for i in df['Cross-reference (STRING)']], list(df['Gene names (primary )'])))
return uniProtMapping
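# Hedged usage sketch (path taken from the parse_STRING defaults below; the UniProt
# export is assumed to be tab-separated with 'Cross-reference (STRING)' and
# 'Gene names (primary )' columns, as read above):
# string_to_gene = parse_uniProt_map('./data/UniProt/uniprot-taxonomy_559292_STRING.tab')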
def parse_STRING(ppiFile='./data/STRING/4932.protein.links.v11.0.txt'
, typeFile='./data/STRING/4932.protein.actions.v11.0.txt'
, uniProtMap='./data/UniProt/uniprot-taxonomy_559292_STRING.tab', root='./'
, wFile_GGI='./data/parsed/STRING_GGI.pkl', wFile_PPI='./data/parsed/STRING_PPI.pkl'):
ppiFile, typeFile, wFile_GGI, wFile_PPI, uniProtMap = root+ppiFile, root+typeFile, root+wFile_GGI, root+wFile_PPI, root+uniProtMap
if os.path.exists(wFile_GGI) and os.path.exists(wFile_PPI):
        return pd.read_pickle(wFile_GGI), pd.read_pickle(wFile_PPI)
import os
import numpy as np
import pandas as pd
from shutil import copy2
from sklearn.metrics import confusion_matrix, accuracy_score
class ExperimentUtils(object):
"""
This class is used to create experiments splitting the dataset for human classification.
Also evaluate the performance of that classification.
"""
def __init__(self, genres_folder, exp_folder):
"""
Initialize the class
:param genres_folder: Folder where is the subfolder with musical genres
:param exp_folder: Folder to create the experiment
"""
self.genres_folder = genres_folder
self.exp_folder = exp_folder
self.genres_dict = {"BigRoom": "A", "ElectroHouse": "B", "DrumAndBass": "C",
"Dubstep": "D", "HipHop": "E", "Dance": "F", "FutureHouse": "G"}
self.inv_genres_dict = dict(map(reversed, self.genres_dict.items()))
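    # Hedged usage sketch (paths are placeholders):
    # utils = ExperimentUtils(genres_folder='/data/genres', exp_folder='/data/experiment_01')
    # utils.build_experiment(number_of_songs=20)   # half to train, half to test, per genre
    # cm = utils.evaluate_results()                # run after subjects sort the test folder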
def _generate_full_alias_dataframes(self, n_songs):
"""
:param n_songs: Number of songs to select at random for every genre
:return: two pd.DataFrame
dftest -> contains the names of the test songs of all the genres
dftrain -> contains the names of the train songs of all the genres
"""
genres_folders = [self.genres_folder + "/" + g for g in self.genres_dict.keys()]
dftest = pd.DataFrame()
dftrain = pd.DataFrame()
for g in genres_folders:
df1, df2 = self._generate_alias_dataframes(g, n_songs)
dftest = pd.concat([dftest, df1])
dftrain = pd.concat([dftrain, df2])
        anonymized_names = ["song" + str((x % 100) // 10) + str(x % 10) for x in range(1, dftest.shape[0] + 1)]
dftest = dftest.sample(frac=1) # Relocation at random, avoiding songs grouped by genre
dftest["test_name"] = anonymized_names # Use the anonymized_names with the random DataFrame
return dftest, dftrain
def _generate_alias_dataframes(self, genre_folder, n_songs):
"""
:param genre_folder: Genre folder to extract alias DataFrames
:param n_songs: Number of songs to select at random of the genre
:return: two pd.DataFrame
dftest -> contains the names of the test songs of genre_folder
dftrain -> contains the names of the train songs of genre_folder
"""
songs = [y for y in [x for x in os.walk(genre_folder)][0][2] if ".mp3" in y]
if (len(songs) != 100):
print("WARNING: The genre folder " + genre_folder + " only have " + str(len(songs)) + " songs.")
genre = os.path.basename(genre_folder)
        label_names = [genre + str(x // 100) + str((x % 100) // 10) + str(x % 10) for x in range(1, len(songs) + 1)]
labels = [genre] * len(songs)
df = pd.DataFrame()
df["class"] = labels # genre label
df["real_name"] = songs # filename
df["label_name"] = label_names # dataset name
dftest, dftrain = np.split(df.sample(n_songs), 2, axis=0)
        train_names = [self.genres_dict[genre] +  # Names according to the label in genres_dict
                       str((x % 100) // 10) + str(x % 10) for x in range(1, dftrain.shape[0] + 1)]
dftrain["train_name"] = train_names # Anonymizing train
return dftest, dftrain
def build_experiment(self, number_of_songs):
"""
Creates the experiment folder at exp_folder path
:param number_of_songs: Number of songs of each genre (half to train, half to test)
"""
dftest, dftrain = self._generate_full_alias_dataframes(n_songs=number_of_songs) # Get names DataFrames
os.mkdir(self.exp_folder) # Create main exp folder
train_folder = os.path.join(self.exp_folder, "train")
test_folder = os.path.join(self.exp_folder, "test")
eval_folder = os.path.join(self.exp_folder, "evaluation")
os.mkdir(train_folder)
os.mkdir(test_folder)
# Create one folder for each genre in train and test folders
for key in self.genres_dict.keys():
gen = dftrain["class"] == key # Rule to select the actual genre later in dftrain
os.mkdir(os.path.join(train_folder, self.genres_dict[key]))
os.mkdir(os.path.join(test_folder, self.genres_dict[key])) # Empty folder
for index, row in dftrain[gen].iterrows():
source = os.path.join(self.genres_folder, key, row["real_name"]) # Song in the dataset folder
target = os.path.join(train_folder,
self.genres_dict[key], row["train_name"] + ".mp3") # Song anonymized
copy2(source, target) # Copying into train/label folder
print(source, target)
# Add the test songs to the test folder
for index, row in dftest.iterrows():
source = os.path.join(self.genres_folder, row["class"], row["real_name"]) # Song in the dataset folder
target = os.path.join(test_folder, row["test_name"] + ".mp3") # Song anonymized
copy2(source, target) # Copying into test folder
print(source, target)
os.mkdir(eval_folder) # Create the evaluation folder to save results
# Save the alias files to evaluate when the experiments will be done
pd.DataFrame.to_csv(dftrain, os.path.join(eval_folder, "train_songs.csv"))
pd.DataFrame.to_csv(dftest, os.path.join(eval_folder, "test_songs.csv"))
def evaluate_results(self):
"""
Evaluates the performance in the experiment given the saved .CSV test file with the alias and the folder test
modified by the subjects. This evaluation is shown as a confusion_matrix
"""
        dftest = pd.read_csv(os.path.join(self.exp_folder, "evaluation", "test_songs.csv"), index_col=0)
dfeval = self.get_evaluation_df(os.path.join(self.exp_folder, "test"))
# Joining the DataFrame we used and the one results from the user by the "test_name" column in both
dfeval = pd.merge(dftest, dfeval)
labels = dfeval["class"] # Real class
pred_labels = dfeval["pred_class"] # Predicted class
        cm = confusion_matrix(labels, pred_labels, labels=list(self.genres_dict.keys()))
print(cm)
return cm
def get_evaluation_df(self, test_folder):
dfeval = pd.DataFrame()
# Generate DataFrame with the name of the songs and the label predicted by the subject
for key in self.inv_genres_dict:
dfaux = pd.DataFrame()
songs = [y.replace(".mp3", "") for y in
[x for x in os.walk(os.path.join(test_folder, key))][0][2] if ".mp3" in y]
dfaux["test_name"] = songs
pred_labels = [self.inv_genres_dict[key]] * len(songs)
dfaux["pred_class"] = pred_labels
dfeval = pd.concat([dfeval, dfaux])
return dfeval
def evaluate_all_exps(self, experiments_folder):
accuracies = []
        dftest = pd.read_csv(os.path.join(self.exp_folder, "evaluation", "test_songs.csv"), index_col=0)
experiment_folders = [x for x in os.walk(experiments_folder)][0][1]
dffinal = pd.DataFrame()
for f in experiment_folders:
dfeval = self.get_evaluation_df(os.path.join(experiments_folder,f,"test"))
dfeval = pd.merge(dftest, dfeval)
labels = dfeval["class"] # Real class
pred_labels = dfeval["pred_class"] # Predicted class
accuracies.append(accuracy_score(labels, pred_labels))
dffinal = pd.concat([dffinal, dfeval])
labels = dffinal["class"] # Real class
pred_labels = dffinal["pred_class"] # Predicted class
        cm = confusion_matrix(labels, pred_labels, labels=sorted(self.genres_dict.keys()))
print(cm)
print(accuracies)
print(np.mean(accuracies))
print(np.std(accuracies))
return cm
def evaluate_all_forest_style(self, experiments_folder):
accuracies = []
        dftest = pd.read_csv(os.path.join(self.exp_folder, "evaluation", "test_songs.csv"), index_col=0)
experiment_folders = [x for x in os.walk(experiments_folder)][0][1]
        dffinal = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 15 14:52:02 2021
@author: <NAME>
"""
# Importing Librries
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
from utils import boilerplate_model
#for creating feature column
from tensorflow.keras import layers
from tensorflow import feature_column
from os import getcwd
#Importing Training and Validation Dataset
ds = pd.read_csv('train1.csv')
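# The original script is truncated here; a minimal assumed continuation that splits the
# frame for training and validation (train_test_split is already imported above):
# train_df, val_df = train_test_split(ds, test_size=0.2, random_state=42)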
#! /usr/bin/env python3
import os, sys
import re
import functools
import numpy as np
import pandas as pd
import scipy as sp
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
flatui = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"]
sns.set_palette(flatui)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
def parse_syscall_results(f):
data = []
data = pd.read_csv(f, sep=r'\s+', skiprows=5, header=0)
try:
data.columns = ['syscall', 'count', 'dummy', 'time']
#data = data[['syscall', 'count', 'time']]
except ValueError:
data.columns = ['syscall', 'count', 'time']
data['time'] = data['time'] / data['count']
return data
def parse_results_file(f):
if 'base' in f:
ftype = 'base'
elif 'ebph' in f:
ftype = 'ebph'
else:
raise Exception(f'{f} does not contain "base" or "ebph"')
data = parse_syscall_results(f)
return (ftype, data)
def parse_all_results(d):
base = []
ebph = []
for f in sorted(os.listdir(d)):
f = os.path.join(d, f)
fname, dfs = parse_results_file(f)
if fname == 'base':
base.append(dfs)
else:
ebph.append(dfs)
base = combine_data(base)
ebph = combine_data(ebph)
return base, ebph
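# Hedged usage sketch (assumes a results directory whose file names contain either
# "base" or "ebph", e.g. results/base_run0.txt and results/ebph_run0.txt):
# base, ebph = parse_all_results('results/')
# merged = compare(base, ebph)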
def discard_outliers(data):
grp = data.groupby('syscall')
mean = grp['time'].transform('mean')
std = grp['time'].transform('std')
zscore = (data['time'] - mean) / std
zscore = zscore.fillna(0)
good = zscore < 3
return data[good]
def combine_data(data):
combined = pd.concat(data)
combined = discard_outliers(combined)
combined = combined.groupby('syscall', as_index=False).agg({'count': 'sum', 'time': [ 'mean', 'std', 'sem']})
combined.columns = ['_'.join(col).strip('_') for col in combined.columns.values]
combined = combined.rename(columns={'count_sum': 'count'})
return combined
def compare(base, ebph):
    data = pd.merge(base, ebph, on='syscall', suffixes=['_base', '_ebph'])
import unittest
import pandas as pd
import numpy as np
from ..timeseries import TimeSeries
class TimeSeriesTestCase(unittest.TestCase):
times = pd.date_range('20130101', '20130110')
pd_series1 = pd.Series(range(10), index=times)
pd_series2 = pd.Series(range(5, 15), index=times)
pd_series3 = pd.Series(range(15, 25), index=times)
series1: TimeSeries = TimeSeries(pd_series1)
series2: TimeSeries = TimeSeries(pd_series1, pd_series2, pd_series3)
series3: TimeSeries = TimeSeries(pd_series2)
def test_creation(self):
with self.assertRaises(ValueError):
# Index is dateTimeIndex
TimeSeries(pd.Series(range(10), range(10)))
with self.assertRaises(ValueError):
# Conf interval must be same length as main series
pd_lo = pd.Series(range(5, 14), index=pd.date_range('20130101', '20130109'))
TimeSeries(self.pd_series1, pd_lo)
with self.assertRaises(ValueError):
# Conf interval must have same time index as main series
pd_lo = pd.Series(range(5, 15), index=pd.date_range('20130102', '20130111'))
TimeSeries(self.pd_series1, pd_lo)
with self.assertRaises(ValueError):
# Conf interval must be same length as main series
pd_hi = pd.Series(range(5, 14), index=pd.date_range('20130101', '20130109'))
TimeSeries(self.pd_series1, None, pd_hi)
with self.assertRaises(ValueError):
# Conf interval must have same time index as main series
pd_lo = pd.Series(range(5, 15), index=pd.date_range('20130102', '20130111'))
TimeSeries(self.pd_series1, None, pd_lo)
with self.assertRaises(ValueError):
# Main series cannot have date holes
range_ = pd.date_range('20130101', '20130104').append(pd.date_range('20130106', '20130110'))
TimeSeries(pd.Series(range(9), index=range_))
series_test = TimeSeries(self.pd_series1, self.pd_series2, self.pd_series3)
self.assertTrue(series_test.pd_series().equals(self.pd_series1))
self.assertTrue(series_test.conf_lo_pd_series().equals(self.pd_series2))
self.assertTrue(series_test.conf_hi_pd_series().equals(self.pd_series3))
def test_alt_creation(self):
with self.assertRaises(ValueError):
# Series cannot be lower than three
index = pd.date_range('20130101', '20130102')
TimeSeries.from_times_and_values(index, self.pd_series1.values[:2])
with self.assertRaises(ValueError):
# all array must have same length
TimeSeries.from_times_and_values(self.pd_series1.index,
self.pd_series1.values[:-1],
self.pd_series2[:-2],
self.pd_series3[:-1])
# test if reordering is correct
rand_perm = np.random.permutation(range(1, 11))
index = pd.to_datetime(['201301{:02d}'.format(i) for i in rand_perm])
series_test = TimeSeries.from_times_and_values(index, self.pd_series1.values[rand_perm-1],
self.pd_series2[rand_perm-1],
self.pd_series3[rand_perm-1].tolist())
self.assertTrue(series_test.start_time() == pd.to_datetime('20130101'))
self.assertTrue(series_test.end_time() == pd.to_datetime('20130110'))
self.assertTrue(series_test.pd_series().equals(self.pd_series1))
self.assertTrue(series_test.conf_lo_pd_series().equals(self.pd_series2))
self.assertTrue(series_test.conf_hi_pd_series().equals(self.pd_series3))
self.assertTrue(series_test.freq() == self.series1.freq())
# TODO test over to_dataframe when multiple features choice is decided
def test_eq(self):
seriesA: TimeSeries = TimeSeries(self.pd_series1)
self.assertTrue(self.series1 == seriesA)
# with a defined CI
seriesB: TimeSeries = TimeSeries(self.pd_series1,
confidence_hi=pd.Series(range(10, 20),
index=pd.date_range('20130101', '20130110')))
self.assertFalse(self.series1 == seriesB)
self.assertTrue(self.series1 != seriesB)
# with different dates
seriesC = TimeSeries(pd.Series(range(10), index=pd.date_range('20130102', '20130111')))
self.assertFalse(self.series1 == seriesC)
# compare with both CI
seriesD: TimeSeries = TimeSeries(self.pd_series1, self.pd_series2, self.pd_series3)
seriesE: TimeSeries = TimeSeries(self.pd_series1, self.pd_series3, self.pd_series2)
self.assertTrue(self.series2 == seriesD)
self.assertFalse(self.series2 == seriesE)
def test_dates(self):
self.assertEqual(self.series1.start_time(), pd.Timestamp('20130101'))
self.assertEqual(self.series1.end_time(), pd.Timestamp('20130110'))
self.assertEqual(self.series1.duration(), pd.Timedelta(days=9))
def test_slice(self):
# base case
seriesA = self.series1.slice(pd.Timestamp('20130104'), pd.Timestamp('20130107'))
self.assertEqual(seriesA.start_time(), pd.Timestamp('20130104'))
self.assertEqual(seriesA.end_time(), pd.Timestamp('20130107'))
# time stamp not in series
seriesB = self.series1.slice(pd.Timestamp('20130104 12:00:00'), pd.Timestamp('20130107'))
self.assertEqual(seriesB.start_time(), pd.Timestamp('20130105'))
self.assertEqual(seriesB.end_time(), pd.Timestamp('20130107'))
# end timestamp after series
seriesC = self.series1.slice(pd.Timestamp('20130108'), pd.Timestamp('20130201'))
self.assertEqual(seriesC.start_time(), pd.Timestamp('20130108'))
self.assertEqual(seriesC.end_time(), pd.Timestamp('20130110'))
# n points, base case
seriesD = self.series1.slice_n_points_after(pd.Timestamp('20130102'), n=3)
self.assertEqual(seriesD.start_time(), pd.Timestamp('20130102'))
self.assertTrue(len(seriesD.values()) == 3)
self.assertEqual(seriesD.end_time(), pd.Timestamp('20130104'))
seriesE = self.series1.slice_n_points_after(pd.Timestamp('20130107 12:00:10'), n=10)
self.assertEqual(seriesE.start_time(), pd.Timestamp('20130108'))
        self.assertEqual(seriesE.end_time(), pd.Timestamp('20130110'))
"""
Author: <NAME> (<EMAIL>)
Date: 2020-02-10
-----Description-----
This script provides a class and set of functions for bringing CSPP science variables into Python memory.
This is set up for recovered_cspp streams, but should also work for telemetered data.
Note that CTD, DOSTA, SPKIR, PAR, and VELPT are the only data sets that are telemetered. OPTAA and NUTNR data
packets are too large to transfer in a short surface window.
There are three general functions and one function for each CSPP data stream.
To make multiple data requests, submit each request before checking to see if the data is available.
-----Required Libraries-----
requests: For issuing and checking request status.
re: For parsing returned json for URLs that contain instrument data.
time: For pausing the script while checking a data request status.
pandas: For organizing data.
xarray: For opening remote NetCDFs.
-----Class-----
OOIM2M() <<< This is the overall class. This must prepend a function.
Example 1: url = OOIM2M.create_url(url,start_date,start_time,stop_date,stop_time)
request = OOIM2M.make_request(url,user,token)
nc = OOIM2M.get_location(request)
Example 2: THIS_EXAMPLE_IS_TOO_LONG = OOIM2M()
url = THIS_EXAMPLE_IS_TOO_LONG.create_url(url)
request = THIS_EXAMPLE_IS_TOO_LONG.make_request(url,user,token)
nc = THIS_EXAMPLE_IS_TOO_LONG.get_location(request)
-----General Functions-----
url = OOIM2M.create_url(url,start_date,start_time,stop_date,stop_time) <<< Function for generating a request URL for data between two datetimes. Returns a complete request URL. URL is the base request url for the data you want. Dates in YYYY-MM-DD. Times in HH:MM:SS.
request = OOIM2M.make_request(url,user,token) <<< Function for making the request from the URL created from create_url. User and token are found in your account information on OOINet. Returns a requests object.
nc = OOIM2M.get_location(request) <<< Function that gathers the remote locations of the requested data. Returns a list of URLs where the data is stored as netCDFs. This list includes data that is used in the creation of data products. Example: CTD data accompanies DOSTA data.
-----Instrument Functions-----
ctd = cspp_ctd(nc) <<< Returns a pandas dataframe that contains datetime, pressure, temperature, salinity, and density.
dosta = cspp_dosta(nc) <<< Returns a pandas dataframe that contains datetime, pressure, temperature, concentration, and estimated saturation. CTD data is also made available.
flort = cspp_flort(nc) <<< Returns pandas dataframe that contains datetime, pressure, chlorophyll-a, cdom, and optical backscatter.
nutnr = cspp_nutnr(nc) <<< Interpolates pressure for nitrate data using time and CTD pressure. Returns a pandas dataframe that contains datetime, pressure, and nitrate.
par = cspp_parad(nc) <<< Returns a pandas dataframe that contains datetime, pressure, bulk photosynthetically active radiation.
velpt = cspp_velpt(nc) <<< Returns a pandas dataframe that contains datetime, pressure, northward velocity, eastward velocity, upward velocity, heading, pitch, roll, soundspeed, and temperature measured by the aquadopp.
batt1, batt2 = cspp_batts(nc) <<< Returns two pandas dataframes that contain datetime and voltage for each CSPP battery.
compass = cspp_cpass(nc) <<< Returns a pandas dataframe that contains datetime, pressure, heading, pitch, and roll from the control can.
sbe50 = cspp_sbe50(nc) <<< Returns a pandas dataframe that contains datetime, pressure, and profiler velocity calculated from the SBE50 in the control can.
winch = cspp_winch(nc) <<< Returns a pandas dataframe that contains datetime, pressure, internal temperature of the winch, current seen by the winch, voltage seen by the winch, and the rope on the winch drum.
cspp_spkir(nc) <<< Under development.
cspp_optaa(nc) <<< Under development.
-----Extra Functions-----
find_site(nc) <<< Function that identifies the requested CSPP site and standard depth of that site. Used in removing bad pressure data. Called by data functions. Not generally called by the user.
-----Notes/Issues-----
Flort_sample is the stream name for CSPP fluorometer data.
However, when requests are made for this stream, only deployments 5 and greater are returned.
For deployments 1-4, the current stream is flort_dj_cspp_instrument_recovered.
OOI personnel are working to make flort_sample the stream that contains all data from all deployments.
NUTNR data does not have pressure data associated with it in the raw files produces by the CSPP.
The function provided in this script interpolates based on time.
Alternatively, the user can call the int_ctd_pressure variable.
The cspp_optaa function is in the works.
OOI ion-function for VELPT-J assumes data from the instrument is output in mm/s, when it is actually output in m/s.
https://github.com/oceanobservatories/ion-functions/blob/master/ion_functions/data/vel_functions.py
The simple fix now is to multiply returned velocity values by 1000 to get them back into m/s.
"""
import requests, re, time, pandas as pd, numpy as np, xarray as xr
#CE01ISSP URLs
CE01ISSP_OPTAA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
CE01ISSP_CTDPF = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
CE01ISSP_NUTNR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
CE01ISSP_SPKIR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
CE01ISSP_FLORT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
CE01ISSP_PARAD = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
CE01ISSP_VELPT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
CE01ISSP_DOSTA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
CE01ISSP_BATTS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_dbg_pdbg_batt_eng_recovered'
CE01ISSP_CPASS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_hmr_eng_recovered'
CE01ISSP_SBE50 = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_sbe_eng_recovered'
CE01ISSP_WINCH = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE01ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_wm_eng_recovered'
#CE02SHSP URLs
CE02SHSP_OPTAA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
CE02SHSP_CTDPF = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
CE02SHSP_NUTNR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
CE02SHSP_SPKIR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
CE02SHSP_FLORT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
CE02SHSP_PARAD = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
CE02SHSP_VELPT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
CE02SHSP_DOSTA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
CE02SHSP_BATTS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_dbg_pdbg_batt_eng_recovered'
CE02SHSP_CPASS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_hmr_eng_recovered'
CE02SHSP_SBE50 = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_sbe_eng_recovered'
CE02SHSP_WINCH = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE02SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_wm_eng_recovered'
#CE06ISSP URLs
CE06ISSP_OPTAA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
CE06ISSP_CTDPF = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
CE06ISSP_NUTNR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
CE06ISSP_SPKIR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
CE06ISSP_FLORT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
CE06ISSP_PARAD = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
CE06ISSP_VELPT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
CE06ISSP_DOSTA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
CE06ISSP_BATTS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_dbg_pdbg_batt_eng_recovered'
CE06ISSP_CPASS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_hmr_eng_recovered'
CE06ISSP_SBE50 = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_sbe_eng_recovered'
CE06ISSP_WINCH = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE06ISSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_wm_eng_recovered'
#CE07SHSP URLs
CE07SHSP_OPTAA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
CE07SHSP_CTDPF = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
CE07SHSP_NUTNR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
CE07SHSP_SPKIR = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
CE07SHSP_FLORT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
CE07SHSP_PARAD = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
CE07SHSP_VELPT = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
CE07SHSP_DOSTA = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
CE07SHSP_BATTS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_dbg_pdbg_batt_eng_recovered'
CE07SHSP_CPASS = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_hmr_eng_recovered'
CE07SHSP_SBE50 = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_sbe_eng_recovered'
CE07SHSP_WINCH = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/CE07SHSP/SP001/00-SPPENG000/recovered_cspp/cspp_eng_cspp_wc_wm_eng_recovered'
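# Hedged end-to-end sketch (dates are illustrative; user and token are placeholders for
# your own OOINet API credentials):
# url = OOIM2M.create_url(CE01ISSP_CTDPF, '2019-04-01', '00:00:00', '2019-04-30', '23:59:59')
# request = OOIM2M.make_request(url, user='YOUR-OOI-API-USER', token='YOUR-OOI-API-TOKEN')
# nc = OOIM2M.get_location(request)
# ctd = OOIM2M.cspp_ctd(nc)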
class OOIM2M():
def __init__(self):
return
def create_url(url,start_date = '2014-04-04',start_time = '00:00:00',stop_date = '2035-12-31',stop_time = '23:59:59'): #Create a request URL.
timestring = "?beginDT=" + start_date + 'T' + start_time + ".000Z&endDT=" + stop_date + 'T' + stop_time + '.999Z' #Get the timespan into an OOI M2M format.
m2m_url = url + timestring #Combine the partial URL with the timespan to get a full url.
return m2m_url
def make_request(m2m_url, user ='OOIAPI-BCJPAYP2KUVXFX', token = '<KEY>O'): #Request data from UFRAME using the generated request URL.
request = requests.get(m2m_url,auth = (user,token))
if request.status_code == requests.codes.ok: #If the response is 200, then continue.
print('Request successful.')
return request
elif request.status_code == requests.codes.bad: #If the response is 400, then issue a warning to force the user to find an issue.
print(request)
print('Bad request. Check request URL, user, and token.')
return
elif request.status_code == requests.codes.not_found: #If the response is 404, there might not be data during the prescribed time period.
print(request)
print('Not found. There may be no data available during the requested time period.')
return
else: #If an error that is unusual is thrown, show this message.
print(request)
print('Unanticipated error code. Look up error code here: https://github.com/psf/requests/blob/master/requests/status_codes.py')
return
def get_location(request): #Check the status of the data request and return the remote location when complete.
data = request.json() #Return the request information as a json.
check = data['allURLs'][1] + '/status.txt' #Make a checker.
for i in range(60*30): #Given roughly half an hour...
r = requests.get(check) #check the request.
if r.status_code == requests.codes.ok: #If everything is okay.
print('Request complete.') #Print this message.
break
else:
print('Checking request...',end = " ")
print(i)
time.sleep(1) #If the request isn't complete, wait 1 second before checking again.
print("")
data_url = data['allURLs'][0] #This webpage provides all URLs for the request.
data_urls= requests.get(data_url).text #Convert the page to text.
data_nc = re.findall(r'(ooi/.*?.nc)',data_urls) #Find netCDF urls in the text.
for j in data_nc:
if j.endswith('.nc') == False: #If the URL does not end in .nc, toss it.
data_nc.remove(j)
for j in data_nc:
try:
float(j[-4]) == True #If the 4th to last value isn't a number, then toss it.
except:
data_nc.remove(j)
thredds_url = 'https://opendap.oceanobservatories.org/thredds/dodsC/' #This is the base url for remote data access.
fill = '#fillmismatch' #Applying fill mismatch prevents issues.
data_nc = np.char.add(thredds_url,data_nc) #Combine the thredds_url and the netCDF urls.
nc = np.char.add(data_nc,fill) #Append the fill.
return nc
def find_site(nc): #Function for finding the requested site and setting the standard depth.
df = pd.DataFrame(data = {'location':nc}) #Put the remote location in a dataframe.
url = df['location'].iloc[0] #Take the first URL...
banana = url.split("-") #Split it by the dashes.
site = banana[1] #The value in the second location is the site.
if site == 'CE01ISSP': #If the site is..
depth = 25 #This is the standard deployment depth.
elif site == 'CE02SHSP':
depth = 80
elif site == 'CE06ISSP':
depth = 29
elif site == 'CE07SHSP':
depth = 87
else:
depth = 87
return site,depth #Return the site and depth for use later.
def cspp_ctd(nc):
site,depth = OOIM2M.find_site(nc)
data = pd.DataFrame() #Create a placeholder dataframe.
for remote in nc: #For each remote netcdf location
dataset = xr.open_dataset(remote) #Open the dataset.
d = ({'datetime':dataset['profiler_timestamp'], #Pull the following variables.
'pressure':dataset['pressure'],
'temperature':dataset['temperature'],
'salinity':dataset['salinity'],
'density':dataset['density'],
'conductivity':dataset['conductivity']})
d = pd.DataFrame(data = d) #Put the variables in a dataframe.
data = pd.concat([data,d]) #Concatenate the new dataframe with the old dataframe.
data = data[data.pressure < depth] #Remove obviously bad values.
data = data[data.pressure > 0]
data = data[data.temperature > 0]
data = data[data.salinity > 2]
data = data[data.salinity < 42]
data = data.dropna() #Remove rows with any NaNs.
data = data.sort_values('datetime') #Sort the data chronologically.
data = data.reset_index(drop=True) #Reset the index.
print('CTD data for ' + site + ' available.')
print('CTD datetime in UTC.')
print('CTD pressure in dbars.')
print('CTD temperature in degC.')
print('CTD salinity in PSU.')
print('CTD density in kg m^-3.')
print('CTD conductivity in S m^-1.')
return data
def cspp_dosta(nc):
site,depth = OOIM2M.find_site(nc) #Determine the CSPP site and standard depth.
dfnc = pd.DataFrame(data = {'location':nc}) #The returned NetCDFs contain both DOSTA and CTDPF files.
dosta = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')] #Identify the DOSTA files. (Files that do not (~) contain "cspp-ctdpf_j_cspp_instrument".)
# ctd = dfnc.loc[dfnc['location'].str.contains('ctdpf_j_cspp_instrument')] #Identify the CTD file. CTD data accompanies DOSTA data because it is used in the computation of data products.
data = pd.DataFrame()
for remote in dosta['location']: #For each DOSTA remote location.
dataset = xr.open_dataset(remote) #Open the dataset.
d = ({'datetime':dataset['profiler_timestamp'], #Pull out these variables.
'pressure':dataset['pressure_depth'],
'temperature':dataset['optode_temperature'],
'concentration':dataset['dissolved_oxygen'],
'estimated_saturation':dataset['estimated_oxygen_saturation']})
d = pd.DataFrame(data = d)
data = pd.concat([data,d]) #Concatenate it with the previous loop.
data = data[data.pressure < depth] #Remove bad values.
data = data[data.pressure > 0]
data = data[data.estimated_saturation > 0]
data = data.dropna() #Remove rows with any NaNs.
data = data.sort_values('datetime') #Sort the data chronologically.
data = data.reset_index(drop=True) #Reset the index.
print('DOSTA data for ' + site + ' available.')
print('DOSTA datetime in UTC.')
print('DOSTA pressure in dbars.')
print('DOSTA temperature in degC.')
print('DOSTA concentration in umol kg^-1.')
print('DOSTA estimated_saturation in %.')
return data
def cspp_flort(nc):
site,depth = OOIM2M.find_site(nc)
dfnc = pd.DataFrame(data = {'location':nc})
flort = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
# ctd = dfnc.loc[dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
data = pd.DataFrame()
for remote in flort['location']:
dataset = xr.open_dataset(remote)
d = ({'datetime':dataset['time'],
'pressure':dataset['pressure_depth'],
'chla':dataset['fluorometric_chlorophyll_a'],
'cdom':dataset['fluorometric_cdom'],
'obs':dataset['optical_backscatter']})
d = pd.DataFrame(data = d)
data = pd.concat([data,d])
data = data[data.pressure < depth] #Remove obviously bad values.
data = data[data.pressure > 0]
data = data[data.chla > 0]
data = data[data.cdom > 0]
data = data[data.obs > 0]
data = data.dropna() #Remove rows with any NaNs.
data = data.sort_values('datetime') #Sort the data chronologically.
data = data.reset_index(drop=True) #Reset the index.
print('FLORT data for ' + site + ' available.')
print('FLORT datetime in UTC.')
print('FLORT pressure in dbars.')
print('FLORT chl in ug L^-1.')
print('FLORT cdom in ppb.')
print('FLORT obs in m^-1.')
return data
def cspp_par(nc):
site,depth = OOIM2M.find_site(nc)
dfnc = pd.DataFrame(data = {'location':nc})
par = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
# ctd = dfnc.loc[dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
data = pd.DataFrame()
for remote in par['location']:
dataset = xr.open_dataset(remote)
d = ({'datetime':dataset['profiler_timestamp'],
'pressure':dataset['pressure_depth'],
'par':dataset['parad_j_par_counts_output']})
d = pd.DataFrame(data = d)
data = pd.concat([data,d])
data = data[data.pressure < depth] #Remove obviously bad pressures.
data = data[data.pressure > 0]
data = data[data.par > 0] #Remove obviously bad values.
data = data.dropna() #Remove rows with any NaNs.
data = data.sort_values('datetime') #Sort the data chronologically.
data = data.reset_index(drop=True) #Reset the index.
print('PAR data for ' + site + ' available.')
print('PAR datetime in UTC.')
print('PAR pressure in dbars.')
print('PAR par in umol photons m^-2 s^-1.')
return data
def cspp_velpt(nc):
# OOI ion-function for VELPT-J assumes data from the instrument is output in mm/s, when it is actually output in m/s.
# https://github.com/oceanobservatories/ion-functions/blob/master/ion_functions/data/vel_functions.py
    # The simple fix for now is to multiply the returned velocity values by 1000 to get them back into m/s.
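    # A minimal sketch of that correction (the column names below are hypothetical --
    # substitute whatever velocity fields this function ends up returning):
    #   data['velocity_east'] = data['velocity_east'] * 1000
    #   data['velocity_north'] = data['velocity_north'] * 1000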
site,depth = OOIM2M.find_site(nc)
dfnc = pd.DataFrame(data = {'location':nc})
velpt = dfnc.loc[~dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
# ctd = dfnc.loc[dfnc['location'].str.contains('ctdpf_j_cspp_instrument')]
data = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import os
import csv
import requests
import pandas as pd
import time
import datetime
from stockstats import StockDataFrame as Sdf
from ta import add_all_ta_features
from ta.utils import dropna
from config import config
def load_dataset(*, file_name: str) -> pd.DataFrame:
"""
load csv dataset from path
:return: (df) pandas dataframe
"""
#_data = pd.read_csv(f"{config.DATASET_DIR}/{file_name}")
_data = pd.read_csv(file_name)
return _data
def data_split(df,start,end):
"""
split the dataset into training or testing using date
:param data: (df) pandas dataframe, start, end
:return: (df) pandas dataframe
"""
data = df[(df.datadate >= start) & (df.datadate < end)]
data=data.sort_values(['datadate','tic'],ignore_index=True)
#data = data[final_columns]
data.index = data.datadate.factorize()[0]
return data
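# A minimal, self-contained usage sketch of data_split (synthetic frame and
# illustrative yyyymmdd integer dates -- not part of the original pipeline):
_demo_prices = pd.DataFrame({
    'datadate': [20200101, 20200102, 20200103, 20200101, 20200102, 20200103],
    'tic': ['AAA', 'AAA', 'AAA', 'BBB', 'BBB', 'BBB'],
    'adjcp': [10.0, 10.5, 11.0, 20.0, 19.5, 19.0],
})
_demo_train = data_split(_demo_prices, 20200101, 20200103)  # keeps the first two days per ticker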
def calcualte_price(df):
"""
    calculate adjusted close price, open-high-low prices and volume
:param data: (df) pandas dataframe
:return: (df) pandas dataframe
"""
data = df.copy()
data = data[['datadate', 'tic', 'prccd', 'ajexdi', 'prcod', 'prchd', 'prcld', 'cshtrd']]
data['ajexdi'] = data['ajexdi'].apply(lambda x: 1 if x == 0 else x)
data['adjcp'] = data['prccd'] / data['ajexdi']
data['open'] = data['prcod'] / data['ajexdi']
data['high'] = data['prchd'] / data['ajexdi']
data['low'] = data['prcld'] / data['ajexdi']
data['volume'] = data['cshtrd']
data = data[['datadate', 'tic', 'adjcp', 'open', 'high', 'low', 'volume']]
data = data.sort_values(['tic', 'datadate'], ignore_index=True)
return data
def add_technical_indicator(df):
"""
    calculate technical indicators
    use stockstats package to add technical indicators
:param data: (df) pandas dataframe
:return: (df) pandas dataframe
"""
stock = Sdf.retype(df.copy())
stock['close'] = stock['adjcp']
unique_ticker = stock.tic.unique()
macd = pd.DataFrame()
rsi = pd.DataFrame()
cci = pd.DataFrame()
dx = pd.DataFrame()
#temp = stock[stock.tic == unique_ticker[0]]['macd']
for i in range(len(unique_ticker)):
## macd
temp_macd = stock[stock.tic == unique_ticker[i]]['macd']
temp_macd = pd.DataFrame(temp_macd)
macd = macd.append(temp_macd, ignore_index=True)
## rsi
temp_rsi = stock[stock.tic == unique_ticker[i]]['rsi_30']
temp_rsi = pd.DataFrame(temp_rsi)
rsi = rsi.append(temp_rsi, ignore_index=True)
## cci
temp_cci = stock[stock.tic == unique_ticker[i]]['cci_30']
temp_cci = pd.DataFrame(temp_cci)
cci = cci.append(temp_cci, ignore_index=True)
## adx
temp_dx = stock[stock.tic == unique_ticker[i]]['dx_30']
temp_dx = pd.DataFrame(temp_dx)
dx = dx.append(temp_dx, ignore_index=True)
df['macd'] = macd
df['rsi'] = rsi
df['cci'] = cci
df['adx'] = dx
return df
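# A hypothetical usage sketch (the names below are illustrative, not part of the
# module): the price frame built by calcualte_price above is the expected input,
# and the returned frame gains the macd / rsi / cci / adx columns.
#   price_df = calcualte_price(raw_compustat_df)
#   price_df = add_technical_indicator(price_df)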
def load_stocks_data(stocks_data_file):
url = 'https://finfo-api.vndirect.com.vn/v4/stocks?q=type:STOCK~status:LISTED&fields=code,type,floor,isin,status,companyName,companyNameEng,shortName,listedDate,indexCode,industryName&size=3000'
    print('retrieving data from {}'.format(url))
response = requests.get(url=url)
data = response.json()
stocks_data = data['data']
print('got stocks data with {} elements'.format(len(stocks_data)))
stocks_df = pd.DataFrame(stocks_data)
stocks_df.to_csv(stocks_data_file, index=False, encoding='utf-8')
print('saved stocks data to {}'.format(stocks_data_file))
def to_timestamp(date_str):
    timestamp = int(time.mktime(datetime.datetime.strptime(date_str, "%d/%m/%Y").timetuple()))
    return timestamp
def to_date_str(timestamp):
date = datetime.datetime.utcfromtimestamp(timestamp).strftime("%Y%m%d")
return date
def get_stock_price_part(stock_code, start_date, end_date):
start_time = to_timestamp(start_date)
end_time = to_timestamp(end_date)
params = {
"resolution": 'D',
"symbol": stock_code,
"from": start_time,
"to": end_time
}
url = 'https://dchart-api.vndirect.com.vn/dchart/history'
    print('retrieving price history for {} period {} - {}'.format(stock_code, start_date, end_date))
response = requests.get(url=url, params=params)
data = response.json()
columns = {
"tic": stock_code,
"datadate": data["t"],
"adjcp": data["c"],
"close": data["c"],
"open": data["o"],
"high": data["h"],
"low": data["l"],
"volume": data["v"],
}
df = | pd.DataFrame(columns) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
API to get FX prices from TrueFX
http://www.truefx.com/
http://www.truefx.com/?page=download&description=api
http://www.truefx.com/dev/data/TrueFX_MarketDataWebAPI_DeveloperGuide.pdf
"""
import logging
logger = logging.getLogger(__name__)
import click
import os
import requests
import requests_cache
import datetime
import pandas as pd
#pd.set_option('max_rows', 10)
pd.set_option('expand_frame_repr', False)
| pd.set_option('max_columns', 8) | pandas.set_option |
import itertools
import json
from copy import deepcopy
import networkx as nx
import numpy as np
import pandas as pd
import syspy.assignment.raw as assignment_raw
from quetzal.engine import engine
from quetzal.engine.subprocesses import filepaths
from quetzal.os import parallel_call
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import dijkstra
from syspy.routing.frequency import graph as frequency_graph
from syspy.skims import skims
from syspy.spatial import spatial
from tqdm import tqdm
def get_path(predecessors, i, j):
path = [j]
k = j
p = 0
while p != -9999:
k = p = predecessors[i, k]
path.append(p)
return path[::-1][1:]
def get_reversed_path(predecessors, i, j):
path = [j]
k = j
p = 0
while p != -9999:
k = p = predecessors[i, k]
path.append(p)
return path[:-1]
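# A small, self-contained sketch (toy 3-node chain, not part of the model) of how
# the predecessor matrix returned by scipy's dijkstra feeds get_path:
_toy_graph = csr_matrix(np.array([
    [0.0, 1.0, 0.0],
    [0.0, 0.0, 2.0],
    [0.0, 0.0, 0.0],
]))
_toy_dist, _toy_pred = dijkstra(
    csgraph=_toy_graph, directed=True, indices=[0], return_predecessors=True)
# get_path walks _toy_pred backwards from the target until the -9999 sentinel:
assert get_path(_toy_pred, 0, 2) == [0, 1, 2]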
def path_and_duration_from_graph(
nx_graph,
pole_set,
od_set=None,
sources=None,
reversed_nx_graph=None,
reverse=False,
ntlegs_penalty=1e9,
cutoff=np.inf,
**kwargs
):
sources = pole_set if sources is None else sources
source_los = sparse_los_from_nx_graph(
nx_graph, pole_set, sources=sources,
cutoff=cutoff + ntlegs_penalty, od_set=od_set, **kwargs
)
source_los['reversed'] = False
reverse = reverse or reversed_nx_graph is not None
if reverse:
if reversed_nx_graph is None:
reversed_nx_graph = nx_graph.reverse()
try:
reversed_od_set = {(d, o) for o, d in od_set}
except TypeError:
reversed_od_set = None
target_los = sparse_los_from_nx_graph(
reversed_nx_graph, pole_set, sources=sources,
cutoff=cutoff + ntlegs_penalty, od_set=reversed_od_set, **kwargs)
target_los['reversed'] = True
target_los['path'] = target_los['path'].apply(lambda x: list(reversed(x)))
target_los[['origin', 'destination']] = target_los[['destination', 'origin']]
los = pd.concat([source_los, target_los]) if reverse else source_los
los.loc[los['origin'] != los['destination'], 'gtime'] -= ntlegs_penalty
tuples = [tuple(l) for l in los[['origin', 'destination']].values.tolist()]
los = los.loc[[t in od_set for t in tuples]]
return los
def sparse_los_from_nx_graph(
nx_graph,
pole_set,
sources=None,
cutoff=np.inf,
od_set=None,
):
sources = pole_set if sources is None else sources
if od_set is not None:
sources = {o for o, d in od_set if o in sources}
# INDEX
pole_list = sorted(list(pole_set)) # fix order
source_list = [zone for zone in pole_list if zone in sources]
nodes = list(nx_graph.nodes)
node_index = dict(zip(nodes, range(len(nodes))))
zones = [node_index[zone] for zone in source_list]
source_index = dict(zip(source_list, range(len(source_list))))
zone_index = dict(zip(pole_list, range(len(pole_list))))
# SPARSE GRAPH
sparse = nx.to_scipy_sparse_matrix(nx_graph)
graph = csr_matrix(sparse)
dist_matrix, predecessors = dijkstra(
csgraph=graph,
directed=True,
indices=zones,
return_predecessors=True,
limit=cutoff
)
# LOS LAYOUT
df = pd.DataFrame(dist_matrix)
df.index = [zone for zone in pole_list if zone in sources]
df.columns = list(nx_graph.nodes)
df.columns.name = 'destination'
df.index.name = 'origin'
stack = df[pole_list].stack()
stack.name = 'gtime'
los = stack.reset_index()
# QUETZAL FORMAT
los = los.loc[los['gtime'] < np.inf]
if od_set is not None:
tuples = [tuple(l) for l in los[['origin', 'destination']].values.tolist()]
los = los.loc[[t in od_set for t in tuples]]
# BUILD PATH FROM PREDECESSORS
od_list = los[['origin', 'destination']].values.tolist()
paths = [
[nodes[i] for i in get_path(predecessors, source_index[o], node_index[d])]
for o, d in od_list
]
los['path'] = paths
return los
def sparse_matrix(edges):
nodelist = {e[0] for e in edges}.union({e[1] for e in edges})
nlen = len(nodelist)
index = dict(zip(nodelist, range(nlen)))
coefficients = zip(*((index[u], index[v], w) for u, v, w in edges))
row, col, data = coefficients
return csr_matrix((data, (row, col)), shape=(nlen, nlen)), index
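# A small usage sketch (toy edge list) of sparse_matrix: it returns the csr matrix
# together with the node -> row/column index mapping used by the functions below.
_toy_edges = [('a', 'b', 1.0), ('b', 'c', 2.0)]
_toy_matrix, _toy_index = sparse_matrix(_toy_edges)  # 3x3 csr_matrix, {'a', 'b', 'c'} -> 0..2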
def link_edges(links, boarding_time=None, alighting_time=None):
assert not (boarding_time is not None and 'boarding_time' in links.columns)
boarding_time = 0 if boarding_time is None else boarding_time
assert not (alighting_time is not None and 'alighting_time' in links.columns)
alighting_time = 0 if alighting_time is None else alighting_time
l = links.copy()
l['index'] = l.index
l['next'] = l['link_sequence'] + 1
if 'cost' not in l.columns:
l['cost'] = l['time'] + l['headway'] / 2
if 'boarding_time' not in l.columns:
l['boarding_time'] = boarding_time
if 'alighting_time' not in l.columns:
l['alighting_time'] = alighting_time
l['total_time'] = l['boarding_time'] + l['cost']
boarding_edges = l[['a', 'index', 'total_time']].values.tolist()
alighting_edges = l[['index', 'b', 'alighting_time']].values.tolist()
transit = pd.merge(
l[['index', 'next', 'trip_id']],
l[['index', 'link_sequence', 'trip_id', 'time']],
left_on=['trip_id', 'next'],
right_on=['trip_id', 'link_sequence'],
)
transit_edges = transit[['index_x', 'index_y', 'time']].values.tolist()
return boarding_edges + transit_edges + alighting_edges
def adjacency_matrix(
links,
ntlegs,
footpaths,
ntlegs_penalty=1e9,
boarding_time=None,
alighting_time=None,
**kwargs
):
ntlegs = ntlegs.copy()
# ntlegs and footpaths
ntlegs.loc[ntlegs['direction'] == 'access', 'time'] += ntlegs_penalty
ntleg_edges = ntlegs[['a', 'b', 'time']].values.tolist()
footpaths_edges = footpaths[['a', 'b', 'time']].values.tolist()
edges = link_edges(links, boarding_time, alighting_time)
edges += footpaths_edges + ntleg_edges
return sparse_matrix(edges)
def los_from_graph(
csgraph, # graph is assumed to be a scipy csr_matrix
node_index=None,
pole_set=None,
sources=None,
cutoff=np.inf,
od_set=None,
ntlegs_penalty=1e9
):
sources = pole_set if sources is None else sources
if od_set is not None:
sources = {o for o, d in od_set if o in sources}
# INDEX
pole_list = sorted(list(pole_set)) # fix order
source_list = [zone for zone in pole_list if zone in sources]
zones = [node_index[zone] for zone in source_list]
source_index = dict(zip(source_list, range(len(source_list))))
zone_index = dict(zip(pole_list, range(len(pole_list))))
# SPARSE GRAPH
dist_matrix, predecessors = dijkstra(
csgraph=csgraph,
directed=True,
indices=zones,
return_predecessors=True,
limit=cutoff + ntlegs_penalty
)
# LOS LAYOUT
df = pd.DataFrame(dist_matrix)
indexed_nodes = {v: k for k, v in node_index.items()}
df.rename(columns=indexed_nodes, inplace=True)
df.index = [zone for zone in pole_list if zone in sources]
df.columns.name = 'destination'
df.index.name = 'origin'
stack = df[pole_list].stack()
stack.name = 'gtime'
los = stack.reset_index()
# QUETZAL FORMAT
los = los.loc[los['gtime'] < np.inf]
los.loc[los['origin'] != los['destination'], 'gtime'] -= ntlegs_penalty
if od_set is not None:
tuples = [tuple(l) for l in los[['origin', 'destination']].values.tolist()]
los = los.loc[[t in od_set for t in tuples]]
# BUILD PATH FROM PREDECESSORS
od_list = los[['origin', 'destination']].values.tolist()
paths = [
[indexed_nodes[i] for i in get_path(predecessors, source_index[o], node_index[d])]
for o, d in od_list
]
los['path'] = paths
return los
def paths_from_graph(
csgraph,
node_index,
sources,
targets,
od_set=None,
cutoff=np.inf
):
reverse = False
if od_set:
o_set = {o for o, d in od_set}
d_set = {d for o, d in od_set}
sources = [s for s in sources if s in o_set]
targets = [t for t in targets if t in d_set]
if len(sources) > len(targets):
reverse = True
sources, targets, csgraph = targets, sources, csgraph.T
# INDEX
source_indices = [node_index[s] for s in sources]
target_indices = [node_index[t] for t in targets]
source_index = dict(zip(sources, range(len(sources))))
index_node = {v: k for k, v in node_index.items()}
    # DIJKSTRA
dist_matrix, predecessors = dijkstra(
csgraph=csgraph,
directed=True,
indices=source_indices,
return_predecessors=True,
limit=cutoff
)
dist_matrix = dist_matrix.T[target_indices].T
df = pd.DataFrame(dist_matrix, index=sources, columns=targets)
df.columns.name = 'destination'
df.index.name = 'origin'
if od_set is not None:
mask_series = pd.Series(0, index=pd.MultiIndex.from_tuples(list(od_set)))
mask = mask_series.unstack().loc[sources, targets]
df += mask
stack = df.stack()
stack.name = 'length'
odl = stack.reset_index()
od_list = odl[['origin', 'destination']].values
path = get_reversed_path if reverse else get_path
paths = [
[
index_node[i] for i in
path(predecessors, source_index[o], node_index[d])
]
for o, d in od_list
]
odl['path'] = paths
if reverse:
odl[['origin', 'destination']] = odl[['destination', 'origin']]
return odl
class PublicPathFinder:
def __init__(self, model, walk_on_road=False):
self.zones = model.zones.copy()
self.links = engine.graph_links(model.links.copy())
if walk_on_road:
road_links = model.road_links.copy()
road_links['time'] = road_links['walk_time']
self.footpaths = | pd.concat([model.footpaths, road_links, model.road_to_transit]) | pandas.concat |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from sklearn.metrics import roc_auc_score, accuracy_score
from sklearn.externals import joblib
import os
def log_loss(predictions,actual,eps=1e-15):
    '''Take an array of prediction probabilities (clipped to avoid undefined values) and measure accuracy while
    also factoring in prediction confidence.'''
#assert (max(predictions)<=1 and min(predictions)>=0), 'Please make sure to use predict_proba'
p_clipped = np.clip(predictions,eps,1-eps)
loss = -1 * np.mean((actual * np.log(p_clipped)) + ((1-actual) * np.log(1-p_clipped)))
return loss
def sigmoid(array):
sig = 1 / (1 + np.exp(-array))
return sig
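# A short numeric sketch (toy arrays, not part of the model) of why log loss is used:
# a confident wrong prediction is penalised far more heavily than a confident correct one.
_actual = np.array([1, 0])
_good_preds = np.array([0.95, 0.05])   # confident and correct -> loss ~ 0.05
_bad_preds = np.array([0.05, 0.95])    # confident and wrong   -> loss ~ 3.0
assert log_loss(_good_preds, _actual) < log_loss(_bad_preds, _actual)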
class BinaryClassifier:
def __init__(self,regularization=None):
'''initializing the object with the option to select regularization
Regularization will be a dict with type (ridge/lasso) and lambda value'''
if regularization is None:
self.penalty_type = None
self.penalty_lambda_ = 0
else:
self.penalty_type = list(regularization.keys())[0]
self.penalty_lambda_ = regularization.get(self.penalty_type)
def _gradient_descent(self, X, y, lr=.1, pandas=False, full_history=False, weights=None, early_stopping=True):
        if pandas or (isinstance(X, pd.DataFrame) and isinstance(y, pd.DataFrame)):
            # Capture the column names before converting the frames to numpy arrays.
            Xnames = X.columns
            ynames = y.columns
            X = X.values
            y = y.values
        else:
            Xnames = [i for i in range(X.shape[1])]
        '''learning rate for gradient descent algorithm'''
self.lr = lr
m = len(X)
n_features = X.shape[1]
'''creating the weights, which will typically be all zeros'''
if weights is None:
self.init_weights = np.zeros(n_features)
else:
self.init_weights = weights
        if self.penalty_type == 'lasso':
reg_loss = (self.penalty_lambda_/m)
reg_gradient = (-2*self.penalty_lambda_/m)
        elif self.penalty_type == 'ridge':
reg_loss = (self.penalty_lambda_/2)
reg_gradient = (-2*self.penalty_lambda_/m)
else:
reg_loss = 0
reg_gradient = 0
weights_list = []
scores_list = []
weights = self.init_weights
for i in range(5000):
            if self.penalty_type == 'ridge':
gradient_suffix = reg_gradient * weights
loss_suffix = np.sum(reg_loss * np.square(weights)/m)
            elif self.penalty_type == 'lasso':
gradient_suffix = reg_gradient * np.where(weights==0,0,np.where(weights>0,1,-1))
loss_suffix = np.sum(reg_loss * np.abs(weights)/m)
else:
gradient_suffix = 0
loss_suffix = 0
lr = self.lr
'''p = prediction probabilities (0 < p < 1)'''
p = sigmoid(np.dot(X, weights))
error = p - y
gradient = (np.dot(X.T,error) * lr) /m
weights = weights - gradient + gradient_suffix
p = sigmoid(np.dot(X, weights))
preds = np.round(p)
loss = log_loss(p, y) + loss_suffix
auc = roc_auc_score(y, p)
acc = accuracy_score(y,preds)
weights_list.append([*weights])
scores_list.append([auc,loss,acc])
'''Early Stopping: if AUC does not change more than 0.01%, then break'''
if early_stopping:
if i >50:
if abs((scores_list[i][-3] - scores_list[i-50][-3]) / scores_list[i][-3]) < 0.0001:
break
scores_df = pd.DataFrame(scores_list,columns=['auc','loss','acc'])
'''Finding the index with highest AUC score'''
highest_auc = scores_df.iloc[:,0].idxmax(axis=0)
final_weights = weights_list[highest_auc]
#self.weights_final = weights_list[highest_auc]
weights_df = | pd.DataFrame(weights_list,columns=Xnames) | pandas.DataFrame |
"""unit test for loanpy.loanfinder.py (2.0 BETA) for pytest 7.1.1"""
from inspect import ismethod
from os import remove
from pathlib import Path
from unittest.mock import patch, call
from pandas import DataFrame, RangeIndex, Series, read_csv
from pandas.testing import (assert_frame_equal, assert_index_equal,
assert_series_equal)
from pytest import raises
from loanpy.loanfinder import Search, gen, read_data, NoPhonMatch
from loanpy import loanfinder as lf
def test_read_data():
"""test if data is being read correctly"""
# setup expected outcome, path, input-dataframe, mock pandas.read_csv
srsexp = Series(["a", "b", "c"], name="col1", index=[0, 1, 1])
path = Path(__file__).parent / "test_read_data.csv"
dfin = DataFrame({"col1": ["a", "b, c", "wrong clusters",
"wrong phonotactics"], "col2": [1, 2, 3, 4]})
with patch("loanpy.loanfinder.read_csv") as read_csv_mock:
read_csv_mock.return_value = dfin
# assert that the actual outcome equals the expected outcome
assert_series_equal(read_data(path, "col1"), srsexp)
# assert mock call to read_csv_mock was correct
assert read_csv_mock.call_args_list[0] == call(
path, encoding="utf-8", usecols=["col1"])
# test read recip
# setup: overwrite expected outcome and input-dataframe, mock
# pandas.read_csv
srsexp = Series(["(a)?", "(b|c)"], name="col1", index=[1, 3])
dfin = DataFrame({"col1": ["wrong vowel harmony", "(a)?",
"wrong phonotactics", "(b|c)"], "col2": [1, 2, 3, 4]})
with patch("loanpy.loanfinder.read_csv") as read_csv_mock:
read_csv_mock.return_value = dfin
# assert expected and actual outcome are the same pandas Series
assert_series_equal(read_data(path, "col1"), srsexp)
# assert mock was called with correct input
assert read_csv_mock.call_args_list[0] == call(
path, encoding="utf-8", usecols=["col1"])
# tear down
del path, dfin, srsexp
def test_gen():
"""test if generator yields the right things"""
# set up mock-tqdm (which is a progress bar)
def tqdm_mock(iterable, prefix):
"""this just returns the input and remembers it"""
tqdm_mock.called_with = (iterable, prefix)
return iterable
tqdm = lf.tqdm # remember the original tqdm to plug back in later
lf.tqdm = tqdm_mock # overwrite real tqdm with mock-tqdm function
# set up: create custom class
class SomeMonkeyClass:
def __init__(self):
self.somefunc_called_with = []
def somefunc(self, *args):
arglist = [*args]
self.somefunc_called_with.append(arglist)
return arglist[0] + arglist[1]
# set up: create instance of mock class
somemockclass = SomeMonkeyClass()
# assert generator yields/returns the expected outcome
assert list(gen([2, 3, 4], [4, 5, 6],
somemockclass.somefunc, "lol", "rofl")) == [6, 8, 10]
# assert 2 mock calls: tqdm and somefunc in SomeMonkeyClass
assert tqdm_mock.called_with == ([2, 3, 4], "lol")
assert somemockclass.somefunc_called_with == [
[2, 4, "rofl"], [3, 5, "rofl"], [4, 6, "rofl"]]
# tear down
lf.tqdm = tqdm # plug back in the original tqdm
del tqdm, somemockclass, tqdm_mock, SomeMonkeyClass
def test_init():
"""test if class Search is initiated correctly"""
# set up mock panphon class with mock edit distance
class DistanceMonkey:
def hamming_feature_edit_distance(): pass
# set up mock Adrc class for get_nse
class AdrcMonkey:
def get_nse(self, *args): pass
# set up mock function for semantic distance measure
def mock_gensim_mw():
return "sthsth"
# set up vars 4 exped outcome, set up mock instance of DistanceMonkey class
srsad = Series(["a", "b", "c"], name="adapted", index=[0, 1, 1])
srsrc = | Series(["a", "b", "c"], name="adapted", index=[0, 1, 1]) | pandas.Series |
"""
Daily Class
Meteorological data provided by Meteostat (https://dev.meteostat.net)
under the terms of the Creative Commons Attribution-NonCommercial
4.0 International Public License.
The code is licensed under the MIT license.
"""
import os
from copy import copy
from datetime import datetime
from typing import Union
from numpy import NaN
import numpy as np
import pandas as pd
from meteostat.core import Core
from meteostat.point import Point
class Daily(Core):
"""
Retrieve daily weather observations for one or multiple weather stations or
a single geographical point
"""
# The cache subdirectory
cache_subdir: str = 'daily'
# The list of weather Stations
stations = None
# The start date
start: datetime = None
# The end date
end: datetime = None
# Include model data?
model: bool = True
# The data frame
data = pd.DataFrame()
# Columns
_columns: list = [
'date',
'tavg',
'tmin',
'tmax',
'prcp',
'snow',
'wdir',
'wspd',
'wpgt',
'pres',
'tsun'
]
    # Data types
_types: dict = {
'tavg': 'float64',
'tmin': 'float64',
'tmax': 'float64',
'prcp': 'float64',
'snow': 'float64',
'wdir': 'float64',
'wspd': 'float64',
'wpgt': 'float64',
'pres': 'float64',
'tsun': 'float64'
}
# Columns for date parsing
_parse_dates: dict = {
'time': [0]
}
# Default aggregation functions
_aggregations: dict = {
'tavg': 'mean',
'tmin': 'min',
'tmax': 'max',
'prcp': 'sum',
'snow': 'mean',
'wdir': Core._degree_mean,
'wspd': 'mean',
'wpgt': 'max',
'pres': 'mean',
'tsun': 'sum'
}
def _load(
self,
station: str
) -> None:
"""
Load file from Meteostat
"""
# File name
file = 'daily/' + ('full' if self.model else 'obs') + \
'/' + station + '.csv.gz'
# Get local file path
path = self._get_file_path(self.cache_subdir, file)
# Check if file in cache
if self.max_age > 0 and self._file_in_cache(path):
# Read cached data
df = pd.read_pickle(path)
else:
# Get data from Meteostat
df = self._load_handler(
file,
self._columns,
self._types,
self._parse_dates)
# Validate Series
df = self._validate_series(df, station)
# Save as Pickle
if self.max_age > 0:
df.to_pickle(path)
# Filter time period and append to DataFrame
if self.start and self.end:
# Get time index
time = df.index.get_level_values('time')
# Filter & append
self.data = self.data.append(
df.loc[(time >= self.start) & (time <= self.end)])
else:
# Append
self.data = self.data.append(df)
def _get_data(self) -> None:
"""
Get all required data
"""
if len(self.stations) > 0:
# List of datasets
datasets = []
for station in self.stations:
datasets.append((
str(station),
))
# Data Processing
self._processing_handler(datasets, self._load, self.max_threads)
else:
# Empty DataFrame
self.data = pd.DataFrame(columns=[*self._types])
def _resolve_point(
self,
method: str,
stations: pd.DataFrame,
alt: int,
adapt_temp: bool
) -> None:
"""
Project weather station data onto a single point
"""
if self.stations.size == 0:
return None
if method == 'nearest':
self.data = self.data.groupby(
pd.Grouper(level='time', freq='1D')).agg('first')
else:
# Join score and elevation of involved weather stations
data = self.data.join(
stations[['score', 'elevation']], on='station')
# Adapt temperature-like data based on altitude
if adapt_temp:
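                # e.g. a station 300 m above the target point gets (2 / 3) * (300 / 100) = 2 degC
                # added here, projecting its colder readings down to the warmer, lower point
                # (roughly the standard atmospheric lapse rate).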
data.loc[data['tavg'] != np.NaN, 'tavg'] = data['tavg'] + \
((2 / 3) * ((data['elevation'] - alt) / 100))
data.loc[data['tmin'] != np.NaN, 'tmin'] = data['tmin'] + \
((2 / 3) * ((data['elevation'] - alt) / 100))
data.loc[data['tmax'] != np.NaN, 'tmax'] = data['tmax'] + \
((2 / 3) * ((data['elevation'] - alt) / 100))
# Exclude non-mean data & perform aggregation
excluded = data['wdir']
excluded = excluded.groupby(
pd.Grouper(level='time', freq='1D')).agg('first')
# Aggregate mean data
data = data.groupby(
pd.Grouper(level='time', freq='1D')).apply(self._weighted_average)
# Drop RangeIndex
data.index = data.index.droplevel(1)
# Merge excluded fields
data['wdir'] = excluded
# Drop score and elevation
self.data = data.drop(['score', 'elevation'], axis=1).round(1)
# Set placeholder station ID
self.data['station'] = 'XXXXX'
self.data = self.data.set_index(
['station', self.data.index.get_level_values('time')])
self.stations = pd.Index(['XXXXX'])
def __init__(
self,
loc: Union[pd.DataFrame, Point, list, str],
start: datetime = None,
end: datetime = None,
model: bool = True
) -> None:
# Set list of weather stations
if isinstance(loc, pd.DataFrame):
self.stations = loc.index
elif isinstance(loc, Point):
stations = loc.get_stations('hourly', start, end)
self.stations = stations.index
else:
if not isinstance(loc, list):
loc = [loc]
self.stations = pd.Index(loc)
# Set start date
self.start = start
# Set end date
self.end = end
# Set model
self.model = model
# Get data for all weather stations
self._get_data()
# Interpolate data
if isinstance(loc, Point):
self._resolve_point(loc.method, stations, loc.alt, loc.adapt_temp)
# Clear cache
if self.max_age > 0:
self.clear_cache()
def normalize(self) -> 'Daily':
"""
Normalize the DataFrame
"""
# Create temporal instance
temp = copy(self)
# Create result DataFrame
result = pd.DataFrame(columns=temp._columns[1:])
# Go through list of weather stations
for station in temp.stations:
# Create data frame
df = | pd.DataFrame(columns=temp._columns[1:]) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # Phone Plan Comparison Project
#
#
# - Telecom operator Megaline offers their clients two prepaid plans, Surf and Ultimate. Both plans include extra charges if the client's usage goes over the limit for call minutes, data, or number of messages.
# - The commercial department requested we perform a preliminary analysis of the two plans for the year 2018 to determine which brings in more revenue.
# - The commercial department will use our report to adjust the advertising budget.
# - File(s) accessed:
# - /datasets/megaline_calls.csv
# - /datasets/megaline_internet.csv
# - /datasets/megaline_messages.csv
# - /datasets/megaline_plans.csv
# - /datasets/megaline_users.csv
# - We will [Open the data file(s) and study the general information](#general_overview)
# - Summarize observations in [Introductory conclusion section](#general_overview_conclusion).
#
#
# * **Project Plan**
#
# 1. **In the [Data preprocessing](#data_preprocessing) stage**:
# * We will identify missing values and fill in as appropriate.
#     * We will remove duplicates.
# * We will study data types. Change data types where needed.
#     * We need to check users_df.churn_date and consider how to treat users who have cancelled their plan
# * We will summarize observations, actions taken, and rationales in [Data preprocessing conclusion section](#data_preprocessing_conclusion).
# 2. **In the [Calculations](#calculations) stage**:
#     * We need to round each calls_df.duration up to the next whole minute
# * We need to calculate the number of calls per month per user
# * We need to calculate the minutes used per month per user
# * We need to calculate the volume of data per month per user
#     * We need to round each user's monthly aggregate of mb_used up from megabytes to gigabytes
# * We need to calculate the number of text messages sent per month per user
# * We will summarize actions taken and rationales in [Calculations conclusion section](#calculations_conclusion).
# 3. **In the [Exploratory data analysis](#exploratory_data_analysis) stage**:
# * We will test the null hypothesis:
# - The average revenue from clients on the Surf plan - the average revenue from clients on the Ultimate plan = 0.
# * We will test the null hypothesis:
# - The average revenue from clients in NY-NJ area - the average revenue from clients anywhere else = 0.
# * We will summarize observations, actions taken, and rationales in [Exploratory data analysis conclusion section](#exploratory_data_analysis_conclusion).
# 4. **In the [Overall conclusion](#conclusion)**:
# * We will summarize the project's analysis.
#
#
# * **Table of Contents** <a class="anchor" id="table_of_contents"></a>
#
# 1. **[Data preprocessing](#data_preprocessing)**
# * 1.1 [Data preprocessing conclusion section](#data_preprocessing_conclusion)
# 2. **[Calculations](#calculations)**
# * 2.1 [Calculations conclusion section](#calculations_conclusion)
# 3. **[Carry out exploratory data analysis](#exploratory_data_analysis)**
# * 3.1 [Exploratory data analysis conclusion section](#exploratory_data_analysis_conclusion)
# 4. **[Overall conclusion](#conclusion)**
#
# <a class="anchor" id="general_overview"></a>
# **Open the data file and study the general information**
# In[1]:
# import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats as st
# In[2]:
# import sys and insert code to ignore warnings
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
# In[3]:
# load the data
try:
calls_df = pd.read_csv('/datasets/megaline_calls.csv')
internet_df = pd.read_csv('/datasets/megaline_internet.csv')
plans_df = pd.read_csv('/datasets/megaline_plans.csv')
messages_df = pd.read_csv('/datasets/megaline_messages.csv')
users_df = pd.read_csv('/datasets/megaline_users.csv')
except:
print('ERROR: Unable to find or access file.')
# In[4]:
# print the first 5 rows
print('\nRows of plans table')
plans_df.head()
# In[5]:
# df general info
print('\nGeneral info of plans table\n')
print(plans_df.info())
# check df for duplicates
print('\nNumber of duplicate rows:', plans_df.duplicated().sum())
# check df for shape
print('\nNumber rows and columns:', plans_df.shape)
# The **plans table** has 2 rows in 8 columns and there are no missing values and no duplicate rows.
#
# The information matches the information given in the program brief.
#
# Column | Data Type | Description
# ------------ | ------------- | -----------------
# messages_included | int64 | monthly text allowance
# mb_per_month_included | int64 | data volume allowance (in megabytes)
# minutes_included | int64 | monthly minute allowance
# usd_monthly_pay | int64 | monthly charge in US dollars
# usd_per_gb | int64 | price per extra gigabyte of data after exceeding the package limits (1 GB = 1024 megabytes)
# usd_per_message | float64 | price per text after exceeding the package limit
# usd_per_minute | float64 | price per minute after exceeding the package limits
# plan_name | object | calling plan name
#
# **plans_df** We will change and/or optimize the datatypes of messages_included, mb_per_month_included, minutes_included, usd_monthly_pay, usd_per_gb, usd_per_message, usd_per_minute, plan_name.
# In[6]:
# print the first 5 rows of the dataframe
print('\nFirst 5 rows of calls table')
calls_df.head()
# In[7]:
# df general info
print('\nGeneral info for calls table\n')
print(calls_df.info())
# check df for duplicates
print('\nNumber of duplicate rows:', calls_df.duplicated().sum())
# check df for shape
print('\nNumber rows and columns:', calls_df.shape)
# check df cols for unique values
print('\nNumber of unique id:', calls_df.id.nunique())
# check df cols for unique values
print('\nNumber of unique user_id:', calls_df.user_id.nunique())
# In[8]:
# check general statistics for dataframe
print('Statistics for duration in calls table')
calls_df.duration.describe()
# In[9]:
# investigate mean and median of duration
duration_mean = calls_df.duration.mean()
duration_median = calls_df.duration.median()
# Percentage difference = Absolute difference / Average x 100
pct = abs(((duration_mean - duration_median)/ ((duration_mean + duration_median)/2)*100)).round(2)
print('The mean of duration is', duration_mean.round(2), 'and the median is:', duration_median)
print('That is a difference of '+ str(pct) +'%.')
# In[10]:
# overall info for dataframe
print('Overall info for calls table')
calls_df.sort_values(by='id', na_position='first')
# The **calls table** has 137735 rows in 4 columns and there are no missing values and no duplicate rows.
# The duration mean and median differ by 12.04%, which may need to be addressed in the preprocessing section.
#
# The table catalogs 137735 call sessions from 481 unique users.
#
# This table provides useful information on call duration, but we need to be mindful that the duration is in fractions of minutes.
# In the calculation section we need to round this up to the next integer.
#
# Column | Data Type | Description
# ------------ | ------------- | -----------------
# id | object | unique call identifier
# call_date | object | call date
# duration | float64 | call duration (in minutes)
# user_id | int64 | the identifier of the user making the call
#
# **calls_df** We will change and/or optimize the datatypes of call_date, duration, and user_id. id will not be changed because it doesn't have low cardinality.
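# A non-destructive preview (sketch only; the actual rounding is assumed to be
# applied in the Calculations section) of whole-minute billing rounding:
_rounded_duration_preview = np.ceil(calls_df['duration'])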
# In[11]:
# print the first 5 rows
print('\nFirst 5 rows of internet table')
internet_df.head()
# In[12]:
# df general info
print('\nGeneral info for internet table\n')
print(internet_df.info())
# check df for duplicates
print('\nNumber of duplicate rows:', internet_df.duplicated().sum())
# check df for shape
print('\nNumber rows and columns:', internet_df.shape)
# check df cols for unique values
print('\nNumber of unique id:', internet_df.id.nunique())
# check df cols for unique values
print('\nNumber of unique user_id:', internet_df.user_id.nunique())
# In[13]:
# check general statistics for dataframe
print('Statistics for mb_used internet table')
internet_df.mb_used.describe()
# In[14]:
# investigate mean and median of mb_used
mb_used_mean = internet_df.mb_used.mean()
mb_used_median = internet_df.mb_used.median()
# Percentage difference = Absolute difference / Average x 100
pct = abs(((mb_used_mean - mb_used_median)/ ((mb_used_mean + mb_used_median)/2)*100)).round(2)
print('The mean of mb_used is', mb_used_mean, 'and the median is:', mb_used_median)
print('That is a difference of '+ str(pct) +'%.')
# In[15]:
# overall info for dataframe
print('Overall info for internet table')
internet_df.sort_values(by='id', na_position='first')
# The **internet table** has 104825 rows in 4 columns and there are no missing values and no duplicate rows.
# The mb_used mean and median are close, with a 6.4% difference. We may need to address this in the preprocessing section.
#
# The table catalogs 104825 unique sessions of internet use for 489 users.
#
# The mb_used column gives us valuable information about the amount of data used, but that amount is per individual web session. Megaline rounds the total for each month from megabytes to gigabytes. We will need to add up the amount of data used for each user for each month and round that up from megabytes to gigabytes.
# In the calculation section we will create a df with the aggregate of monthly mb_used by user and round those monthly values upwards for calculations.
#
# Column | Data Type | Description
# ------------ | ------------- | -----------------
# id | object | unique session identifier
# user_id | int64 | user identifier
# session_date | object | web session date
# mb_used | float64 | the volume of data spent during the session (in megabytes)
#
# **internet_df** We will change and/or optimize the datatypes of user_id, session_date, and mb_used. id will not be changed because it doesn't have low cardinality.
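# A non-destructive sketch (the real aggregation is assumed to happen in the
# Calculations section) of the monthly MB -> GB rounding described above:
_monthly_mb_preview = (
    internet_df
    .assign(month=pd.to_datetime(internet_df['session_date']).dt.to_period('M'))
    .groupby(['user_id', 'month'])['mb_used']
    .sum()
)
_monthly_gb_preview = np.ceil(_monthly_mb_preview / 1024)  # Megaline bills whole gigabytes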
# In[16]:
# print the first 5 rows
print('\nFirst 5 rows of messages table')
messages_df.head()
# In[17]:
# df general info
print('\nGeneral info of messages table\n')
print(messages_df.info())
# check df for duplicates
print('\nNumber of duplicate rows:', messages_df.duplicated().sum())
# check df for shape
print('\nNumber rows and columns:', messages_df.shape)
# check df cols for unique values
print('\nNumber of unique id:', messages_df.id.nunique())
# check df cols for unique values
print('\nNumber of unique user_id:', messages_df.user_id.nunique())
# The **messages table** has 76051 rows in 3 columns and there are no missing values and no duplicate rows.
#
# The table catalogs 76051 messages from 402 unique users.
#
# Column | Data Type | Description
# ------------ | ------------- | -----------------
# id | object | unique text message identifier
# user_id | int64 | the identifier of the user sending the text
# message_date | object | text message date
#
# **messages_df** We will change and/or optimize the datatypes of user_id and message date. id will not be changed because it doesn't have low cardinality.
# In[18]:
# print the first 5 rows
print('\nFirst 5 rows of users')
users_df.head()
# In[19]:
# df general info
print('\nGeneral info of users table\n')
print(users_df.info())
# check df for duplicates
print('\nNumber of duplicate rows:', users_df.duplicated().sum())
# check df for shape
print('\nNumber rows and columns:', users_df.shape)
# check df cols for unique values
print('\nNumber of unique first_name out of 500:', users_df.first_name.nunique())
# check df cols for unique values
print('\nNumber of unique last_name out of 500:', users_df.last_name.nunique())
# check df cols for unique values
print('\nNumber of unique city out of 500:', users_df.city.nunique())
# check df cols for unique values
print('\nNumber of unique plan out of 500:', users_df.plan.nunique(), '\n')
# check proportion in each plan
print(users_df['plan'].value_counts().sort_index())
# The **users table** has 500 rows in 8 columns and there **are** missing values in the churn_date column, but no duplicate rows. The missing values in the churn_date column indicate the calling plan was being used when this database was extracted.
#
# Out of 500 users, about 2/3 (339) have the surf plan and 1/3 (161) have the ultimate plan. There are 73 unique locations (city). This city information will be useful in the analysis of renue by location.
#
# Column | Data Type | Description
# ------------ | ------------- | -----------------
# user_id | int64 | unique user identifier
# first_name | object | user's name
# last_name | object | user's last name
# age | int64 | user's age (years)
# city | object | user's city of residence
# reg_date | object | subscription date (dd, mm, yy)
# plan | object | calling plan name
# churn_date | object | the date the user stopped using the service
#
# **users_df** We will change and/or optimize the datatypes of user_id, age, city, reg_date, plan, churn_date. first_name and last_name will not be changed because they doesn't have low cardinality.
# <a class="anchor" id="general_overview_conclusion"></a>
# **Introductory Conclusions**
#
# - We loaded 5 dataframes, calls_df, internet_df, plans_df, messages_df, users_df.
# - No duplicate rows
# - No unexplained missing values (missing values in churn_date indicate the plan is active)
# - calls_df.duration and internet_df.mb_used likely have outliers
# - users_df.churn_date needs further investigation.
#
#
# Table | Unique user_id | Rows | Columns
# ------------ | ----------------- | -------------- | ----------
# calls_df | 481 | 137735 | 4
# internet_df | 489 | 104825 | 4
# messages_df | 402 | 76051 | 3
# users_df | 500 | 500 | 8
#
# **[Return to table of contents](#table_of_contents)**
# <a class="anchor" id="data_preprocessing"></a>
# **1. Data preprocessing**
#
# - Change data types
#
# - **calls_df** We will change and/or optimize the datatypes of call_date, duration, and user_id. id will not be changed because it doesn't have low cardinality.
# - **internet_df** We will change and/or optimize the datatypes of user_id, session_date, and mb_used. id will not be changed because it doesn't have low cardinality.
# - **plans_df** We will change and/or optimize the datatypes of messages_included, mb_per_month_included, minutes_included, usd_monthly_pay, usd_per_gb, usd_per_message, usd_per_minute, plan_name.
# - **messages_df** We will change and/or optimize the datatypes of user_id and message date. id will not be changed because it doesn't have low cardinality.
# - **users_df** We will change and/or optimize the datatypes of user_id, age, city, reg_date, plan, churn_date. first_name and last_name will not be changed because they doesn't have low cardinality.
#
# - Check for outliers, specifically calls_df.duration, internet_df.mb_used
# - Investigate users_df.churn_date
# - Check for errors
# In[20]:
# for dfs: calls_df, internet_df, plans_df, messages_df, users_df
# change/downcast datatypes as appropriate
# For columns with low cardinality (the amount of unique values is lower than 50% of the count of these values)
# changing from object to category will help optimize memory and retrieval
calls_df['user_id'] = pd.to_numeric(calls_df['user_id'], downcast='integer')
calls_df['call_date'] = pd.to_datetime(calls_df['call_date'], format='%Y-%m-%d')
calls_df['duration'] = pd.to_numeric(calls_df['duration'], downcast='integer')
internet_df['user_id'] = pd.to_numeric(internet_df['user_id'], downcast='integer')
internet_df['session_date'] = pd.to_datetime(internet_df['session_date'], format='%Y-%m-%d')
internet_df['mb_used'] = pd.to_numeric(internet_df['mb_used'], downcast='float')
plans_df['messages_included'] = pd.to_numeric(plans_df['messages_included'], downcast='integer')
plans_df['mb_per_month_included'] = pd.to_numeric(plans_df['mb_per_month_included'], downcast='integer')
plans_df['minutes_included'] = pd.to_numeric(plans_df['minutes_included'], downcast='integer')
plans_df['usd_monthly_pay'] = pd.to_numeric(plans_df['usd_monthly_pay'], downcast='integer')
plans_df['usd_per_gb'] = pd.to_numeric(plans_df['usd_per_gb'], downcast='integer')
plans_df['usd_per_message'] = pd.to_numeric(plans_df['usd_per_message'], downcast='float')
plans_df['usd_per_minute'] = pd.to_numeric(plans_df['usd_per_minute'], downcast='float')
plans_df['plan_name'] = plans_df['plan_name'].astype('category')
messages_df['user_id'] = pd.to_numeric(messages_df['user_id'], downcast='integer')
messages_df['message_date'] = pd.to_datetime(messages_df['message_date'], format='%Y-%m-%d')
users_df['user_id'] = | pd.to_numeric(users_df['user_id'], downcast='integer') | pandas.to_numeric |
import os
import logging
from notion.client import NotionClient
import numpy as np
import pandas as pd
import yfinance as yf
from datetime import datetime
import matplotlib.pyplot as plt
import seaborn as sns
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from telegram import ParseMode, ReplyKeyboardMarkup, KeyboardButton
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# settings
TELEGRAM_TOKEN = os.environ['TELEGRAM_TOKEN']
NOTION_TOKEN = os.environ['NOTION_TOKEN']
LIMIT = os.environ['CREDIT_LIMIT']
POWER_USER_ID = int(os.environ['POWER_USER_ID'])
POWER_USER_NAME = os.environ['POWER_USER_NAME']
notion_balance = "https://www.notion.so/chenchiks/2062899533a048579f572a7e3d40182f?v=1fb6c93b1a5045af9ea3a83b4aa90dd0"
notion_transactions = "https://www.notion.so/chenchiks/1604cc3bb0614273a690710f17b138ca?v=8f278effcac4457d803aeb5cc0a1c93e"
credit_limit = int(LIMIT)
recalculate = "Recalculate"
newlink = "Update numbers"
recalculate_keyboard = KeyboardButton(text=recalculate)
link_keyboard = KeyboardButton(text=newlink)
custom_keyboard = [[recalculate_keyboard, link_keyboard]]
reply_markup = ReplyKeyboardMarkup(custom_keyboard, resize_keyboard=True)
newlink_filter = Filters.text([newlink]) & (
Filters.user(user_id=POWER_USER_ID) | Filters.user(username=POWER_USER_NAME))
recalculate_filter = Filters.text([recalculate]) & (
Filters.user(user_id=POWER_USER_ID) | Filters.user(username=POWER_USER_NAME))
in_known_filters = newlink_filter | recalculate_filter | Filters.command('start')
def start(update, context):
context.bot.send_message(chat_id=update.effective_chat.id, text="I'm a bot, please talk to me!",
reply_markup=reply_markup)
def unknown(update, context):
context.bot.send_message(chat_id=update.effective_chat.id, text="I'm sorry Dave I'm afraid I can't do that.",
reply_markup=reply_markup)
def daily_status(day, date, planned_month, daily):
return (
planned_month[planned_month["transaction_time"] <= date][
"transaction_amount"
].sum()
- (day + 1) * daily
)
def transactions_left(date, planned_month):
return planned_month[planned_month["transaction_time"] > date][
"transaction_amount"
].sum()
def transactions_made(date, planned_month):
return planned_month[planned_month["transaction_time"] <= date][
"transaction_amount"
].sum()
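# A small, self-contained sketch (toy planned-spend frame, illustrative dates) of how
# the helpers above combine: daily_status compares what is scheduled up to `date`
# against an even daily budget.
_toy_planned = pd.DataFrame({
    'transaction_time': pd.to_datetime(['2021-01-05', '2021-01-20']),
    'transaction_amount': [100, 200],
})
_toy_date = pd.Timestamp('2021-01-10')
assert daily_status(9, _toy_date, _toy_planned, daily=10) == 0   # 100 scheduled vs 100 budgeted
assert transactions_left(_toy_date, _toy_planned) == 200
assert transactions_made(_toy_date, _toy_planned) == 100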
def generate_link(update, context):
context.bot.send_message(chat_id=update.effective_chat.id, text="5 sec")
now = pd.Timestamp(datetime.now().timestamp(), unit="s").to_period(freq="M")
month = now
client = NotionClient(token_v2=NOTION_TOKEN)
cv = client.get_collection_view(notion_balance)
notion_data = [[row.id, row.date.start, row.credit, row.cash, row.usd] for row in cv.collection.get_rows()]
balance = pd.DataFrame(notion_data, columns=['id', 'balance_time', 'Credit', 'Cash', 'USD'])
balance["balance_year"] = balance["balance_time"].dt.to_period("Y").dt.start_time
balance["balance_month"] = balance["balance_time"].dt.to_period("M").dt.start_time
balance["balance_day"] = balance["balance_time"].dt.to_period("D").dt.start_time
balance["balance_week"] = balance["balance_day"] - balance[
"balance_day"
].dt.weekday * np.timedelta64(1, "D")
yf_exchange = (
(yf.Ticker("RUB=X")
.history(period="max")["Close"]
.reset_index()
.rename({"index": "date"}, axis=1))
)
exchange = pd.DataFrame(
pd.date_range(start=month.start_time, end=month.end_time)
).rename({0: "date"}, axis=1)
exchange["usd_rate"] = exchange["date"].apply(
lambda x: yf_exchange[yf_exchange["Date"] <= x].iloc[-1]["Close"]
)
balance = balance.merge(exchange, left_on="balance_day", right_on="date")
balance["Business"] = (
balance["USD"] * balance["usd_rate"]
)
balance["balance"] = (
balance["Credit"] - credit_limit + balance["Cash"] + balance["Business"]
)
latest_balance = balance.sort_values(by='balance_time').iloc[-1]
context.bot.send_message(chat_id=update.effective_chat.id,
text=f"*Please fill in* [Balance form](https://docs.google.com/forms/d/e/1FAIpQLSe8JaUuKA22qdeun1MtOUK21LkxXjdcu-yJPUjri-T4Y7m60g/viewform?usp=pp_url&entry.910435313={latest_balance['Credit']}&entry.2073871224={latest_balance['Cash']}&entry.1266770758={latest_balance['USD']}) and after that ask me to recalculate the balance",
parse_mode=ParseMode.MARKDOWN)
def recalculate_balance(update, context):
context.bot.send_message(chat_id=update.effective_chat.id, text="Uno momento!")
    # configure the display of fractional values
pd.options.display.float_format = "{:,.2f}".format
    # configure plot sizes (had to move this into a separate block, since otherwise it does not take effect)
sns.set(
context="notebook", style="whitegrid", rc={"figure.figsize": (15, 9)},
)
plt.style.use("seaborn-muted")
# get info about this month
now = pd.Timestamp(datetime.now().timestamp(), unit="s").to_period(freq="M")
month = now
days_in_month = month.days_in_month
client = NotionClient(token_v2=NOTION_TOKEN)
cv = client.get_collection_view(notion_balance)
notion_data = [[row.id, row.date.start, row.credit, row.cash, row.usd] for row in cv.collection.get_rows()]
balance = pd.DataFrame(notion_data, columns=['id', 'balance_time', 'Credit', 'Cash', 'USD'])
cv = client.get_collection_view(notion_transactions)
notion_data = [[row.id, row.date.start, row.amount] for row in cv.collection.get_rows()]
planned = pd.DataFrame(notion_data, columns=['id', 'Date', 'transaction_amount'])
balance["balance_year"] = balance["balance_time"].dt.to_period("Y").dt.start_time
balance["balance_month"] = balance["balance_time"].dt.to_period("M").dt.start_time
balance["balance_day"] = balance["balance_time"].dt.to_period("D").dt.start_time
balance["balance_week"] = balance["balance_day"] - balance[
"balance_day"
].dt.weekday * np.timedelta64(1, "D")
yf_exchange = (
(yf.Ticker("RUB=X")
.history(period="max")["Close"]
.reset_index()
.rename({"index": "date"}, axis=1))
)
exchange = pd.DataFrame(
pd.date_range(start=month.start_time, end=month.end_time)
).rename({0: "date"}, axis=1)
exchange["usd_rate"] = exchange["date"].apply(
lambda x: yf_exchange[yf_exchange["Date"] <= x].iloc[-1]["Close"]
)
balance = balance.merge(exchange, left_on="balance_day", right_on="date")
balance["Business"] = (
balance["USD"] * balance["usd_rate"]
)
balance["balance"] = (
balance["Credit"] - credit_limit + balance["Cash"] + balance["Business"]
)
balance_daily = balance.sort_values(by='balance_time')[["balance_day", "balance"]].drop_duplicates(
subset="balance_day", keep="last"
)
planned["transaction_time"] = pd.to_datetime(planned["Date"])
planned["transaction_year"] = (
planned["transaction_time"].dt.to_period("Y").dt.start_time
)
planned["transaction_month"] = (
planned["transaction_time"].dt.to_period("M").dt.start_time
)
planned["transaction_day"] = planned["transaction_time"].dt.to_period("D").dt.start_time
planned["transaction_week"] = planned["transaction_day"] - planned[
"transaction_day"
].dt.weekday * np.timedelta64(1, "D")
planned_month = planned[planned["transaction_month"] == month.start_time]
monthly_chart = (
pd.DataFrame(index= | pd.date_range(start=month.start_time, end=month.end_time) | pandas.date_range |