#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
"""
The ADS accessor for the Pandas DataFrame.
The accessor will be initialized with the pandas object the user is interacting with.
Examples
--------
>>> from ads.feature_engineering.accessor.dataframe_accessor import ADSDataFrameAccessor
>>> from ads.feature_engineering.feature_type.continuous import Continuous
>>> from ads.feature_engineering.feature_type.creditcard import CreditCard
>>> from ads.feature_engineering.feature_type.string import String
>>> from ads.feature_engineering.feature_type.base import Tag
>>> df = pd.DataFrame({'Name': ['Alex'], 'Credit Card': ["4532640527811543"]})
>>> df.ads.feature_type
{'Name': ['string'], 'Credit Card': ['string']}
>>> df.ads.feature_type_description
Column Feature Type Description
------------------------------------------------------------------
0 Name string Type representing string values.
1 Credit Card string Type representing string values.
>>> df.ads.default_type
{'Name': 'string', 'Credit Card': 'string'}
>>> df.ads.feature_type = {'Name':['string', Tag('abc')]}
>>> df.ads.tags
{'Name': ['abc']}
>>> df.ads.feature_type = {'Credit Card':['credit_card']}
>>> df.ads.feature_select(include=['credit_card'])
Credit Card
-------------------------------
0 4532640527811543
"""
from typing import Any, Dict, List, Union
import numpy as np
import pandas as pd
from ads.common.utils import DATA_SCHEMA_MAX_COL_NUM
from ads.data_labeling.mixin.data_labeling import DataLabelingAccessMixin
from ads.dbmixin.db_pandas_accessor import DBAccessMixin
from ads.feature_engineering import schema
from ads.feature_engineering.accessor.mixin.eda_mixin import EDAMixin
from ads.feature_engineering.accessor.mixin.feature_types_mixin import (
ADSFeatureTypesMixin,
)
from ads.feature_engineering.feature_type.base import FeatureType
from pandas.core.dtypes.common import is_list_like
@pd.api.extensions.register_dataframe_accessor("ads")
class ADSDataFrameAccessor(
ADSFeatureTypesMixin, EDAMixin, DBAccessMixin, DataLabelingAccessMixin
):
"""ADS accessor for the Pandas DataFrame.
Attributes
----------
columns: List[str]
The column labels of the DataFrame.
tags(self) -> Dict[str, str]
Gets the dictionary of user defined tags for the dataframe.
default_type(self) -> Dict[str, str]
Gets the map of columns and associated default feature type names.
feature_type(self) -> Dict[str, List[str]]
Gets the list of registered feature types.
feature_type_description(self) -> pd.DataFrame
Gets the list of registered feature types in a DataFrame format.
Methods
-------
sync(self, src: Union[pd.DataFrame, pd.Series]) -> pd.DataFrame
Syncs feature types of current DataFrame with that from src.
feature_select(self, include: List[Union[FeatureType, str]] = None, exclude: List[Union[FeatureType, str]] = None) -> pd.DataFrame
    Returns a subset of the DataFrame's columns selected by the registered feature types.
help(self, prop: str = None) -> None
    Provides docstrings for available methods and properties.
Examples
--------
>>> from ads.feature_engineering.accessor.dataframe_accessor import ADSDataFrameAccessor
>>> from ads.feature_engineering.feature_type.continuous import Continuous
>>> from ads.feature_engineering.feature_type.creditcard import CreditCard
>>> from ads.feature_engineering.feature_type.string import String
>>> from ads.feature_engineering.feature_type.base import Tag
    >>> df = pd.DataFrame({'Name': ['Alex'], 'Credit Card': ["4532640527811543"]})
>>> df.ads.feature_type
{'Name': ['string'], 'Credit Card': ['string']}
>>> df.ads.feature_type_description
Column Feature Type Description
-------------------------------------------------------------------
0 Name string Type representing string values.
1 Credit Card string Type representing string values.
>>> df.ads.default_type
{'Name': 'string', 'Credit Card': 'string'}
>>> df.ads.feature_type = {'Name':['string', Tag('abc')]}
>>> df.ads.tags
{'Name': ['abc']}
>>> df.ads.feature_type = {'Credit Card':['credit_card']}
>>> df.ads.feature_select(include=['credit_card'])
Credit Card
------------------------------
0 4532640527811543
"""
def __init__(self, pandas_obj) -> None:
"""Initializes ADS Pandas DataFrame Accessor.
Parameters
----------
pandas_obj : pandas.DataFrame
Pandas dataframe
Raises
------
ValueError
If provided DataFrame has duplicate columns.
"""
if len(set(pandas_obj.columns)) != len(pandas_obj.columns):
raise ValueError(
"Failed to initialize a DataFrame accessor. " "Duplicate column found."
)
self._obj = pandas_obj
super().__init__()
self.columns = self._obj.columns
self._info = None
def info(self) -> Any:
"""Gets information about the dataframe.
Returns
-------
Any
The information about the dataframe.
"""
return self._info
@property
def _feature_type(self) -> Dict[str, List[FeatureType]]:
"""Gets the map of columns and associated feature types.
Key is column name and value is list of feature types.
"""
return {
self._obj[col].name: self._obj[col].ads._feature_type for col in self._obj
}
@property
def _default_type(self) -> Dict[str, FeatureType]:
"""Gets the map of columns and associated default feature types.
Key is column name and value is a default feature type.
"""
return {
self._obj[col].name: self._obj[col].ads._default_type for col in self._obj
}
@property
def tags(self) -> Dict[str, List[str]]:
"""Gets the dictionary of user defined tags for the dataframe. Key is column name
and value is list of tag names.
Returns
-------
Dict[str, List[str]]
The map of columns and associated default tags.
"""
return {self._obj[col].name: self._obj[col].ads.tags for col in self._obj}
@property
def default_type(self) -> Dict[str, str]:
"""Gets the map of columns and associated default feature type names.
Returns
-------
Dict[str, str]
The dictionary where key is column name and value is the name of default feature
type.
"""
return {k: v.name for k, v in self._default_type.items()}
@property
def feature_type(self) -> Dict[str, List[str]]:
"""Gets the list of registered feature types.
Returns
-------
Dict[str, List[str]]
The dictionary where key is column name and value is list of associated feature type
names.
"""
return {col.name: col.ads.feature_type for _, col in self._obj.items()}
@property
def feature_type_description(self) -> pd.DataFrame:
"""Gets the list of registered feature types in a DataFrame format.
Returns
-------
:class:`pandas.DataFrame`
Examples
        --------
        >>> df.ads.feature_type_description
Column Feature Type Description
-------------------------------------------------------------------
0 City string Type representing string values.
1 Phone Number string Type representing string values.
"""
        result_df = pd.DataFrame([], columns=["Column", "Feature Type", "Description"])
        for col in self._obj:
            series_feature_type_df = self._obj[col].ads.feature_type_description
            series_feature_type_df.insert(0, "Column", col)
            # DataFrame.append was removed in pandas 2.x; concatenate instead
            result_df = pd.concat([result_df, series_feature_type_df])
        result_df.reset_index(drop=True, inplace=True)
        return result_df
@feature_type.setter
def feature_type(
self, feature_type_map: Dict[str, List[Union[FeatureType, str]]]
) -> None:
"""Sets feature types for the DataFrame.
Parameters
----------
feature_type_map : Dict[str, List[Union[FeatureType, str]]]
The map of feature types where key is column name and value is list of feature
types.
Returns
-------
None
Nothing.
"""
for col, feature_types in feature_type_map.items():
self._obj[col].ads.feature_type = feature_types
def sync(self, src: Union[pd.DataFrame, pd.Series]) -> pd.DataFrame:
"""Syncs feature types of current DataFrame with that from src.
Syncs feature types of current dataframe with that from src, where src
can be a dataframe or a series. In either case, only columns with
matched names are synced.
Parameters
----------
src: `pd.DataFrame` | `pd.Series`
The source to sync from.
Returns
-------
:class:`pandas.DataFrame`
Synced dataframe.
"""
        for _, col in self._obj.items():
            col.ads.sync(src)
        return self._obj
def _extract_columns_of_target_types(
self, target_types: List[Union[FeatureType, str]]
) -> List:
"""Returns all the column names that are of the target types from the
feature_type dictionary.
Parameters
----------
        target_types: list
            A list of target feature types; each entry can be either a feature
            type name or a feature type class.
        Returns
-------
List[str]
The list of columns names.
"""
columns = []
target_types = (
np.unique(
[self._get_type(feature_type).name for feature_type in target_types]
)
if target_types is not None
else None
)
for target_type in target_types:
for name, feature_types in self.feature_type.items():
if target_type in feature_types:
columns.append(name)
return columns
def feature_select(
self,
include: List[Union[FeatureType, str]] = None,
exclude: List[Union[FeatureType, str]] = None,
) -> pd.DataFrame:
"""Returns a subset of the DataFrameโs columns based on the column feature_types.
Parameters
----------
include: List[Union[FeatureType, str]], optional
Defaults to None. A list of FeatureType subclass or str to be included.
exclude: List[Union[FeatureType, str]], optional
Defaults to None. A list of FeatureType subclass or str to be excluded.
Raises
------
ValueError
If both of include and exclude are empty
ValueError
If include and exclude are used simultaneously
Returns
-------
:class:`pandas.DataFrame`
The subset of the frame including the feature types in include and excluding
the feature types in exclude.
"""
if not (include or exclude):
raise ValueError("at least one of include or exclude must be nonempty")
if not is_list_like(include):
include = (include,) if include is not None else ()
        if not is_list_like(exclude):
            exclude = (exclude,) if exclude is not None else ()
import logging
import numpy as np
import pandas as pd
import scipy.stats as ss
from scipy.linalg import eig
from numba import jit
import sg_covid_impact
# from mi_scotland.utils.pandas import preview
logger = logging.getLogger(__name__)
np.seterr(all="raise") # Raise errors on floating point errors
def process_complexity(df, dataset, year, geo_type, cluster, PCI=False):
"""Calculate complexity variables aggregated over the columns.
Calculates: size, complexity index, complexity outlook index
Args:
df (pandas.DataFrame): Long dataframe
Expected columns: `{"geo_nm", "geo_cd", cluster, "value"}`
year (str): Year
dataset (str): Name of dataset
geo_type (str): Type of regional geography
cluster (str): Name of cluster column to use to pivot on
PCI (bool, optional): If True, calculate product complexity by
transposing input
# TODO refactor outside of function
Returns:
pandas.DataFrame
"""
X = (
df.pipe(pivot_area_cluster, cluster).fillna(0)
# Transpose if PCI
.pipe(lambda x: x.T if PCI else x)
)
X.index.name = "cluster"
size = X.sum(1).to_frame("size")
complexity = (
X.pipe(create_lq, binary=True)
.pipe(calc_eci, sign_correction=X.sum(1))
.pipe(lambda x: x.rename(columns={"eci": "pci"}) if PCI else x)
)
outlook = X.pipe(complexity_outlook_index).to_frame("coi" if not PCI else "poi")
return (
size.join(complexity)
.join(outlook)
.assign(year=year, geo_type=geo_type, source=dataset, cluster_type=cluster)
)
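# A minimal usage sketch (not part of the original module): it shows the long
# input format `process_complexity` expects. The toy rows below are made up, and
# running the pipeline assumes the full module (including helpers defined further
# down, such as `calc_eci` and `_drop_zero_rows_cols`) is available.
def _example_process_complexity():
    toy = pd.DataFrame(
        {
            "geo_nm": ["A", "A", "A", "B", "B", "B", "C", "C", "C"],
            "geo_cd": ["A1", "A1", "A1", "B1", "B1", "B1", "C1", "C1", "C1"],
            "SIC4": ["0111", "0112", "0113"] * 3,
            "value": [10.0, 3.0, 1.0, 2.0, 8.0, 4.0, 1.0, 2.0, 9.0],
        }
    )
    return process_complexity(
        toy, dataset="toy", year="2020", geo_type="test", cluster="SIC4"
    )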
def _melt_keep_index(df, value_name="value"):
""" Fully melt a dataframe keeping index, setting new index as all but `value` """
id_vars = df.index.names
return (
df.reset_index()
.melt(id_vars=id_vars, value_name=value_name)
.set_index([*id_vars, df.columns.name])
)
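# Illustrative sketch (not part of the original module): `_melt_keep_index` turns
# a wide [index x columns] frame into a long one indexed by both labels. The toy
# frame below is made up for demonstration.
def _example_melt_keep_index():
    wide = pd.DataFrame(
        [[1.0, 2.0], [3.0, 4.0]],
        index=pd.Index(["A1", "B1"], name="geo_cd"),
        columns=pd.Index(["s1", "s2"], name="cluster"),
    )
    # Result has a MultiIndex (geo_cd, cluster) and a single "value" column
    return _melt_keep_index(wide, value_name="value")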
def process_complexity_unit(df, dataset, year, geo_type, cluster):
"""Calculate unaggregated complexity analysis variables
Calculates: raw value, location quotient, RCA?, distance, opportunity outlook gain
Args:
df (pandas.DataFrame): Long dataframe
Expected columns: `{"geo_nm", "geo_cd", cluster, "value"}`
year (str): Year
dataset (str): Name of dataset
geo_type (str): Type of regional geography
cluster (str): Name of cluster column to use to pivot on
Returns:
pandas.DataFrame
"""
X = df.pipe(pivot_area_cluster, cluster).fillna(0)
X.columns.name = "cluster"
# Index: year, location, cluster, geo_type
# value, LQ, RCA?, distance, OOG
value = X.pipe(_melt_keep_index, "value")
lq = X.pipe(create_lq).pipe(_melt_keep_index, "lq")
has_rca = (lq > 1).rename(columns={"lq": "has_rca"})
d = X.pipe(distance).pipe(_melt_keep_index, "distance")
omega = 1 - X.pipe(proximity_density).pipe(_melt_keep_index, "omega")
oog = opportunity_outlook_gain(X).pipe(_melt_keep_index, "oog")
return (
pd.concat([value, lq, has_rca, d, omega, oog], axis=1)
.assign(year=year, geo_type=geo_type, source=dataset, cluster_type=cluster)
        # .pipe(preview)  # the preview import (mi_scotland) is commented out above, so skip it
)
@jit(nopython=True)
def _proximity_matrix(M):
""" `proximity_matrix` helper function """
n_c, n_p = M.shape
phi = np.empty((n_p, n_p), dtype=np.float64)
k = M.sum(0) # Ubiquity
for i in range(n_p):
Mci = M[:, i]
for j in range(n_p):
if j > i:
continue
Mcj = M[:, j]
m = max([k[i], k[j]])
if m == 0:
v = np.nan
else:
v = (Mci * Mcj).sum() / m
phi[i, j] = v
phi[j, i] = v
return phi
def proximity_matrix(X, threshold=1):
""" Calculates proximity matrix
Proximity between entries calculates the probability that given a revealed
comparative advantage (RCA) in entity `j`, a location also has a RCA in
entity `i`.
The same probability is calculated with `i` and `j` permuted, and the
minimum of the two probabilities is then taken.
.. math::
\\large{ \\phi_{ij} = \\min\\left\\{\\mathbb{P}(\\text{RCA}_i \\geq 1 |
\\text{RCA}_j \\geq 1), \\mathbb{P}(\\text{RCA}_j \\geq 1 |
\\text{RCA}_i \\geq 1)\\right\\} } \\\\
\\large{ \\phi_{ij} = \\frac{\\sum_c M_{ci} * M_{cj}}{\\max(k_i, k_j)} }
        \\large{ k_j = \\sum_c M_{cj} }
Args:
X (pandas.DataFrame): Activity matrix [m x n]
threshold (float, optional): Binarisation threshold for location quotient.
Returns:
pandas.DataFrame [n x n]
"""
M = create_lq(X, binary=True, threshold=threshold)
return pd.DataFrame(_proximity_matrix(M.values), index=M.columns, columns=M.columns)
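# Illustrative check (not part of the original module): proximity is symmetric,
# with ones on the diagonal whenever ubiquity is non-zero, which a toy activity
# matrix makes easy to verify. The matrix values below are made up.
def _example_proximity_matrix():
    X = pd.DataFrame(
        [[10.0, 3.0, 1.0], [2.0, 8.0, 4.0], [1.0, 2.0, 9.0]],
        index=["A1", "B1", "C1"],
        columns=["s1", "s2", "s3"],
    )
    phi = proximity_matrix(X)
    assert np.allclose(phi.values, phi.values.T)  # min of the two conditionals => symmetric
    return phi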
def proximity_density(X, threshold=1):
"""Calculate proximity density
    .. math::
\\omega_{ik} = \\frac{ \\sum_j M_{ij} \\phi_{jk}}{\\sum_j \\phi_{jk}}
Args:
X (pandas.DataFrame): Activity matrix [m x n]
threshold (float, optional): Binarisation threshold for location quotient.
Returns:
pandas.DataFrame [m x n]
"""
M = create_lq(X, binary=True, threshold=threshold)
phi = proximity_matrix(X, threshold)
return (M @ phi) / phi.sum(axis=0)
def distance(X, threshold=1):
"""Distance: 1 - proximity density w/ existing capabilities as NaN
Args:
X (pandas.DataFrame): [locations x activities]
threshold (float, optional): Binarisation threshold for location
quotient.
Returns:
        pandas.DataFrame [locations x activities]
"""
M = create_lq(X, threshold, binary=True)
phi = proximity_matrix(X, threshold)
return (((1 - M) @ phi) / phi.sum(axis=1)) * M.applymap(
lambda x: np.nan if x == 1 else 1
)
def complexity_outlook_index(X, threshold=1):
"""Calculate economic complexity outlook index
Args:
X (pandas.DataFrame): [locations x activities]
threshold (float, optional): Binarisation threshold for location
quotient.
Returns:
pandas.Series [locations]
"""
M = create_lq(X, threshold, binary=True)
d = distance(X, threshold)
PCI = calc_eci(M.T, sign_correction=X.sum(0))
if PCI.shape[0] != M.shape[1]:
M = M.loc[:, PCI.index]
d = d.loc[:, PCI.index]
return ((1 - d) * (1 - M) * PCI.values.T).sum(axis=1)
def opportunity_outlook_gain(X, threshold=1):
"""Calculate opportunity outlook gain
Value for existing capabilities is NaN.
Args:
X (pandas.DataFrame): [locations x activities]
threshold (float, optional): Binarisation threshold for location
quotient.
Returns:
        pandas.DataFrame [locations x activities]
"""
M = create_lq(X, threshold, binary=True)
phi = proximity_matrix(X, threshold)
d = distance(X, threshold)
PCI = calc_eci(M.T, sign_correction=X.sum(0))
if PCI.shape[0] != M.shape[1]:
M = M.loc[:, PCI.index]
phi = phi.loc[PCI.index, PCI.index]
d = d.loc[:, PCI.index]
return (
(1 - M) * PCI.values.T @ (phi / phi.sum(0)) - ((1 - d) * PCI.values.T)
) * M.applymap(lambda x: np.nan if x == 1 else 1)
def pivot_area_cluster(df, cluster, aggfunc=sum):
"""Convert long data into a matrix, pivoting on `cluster`
For example, take BRES/IDBR data at Local authority (LAD) geographic level
and SIC4 sectoral level to create matrix with elements representing the
activity level for a given LAD-SIC4 combination.
Args:
df (pandas.DataFrame): Long dataframe
Expected Columns: `{"geo_nm", "geo_cd", cluster}`
cluster (str): Column of the sector type to pivot on
        aggfunc (function, optional): Aggregation function passed to
            `pandas.DataFrame.pivot_table`.
Returns:
pandas.DataFrame: [number areas x number cluster]
Note: Fills missing values with zero
"""
return (
df
# Fill missing values with zeros
.fillna(0)
# Pivot to [areas x sectors]
.pivot_table(
index=["geo_cd", "geo_nm"],
columns=cluster,
values="value",
fill_value=0,
aggfunc=aggfunc,
)
)
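# Illustrative sketch (not part of the original module): the pivot turns long
# BRES/IDBR-style rows into an [area x sector] activity matrix, filling missing
# combinations with zero. The rows below are made up.
def _example_pivot_area_cluster():
    long_df = pd.DataFrame(
        {
            "geo_cd": ["A1", "A1", "B1"],
            "geo_nm": ["Area A", "Area A", "Area B"],
            "SIC4": ["0111", "0112", "0111"],
            "value": [10.0, 5.0, 7.0],
        }
    )
    return pivot_area_cluster(long_df, "SIC4")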
def create_lq(X, threshold=1, binary=False):
"""Calculate the location quotient.
Divides the share of activity in a location by the share of activity in
the UK total.
Args:
        X (pandas.DataFrame): Rows are locations, columns are sectors,
            and values are activity in a given sector at a location.
        threshold (float, optional): Binarisation threshold.
        binary (bool, optional): If True, binarise matrix at `threshold`.
Returns:
pandas.DataFrame
#UTILS
"""
Xm = X.values
with np.errstate(invalid="ignore"): # Accounted for divide by zero
X = pd.DataFrame(
(Xm * Xm.sum()) / (Xm.sum(1)[:, np.newaxis] * Xm.sum(0)),
index=X.index,
columns=X.columns,
).fillna(0)
return (X > threshold).astype(float) if binary else X
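# Illustrative sketch (not part of the original module): a location quotient
# above 1 flags relative specialisation, and `binary=True` produces the M matrix
# used by the complexity measures above. Toy numbers only.
def _example_create_lq():
    X = pd.DataFrame(
        [[10.0, 3.0], [2.0, 8.0]],
        index=["A1", "B1"],
        columns=["s1", "s2"],
    )
    lq = create_lq(X)              # continuous location quotients
    M = create_lq(X, binary=True)  # 1.0 where LQ > 1, else 0.0
    return lq, M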
def calc_fitness(X, n_iters):
"""Calculate the fitness metric of economic complexity
Args:
X (pandas.DataFrame): Rows are locations, columns are sectors,
and values are activity in a given sector at a location.
n_iters (int): Number of iterations to calculate fitness for
Returns:
pandas.DataFrame
#UTILS
"""
X = _drop_zero_rows_cols(X)
x = np.ones(X.shape[0])
for n in range(1, n_iters):
x = (X.values / (X.values / x[:, np.newaxis]).sum(0)).sum(1)
x = x / x.mean()
return pd.DataFrame(np.log(x), index=X.index, columns=["fitness"])
def calc_fit_plus(X, n_iters, correction=True):
"""Calculate the fitness+ (ECI+) metric of economic complexity
Args:
X (pandas.Dataframe): Rows are locations, columns are sectors,
and values are activity in a given sector at a location.
n_iters (int): Number of iterations to calculate fitness for
correction (bool, optional): If true, apply logarithmic correction.
Returns:
pandas.Dataframe
#UTILS
"""
X = _drop_zero_rows_cols(X)
if X.dtypes[0] == bool:
norm_mean = np.mean
else:
norm_mean = ss.gmean
x = X.values.sum(axis=1)
x = x / norm_mean(x)
for n in range(1, n_iters):
x = (X.values / (X.values / x[:, np.newaxis]).sum(0)).sum(1)
x = x / norm_mean(x)
if correction:
x = np.log(x) - np.log((X / X.sum(0)).sum(1))
else:
pass # x = np.log(x)
return pd.DataFrame(x, index=X.index, columns=["fit_p"])
def calc_eci(X, sign_correction=None):
"""Calculate the original economic complexity index (ECI).
Args:
X (pandas.DataFrame): Rows are locations, columns are sectors,
and values are activity in a given sector at a location.
sign_correction (pd.Series, optional): Array to correlate with ECI
to calculate sign correction. Typically, ubiquity. If None, uses
the sum over columns of the input data.
Returns:
pandas.DataFrame
#UTILS
"""
X = _drop_zero_rows_cols(X)
C = np.diag(1 / X.sum(1)) # Diagonal entries k_C
P = np.diag(1 / X.sum(0)) # Diagonal entries k_P
H = C @ X.values @ P @ X.T.values
w, v = eig(H, left=False, right=True)
    eci = pd.DataFrame(v[:, 1].real, index=X.index, columns=["eci"])
import pandas as pd
import numpy as np
import scipy as sp
import scipy.io  # makes sp.io.loadmat available; scipy submodules are not imported automatically
from scipy.special import expit as sigmoid_function
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.style.use('ggplot')
def load_data(location):
""" Given a directory string, returns a pandas dataframe containing hw data."""
# dictionary containing various matrices and some metadata
data = sp.io.loadmat(location)
x = pd.DataFrame(data['X'])
y = pd.DataFrame(data['y'], columns=['digit_class'])
y[y == 10] = 0 # convert from matlab's 1-index to python's 0-index
return x, y
def visualize_digit_images_data(data, gridSize=(10, 10), desiredDigitIndices=None, title=None):
""" Provides a plot of image data so we can see what we are playing with.
The kwarg allows for the option of hand selecting digit images we desired to see. """
    # thanks to pdf we know data is (5000,400). for plotting images, we want to
    # take it to (5000,20,20); pd.Panel was removed from pandas, so use a numpy array
    pixelSquares = data.values.reshape(5000, 20, 20).transpose(0, 2, 1)
    # we have to manually build the image by stitching together individual digits
    # first, we choose the digits we want
    if desiredDigitIndices is None:
        desiredDigitIndices = []
    desiredDigits = pixelSquares[desiredDigitIndices]  # for default kwarg, this is empty
    # get remaining images by sampling random digits
    nRandom = gridSize[0] * gridSize[1] - len(desiredDigitIndices)
    randomIndices = np.random.choice(len(pixelSquares), size=nRandom, replace=False)
    randomDigits = pixelSquares[randomIndices]
    allDigits = np.concatenate([desiredDigits, randomDigits], axis=0)
    # now we must fill in the matrix that represents the picture
    pixelRows = 20 * gridSize[0]
    pixelCols = 20 * gridSize[1]
    digitImage = np.zeros((pixelRows, pixelCols))
    digitToPlot = -1
    for i in range(0, pixelRows, 20):
        for j in range(0, pixelCols, 20):
            digitToPlot += 1
            digitImage[i:i+20, j:j+20] = allDigits[digitToPlot]
    # scipy.misc.toimage was removed; matplotlib can plot the array directly
    plt.figure()
    plt.imshow(digitImage, cmap=mpl.cm.Greys)
if title is None:
title = ''
plt.title(title)
return
# shamelessly stolen from my own hw2, where i had previously written this
def compute_cost(theta, features, response, regularizationParameter=0):
""" Returns the logistic regression func, evaluated on the data set and passed theta. This
also provides the opportunity for regularization. """
# set up regularization so that we always ignore the intercept parameter
interceptKnockOut = np.ones(len(features.columns))
interceptKnockOut[0] = 0
regularization = np.dot(interceptKnockOut, theta**2) # this is SUM (i=1, numFeatures) theta_i^2
regularization = regularization * regularizationParameter / (2 * len(features))
features = np.dot(theta, features.T) # dont forget H(x; theta) = sigmoid(innerprod(theta, features))
# build up the cost function one step at a time
cost = sigmoid_function(features)
cost = response * np.log(cost) + (1 - response) * np.log(1 - cost)
cost = -cost.sum(axis=0) / len(features)
return cost + regularization
def compute_dCost_dTheta(theta, features, response, regularizationParameter=0):
""" Returns the gradient of the cost function with respect to theta, evaluated on the data """
# set up regularization so that we always ignore the intercept parameter
interceptKnockOut = np.ones(len(features.columns))
interceptKnockOut[0] = 0
regularization = interceptKnockOut * theta # no summation this time, so just elementwise mult
regularization = regularization * regularizationParameter / len(features)
    dottedFeats = np.dot(theta, features.T)
    gradient = sigmoid_function(dottedFeats) - np.asarray(response).ravel()
    # broadcast the per-sample error over the feature matrix; use .values so the
    # result is a plain ndarray rather than relying on pandas alignment rules
    gradient = gradient[:, np.newaxis] * features.values
    gradient = gradient.sum(axis=0)
return gradient / len(features) + regularization
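# A quick numerical sanity check (not part of the original homework code): the
# analytic gradient from compute_dCost_dTheta should agree with a central
# finite-difference estimate of compute_cost. The tiny random dataset is made up.
def _example_gradient_check(epsilon=1e-5):
    rng = np.random.RandomState(0)
    features = pd.DataFrame(rng.rand(20, 3), columns=['intercept', 'x1', 'x2'])
    features['intercept'] = 1.0
    response = rng.randint(0, 2, size=20)
    theta = rng.rand(3)
    analytic = compute_dCost_dTheta(theta, features, response, regularizationParameter=0.1)
    numeric = np.zeros_like(theta)
    for i in range(len(theta)):
        step = np.zeros_like(theta)
        step[i] = epsilon
        numeric[i] = (compute_cost(theta + step, features, response, 0.1)
                      - compute_cost(theta - step, features, response, 0.1)) / (2 * epsilon)
    # should be on the order of 1e-7 or smaller
    return np.max(np.abs(analytic - numeric))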
def train_one_vs_all(features, response, classes, regularizationParameter, numIters=500):
""" Trains classifiers for the provided number of classes, returning optimal model parameters
in a len(classes) x numFeatures parameter matrix. """
# some preprocessing
features.insert(0, 'intercept', 1)
optimalTheta = np.zeros((len(classes), len(features.columns)))
# as specified by the hw, train separate models for the classes
for model in range(len(classes)):
print('Training model {0}'.format(classes[model]))
        classResponse = pd.get_dummies(response, columns=['digit_class'])
"""
๊ตญํ ๊ตํต๋ถ Open API
molit(Ministry of Land, Infrastructure and Transport)
1. Transaction ํด๋์ค: ๋ถ๋์ฐ ์ค๊ฑฐ๋๊ฐ ์กฐํ
- AptTrade: ์ํํธ๋งค๋งค ์ค๊ฑฐ๋์๋ฃ ์กฐํ
- AptTradeDetail: ์ํํธ๋งค๋งค ์ค๊ฑฐ๋ ์์ธ ์๋ฃ ์กฐํ
- AptRent: ์ํํธ ์ ์์ธ ์๋ฃ ์กฐํ
- AptOwnership: ์ํํธ ๋ถ์๊ถ์ ๋งค ์ ๊ณ ์๋ฃ ์กฐํ
- OffiTrade: ์คํผ์คํ
๋งค๋งค ์ ๊ณ ์กฐํ
- OffiRent: ์คํผ์คํ
์ ์์ธ ์ ๊ณ ์กฐํ
- RHTrade: ์ฐ๋ฆฝ๋ค์ธ๋ ๋งค๋งค ์ค๊ฑฐ๋์๋ฃ ์กฐํ
- RHRent: ์ฐ๋ฆฝ๋ค์ธ๋ ์ ์์ธ ์ค๊ฑฐ๋์๋ฃ ์กฐํ
- DHTrade: ๋จ๋
/๋ค๊ฐ๊ตฌ ๋งค๋งค ์ค๊ฑฐ๋ ์กฐํ
- DHRent: ๋จ๋
/๋ค๊ฐ๊ตฌ ์ ์์ธ ์๋ฃ ์กฐํ
- LandTrade: ํ ์ง ๋งค๋งค ์ ๊ณ ์กฐํ
- BizTrade: ์์
์
๋ฌด์ฉ ๋ถ๋์ฐ ๋งค๋งค ์ ๊ณ ์๋ฃ ์กฐํ
2. Building ํด๋์ค: ๊ฑด์ถ๋ฌผ๋์ฅ์ ๋ณด ์๋น์ค
01 ๊ฑด์ถ๋ฌผ๋์ฅ ๊ธฐ๋ณธ๊ฐ์ ์กฐํ: getBrBasisOulnInfo
02 ๊ฑด์ถ๋ฌผ๋์ฅ ์ด๊ดํ์ ๋ถ ์กฐํ: getBrRecapTitleInfo
03 ๊ฑด์ถ๋ฌผ๋์ฅ ํ์ ๋ถ ์กฐํ: getBrTitleInfo
04 ๊ฑด์ถ๋ฌผ๋์ฅ ์ธต๋ณ๊ฐ์ ์กฐํ: getBrFlrOulnInfo
05 ๊ฑด์ถ๋ฌผ๋์ฅ ๋ถ์์ง๋ฒ ์กฐํ: getBrAtchJibunInfo
06 ๊ฑด์ถ๋ฌผ๋์ฅ ์ ์ ๊ณต์ฉ๋ฉด์ ์กฐํ: getBrExposPubuseAreaInfo
07 ๊ฑด์ถ๋ฌผ๋์ฅ ์ค์์ ํ์์ค ์กฐํ: getBrWclfInfo
08 ๊ฑด์ถ๋ฌผ๋์ฅ ์ฃผํ๊ฐ๊ฒฉ ์กฐํ: getBrHsprcInfo
09 ๊ฑด์ถ๋ฌผ๋์ฅ ์ ์ ๋ถ ์กฐํ: getBrExposInfo
10 ๊ฑด์ถ๋ฌผ๋์ฅ ์ง์ญ์ง๊ตฌ๊ตฌ์ญ ์กฐํ: getBrJijiguInfo
"""
import datetime
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
class Transaction:
"""
๋ถ๋์ฐ ์ค๊ฑฐ๋๊ฐ ์กฐํ ํด๋์ค
"""
def __init__(self, serviceKey):
"""
๊ณต๊ณต ๋ฐ์ดํฐ ํฌํธ์์ ๋ฐ๊ธ๋ฐ์ Service Key๋ฅผ ์
๋ ฅ๋ฐ์ ์ด๊ธฐํํฉ๋๋ค.
"""
# Open API ์๋น์ค ํค ์ด๊ธฐํ
self.serviceKey = serviceKey
# ServiceKey ์ ํจ์ฑ ๊ฒ์ฌ
self.urlAptTrade = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcAptTrade?serviceKey="
+ self.serviceKey)
self.urlAptTradeDetail = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcAptTradeDev?serviceKey="
+ self.serviceKey)
self.urlAptRent = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcAptRent?serviceKey="
+ self.serviceKey)
self.urlAptOwnership = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcSilvTrade?serviceKey="
+ self.serviceKey)
self.urlOffiTrade = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcOffiTrade?serviceKey="
+ self.serviceKey)
self.urlOffiRent = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcOffiRent?serviceKey="
+ self.serviceKey)
self.urlRHTrade = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcRHTrade?serviceKey="
+ self.serviceKey)
self.urlRHRent = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcRHRent?serviceKey="
+ self.serviceKey)
self.urlDHTrade = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcSHTrade?serviceKey="
+ self.serviceKey)
self.urlDHRent = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcSHRent?serviceKey="
+ self.serviceKey)
self.urlLandTrade = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcLandTrade?serviceKey="
+ self.serviceKey)
self.urlBizTrade = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcNrgTrade?serviceKey="
+ self.serviceKey)
# Open API URL Dict
        urlDict = {
            "Apartment sale transactions": self.urlAptTrade,
            "Apartment sale transactions (detail)": self.urlAptTradeDetail,
            "Apartment jeonse/monthly-rent transactions": self.urlAptRent,
            "Apartment pre-sale right (ownership) transfer reports": self.urlAptOwnership,
            "Officetel sale reports": self.urlOffiTrade,
            "Officetel jeonse/monthly-rent reports": self.urlOffiRent,
            "Row house (multi-family) sale transactions": self.urlRHTrade,
            "Row house (multi-family) jeonse/monthly-rent transactions": self.urlRHRent,
            "Detached/multi-household sale transactions": self.urlDHTrade,
            "Detached/multi-household jeonse/monthly-rent transactions": self.urlDHRent,
            "Land sale reports": self.urlLandTrade,
            "Commercial/business-use building sale reports": self.urlBizTrade,
        }
# ์๋น์ค ์ ์ ์๋ ์ฌ๋ถ ํ์ธ
for serviceName, url in urlDict.items():
result = requests.get(url, verify=False)
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
te = xmlsoup.findAll("header")
if te[0].find("resultCode").text == "00":
print(f">>> {serviceName} ์๋น์ค๊ฐ ์ ์ ์๋ํฉ๋๋ค.")
else:
print(f">>> {serviceName} ์๋น์คํค ๋ฏธ๋ฑ๋ก ์ค๋ฅ์
๋๋ค.")
# ์ง์ญ ์ฝ๋ ์ด๊ธฐํ
# ๋ฒ์ ๋ ์ฝ๋ ์ถ์ฒ : https://code.go.kr
path_code = "https://raw.githubusercontent.com/WooilJeong/PublicDataReader/f14e4de3410cc0f798a83ee5934070d651cbd67b/docs/%EB%B2%95%EC%A0%95%EB%8F%99%EC%BD%94%EB%93%9C%20%EC%A0%84%EC%B2%B4%EC%9E%90%EB%A3%8C.txt"
code = pd.read_csv(path_code, encoding="cp949", sep="\t")
code = code.loc[code["ํ์ง์ฌ๋ถ"] == "์กด์ฌ"]
code["๋ฒ์ ๊ตฌ์ฝ๋"] = list(map(lambda a: str(a)[:5], list(code["๋ฒ์ ๋์ฝ๋"])))
self.code = code
def CodeFinder(self, name):
"""
๊ตญํ ๊ตํต๋ถ ์ค๊ฑฐ๋๊ฐ ์ ๋ณด ์คํAPI๋ ๋ฒ์ ๋์ฝ๋ 10์๋ฆฌ ์ค ์ 5์๋ฆฌ์ธ ๊ตฌ๋ฅผ ๋ํ๋ด๋ ์ง์ญ์ฝ๋๋ฅผ ์ฌ์ฉํฉ๋๋ค.
API์ ์ฌ์ฉํ ๊ตฌ ๋ณ ์ฝ๋๋ฅผ ์กฐํํ๋ ๋ฉ์๋์ด๋ฉฐ, ๋ฌธ์์ด ์ง์ญ ๋ช
์ ์
๋ ฅ๋ฐ๊ณ , ์กฐํ ๊ฒฐ๊ณผ๋ฅผ Pandas DataFrameํ์์ผ๋ก ์ถ๋ ฅํฉ๋๋ค.
"""
result = self.code[self.code["๋ฒ์ ๋๋ช
"].str.contains(name)][[
"๋ฒ์ ๋๋ช
", "๋ฒ์ ๊ตฌ์ฝ๋"
]]
result.index = range(len(result))
return result
def DataCollector(self, service, LAWD_CD, start_date, end_date):
"""
์๋น์ค๋ณ ๊ธฐ๊ฐ๋ณ ์กฐํ
์
๋ ฅ: ์๋น์ค๋ณ ์กฐํ ๋ฉ์๋, ์ง์ญ์ฝ๋, ์์์(YYYYmm), ์ข
๋ฃ์(YYYYmm)
"""
start_date = datetime.datetime.strptime(str(start_date), "%Y%m")
start_date = datetime.datetime.strftime(start_date, "%Y-%m")
end_date = datetime.datetime.strptime(str(end_date), "%Y%m")
end_date = end_date + datetime.timedelta(days=31)
end_date = datetime.datetime.strftime(end_date, "%Y-%m")
ts = pd.date_range(start=start_date, end=end_date, freq="m")
date_list = list(ts.strftime("%Y%m"))
df = pd.DataFrame()
df_sum = pd.DataFrame()
for m in date_list:
print(">>> LAWD_CD :", LAWD_CD, "DEAL_YMD :", m)
DEAL_YMD = m
df = service(LAWD_CD, DEAL_YMD)
df_sum = pd.concat([df_sum, df])
df_sum.index = range(len(df_sum))
return df_sum
def AptTrade(self, LAWD_CD, DEAL_YMD):
"""
01 ์ํํธ๋งค๋งค ์ค๊ฑฐ๋์๋ฃ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlAptTrade + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"๋ฒ์ ๋",
"์ง์ญ์ฝ๋",
"์ํํธ",
"์ง๋ฒ",
"๋
",
"์",
"์ผ",
"๊ฑด์ถ๋
๋",
"์ ์ฉ๋ฉด์ ",
"์ธต",
"๊ฑฐ๋๊ธ์ก",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[๋ฒ์ ๋, ์ง์ญ์ฝ๋, ์ํํธ, ์ง๋ฒ, ๋
, ์, ์ผ, ๊ฑด์ถ๋
๋, ์ ์ฉ๋ฉด์ , ์ธต, ๊ฑฐ๋๊ธ์ก]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"์ง์ญ์ฝ๋", "๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์ํํธ", "์ง๋ฒ", "์ ์ฉ๋ฉด์ ", "์ธต", "๊ฑด์ถ๋
๋", "๊ฑฐ๋๊ธ์ก"
]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๊ฑฐ๋๊ธ์ก"] = pd.to_numeric(df["๊ฑฐ๋๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df["์ํํธ"] = df["์ํํธ"].str.strip()
df.index = range(len(df))
# ํ ๋ณํ
cols = df.columns.drop(["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์ํํธ", "์ง๋ฒ"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def AptTradeDetail(self, LAWD_CD, DEAL_YMD):
"""
02 ์ํํธ๋งค๋งค ์ค๊ฑฐ๋ ์์ธ ์๋ฃ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlAptTradeDetail + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"๊ฑฐ๋๊ธ์ก",
"๊ฑด์ถ๋
๋",
"๋
",
"๋๋ก๋ช
",
"๋๋ก๋ช
๊ฑด๋ฌผ๋ณธ๋ฒํธ์ฝ๋",
"๋๋ก๋ช
๊ฑด๋ฌผ๋ถ๋ฒํธ์ฝ๋",
"๋๋ก๋ช
์๊ตฐ๊ตฌ์ฝ๋",
"๋๋ก๋ช
์ผ๋ จ๋ฒํธ์ฝ๋",
"๋๋ก๋ช
์ง์์งํ์ฝ๋",
"๋๋ก๋ช
์ฝ๋",
"๋ฒ์ ๋",
"๋ฒ์ ๋๋ณธ๋ฒ์ฝ๋",
"๋ฒ์ ๋๋ถ๋ฒ์ฝ๋",
"๋ฒ์ ๋์๊ตฐ๊ตฌ์ฝ๋",
"๋ฒ์ ๋์๋ฉด๋์ฝ๋",
"๋ฒ์ ๋์ง๋ฒ์ฝ๋",
"์ํํธ",
"์",
"์ผ",
"์ ์ฉ๋ฉด์ ",
"์ง๋ฒ",
"์ง์ญ์ฝ๋",
"์ธต",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[
๊ฑฐ๋๊ธ์ก,
๊ฑด์ถ๋
๋,
๋
,
๋๋ก๋ช
,
๋๋ก๋ช
๊ฑด๋ฌผ๋ณธ๋ฒํธ์ฝ๋,
๋๋ก๋ช
๊ฑด๋ฌผ๋ถ๋ฒํธ์ฝ๋,
๋๋ก๋ช
์๊ตฐ๊ตฌ์ฝ๋,
๋๋ก๋ช
์ผ๋ จ๋ฒํธ์ฝ๋,
๋๋ก๋ช
์ง์์งํ์ฝ๋,
๋๋ก๋ช
์ฝ๋,
๋ฒ์ ๋,
๋ฒ์ ๋๋ณธ๋ฒ์ฝ๋,
๋ฒ์ ๋๋ถ๋ฒ์ฝ๋,
๋ฒ์ ๋์๊ตฐ๊ตฌ์ฝ๋,
๋ฒ์ ๋์๋ฉด๋์ฝ๋,
๋ฒ์ ๋์ง๋ฒ์ฝ๋,
์ํํธ,
์,
์ผ,
์ ์ฉ๋ฉด์ ,
์ง๋ฒ,
์ง์ญ์ฝ๋,
์ธต,
]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"์ง์ญ์ฝ๋",
"๋ฒ์ ๋",
"๊ฑฐ๋์ผ",
"์ํํธ",
"์ง๋ฒ",
"์ ์ฉ๋ฉด์ ",
"์ธต",
"๊ฑด์ถ๋
๋",
"๊ฑฐ๋๊ธ์ก",
"๋ฒ์ ๋๋ณธ๋ฒ์ฝ๋",
"๋ฒ์ ๋๋ถ๋ฒ์ฝ๋",
"๋ฒ์ ๋์๊ตฐ๊ตฌ์ฝ๋",
"๋ฒ์ ๋์๋ฉด๋์ฝ๋",
"๋ฒ์ ๋์ง๋ฒ์ฝ๋",
"๋๋ก๋ช
",
"๋๋ก๋ช
๊ฑด๋ฌผ๋ณธ๋ฒํธ์ฝ๋",
"๋๋ก๋ช
๊ฑด๋ฌผ๋ถ๋ฒํธ์ฝ๋",
"๋๋ก๋ช
์๊ตฐ๊ตฌ์ฝ๋",
"๋๋ก๋ช
์ผ๋ จ๋ฒํธ์ฝ๋",
"๋๋ก๋ช
์ง์์งํ์ฝ๋",
"๋๋ก๋ช
์ฝ๋",
]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๊ฑฐ๋๊ธ์ก"] = pd.to_numeric(df["๊ฑฐ๋๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df["์ํํธ"] = df["์ํํธ"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์ํํธ", "์ง๋ฒ", "๋๋ก๋ช
"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def AptRent(self, LAWD_CD, DEAL_YMD):
"""
03 ์ํํธ ์ ์์ธ ์๋ฃ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlAptRent + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"๋ฒ์ ๋",
"์ง์ญ์ฝ๋",
"์ํํธ",
"์ง๋ฒ",
"๋
",
"์",
"์ผ",
"๊ฑด์ถ๋
๋",
"์ ์ฉ๋ฉด์ ",
"์ธต",
"๋ณด์ฆ๊ธ์ก",
"์์ธ๊ธ์ก",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[๋ฒ์ ๋, ์ง์ญ์ฝ๋, ์ํํธ, ์ง๋ฒ, ๋
, ์, ์ผ, ๊ฑด์ถ๋
๋, ์ ์ฉ๋ฉด์ , ์ธต, ๋ณด์ฆ๊ธ์ก, ์์ธ๊ธ์ก]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"์ง์ญ์ฝ๋",
"๋ฒ์ ๋",
"๊ฑฐ๋์ผ",
"์ํํธ",
"์ง๋ฒ",
"์ ์ฉ๋ฉด์ ",
"์ธต",
"๊ฑด์ถ๋
๋",
"๋ณด์ฆ๊ธ์ก",
"์์ธ๊ธ์ก",
]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๋ณด์ฆ๊ธ์ก"] = pd.to_numeric(df["๋ณด์ฆ๊ธ์ก"].str.replace(",", ""))
df["์์ธ๊ธ์ก"] = pd.to_numeric(df["์์ธ๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์ง๋ฒ", "์ํํธ"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def AptOwnership(self, LAWD_CD, DEAL_YMD):
"""
04 ์ํํธ ๋ถ์๊ถ์ ๋งค ์ ๊ณ ์๋ฃ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlAptOwnership + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"๋ฒ์ ๋",
"์ง์ญ์ฝ๋",
"์๊ตฐ๊ตฌ",
"๋จ์ง",
"์ง๋ฒ",
"๊ตฌ๋ถ",
"๋
",
"์",
"์ผ",
"์ ์ฉ๋ฉด์ ",
"์ธต",
"๊ฑฐ๋๊ธ์ก",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[๋ฒ์ ๋, ์ง์ญ์ฝ๋, ์๊ตฐ๊ตฌ, ๋จ์ง, ์ง๋ฒ, ๊ตฌ๋ถ, ๋
, ์, ์ผ, ์ ์ฉ๋ฉด์ , ์ธต, ๊ฑฐ๋๊ธ์ก]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"์ง์ญ์ฝ๋",
"๋ฒ์ ๋",
"๊ฑฐ๋์ผ",
"์๊ตฐ๊ตฌ",
"๋จ์ง",
"์ง๋ฒ",
"๊ตฌ๋ถ",
"์ ์ฉ๋ฉด์ ",
"์ธต",
"๊ฑฐ๋๊ธ์ก",
]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๊ฑฐ๋๊ธ์ก"] = pd.to_numeric(df["๊ฑฐ๋๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์๊ตฐ๊ตฌ", "๋จ์ง", "์ง๋ฒ", "๊ตฌ๋ถ"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def OffiTrade(self, LAWD_CD, DEAL_YMD):
"""
05 ์คํผ์คํ
๋งค๋งค ์ ๊ณ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlOffiTrade + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"๋ฒ์ ๋",
"์ง์ญ์ฝ๋",
"์๊ตฐ๊ตฌ",
"๋จ์ง",
"์ง๋ฒ",
"๋
",
"์",
"์ผ",
"์ ์ฉ๋ฉด์ ",
"์ธต",
"๊ฑฐ๋๊ธ์ก",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[๋ฒ์ ๋, ์ง์ญ์ฝ๋, ์๊ตฐ๊ตฌ, ๋จ์ง, ์ง๋ฒ, ๋
, ์, ์ผ, ์ ์ฉ๋ฉด์ , ์ธต, ๊ฑฐ๋๊ธ์ก]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"์ง์ญ์ฝ๋", "๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์๊ตฐ๊ตฌ", "๋จ์ง", "์ง๋ฒ", "์ ์ฉ๋ฉด์ ", "์ธต", "๊ฑฐ๋๊ธ์ก"
]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๊ฑฐ๋๊ธ์ก"] = pd.to_numeric(df["๊ฑฐ๋๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์๊ตฐ๊ตฌ", "๋จ์ง", "์ง๋ฒ"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def OffiRent(self, LAWD_CD, DEAL_YMD):
"""
06 ์คํผ์คํ
์ ์์ธ ์ ๊ณ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlOffiRent + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"๋ฒ์ ๋",
"์ง์ญ์ฝ๋",
"์๊ตฐ๊ตฌ",
"๋จ์ง",
"์ง๋ฒ",
"๋
",
"์",
"์ผ",
"์ ์ฉ๋ฉด์ ",
"์ธต",
"๋ณด์ฆ๊ธ",
"์์ธ",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[๋ฒ์ ๋, ์ง์ญ์ฝ๋, ์๊ตฐ๊ตฌ, ๋จ์ง, ์ง๋ฒ, ๋
, ์, ์ผ, ์ ์ฉ๋ฉด์ , ์ธต, ๋ณด์ฆ๊ธ, ์์ธ]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"์ง์ญ์ฝ๋",
"๋ฒ์ ๋",
"๊ฑฐ๋์ผ",
"์๊ตฐ๊ตฌ",
"๋จ์ง",
"์ง๋ฒ",
"์ ์ฉ๋ฉด์ ",
"์ธต",
"๋ณด์ฆ๊ธ",
"์์ธ",
]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๋ณด์ฆ๊ธ"] = pd.to_numeric(df["๋ณด์ฆ๊ธ"].str.replace(",", ""))
df["์์ธ"] = pd.to_numeric(df["์์ธ"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์๊ตฐ๊ตฌ", "๋จ์ง", "์ง๋ฒ"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def RHTrade(self, LAWD_CD, DEAL_YMD):
"""
07 ์ฐ๋ฆฝ๋ค์ธ๋ ๋งค๋งค ์ค๊ฑฐ๋์๋ฃ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlRHTrade + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"๋ฒ์ ๋",
"์ง์ญ์ฝ๋",
"์ฐ๋ฆฝ๋ค์ธ๋",
"์ง๋ฒ",
"๋
",
"์",
"์ผ",
"์ ์ฉ๋ฉด์ ",
"๊ฑด์ถ๋
๋",
"์ธต",
"๊ฑฐ๋๊ธ์ก",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[๋ฒ์ ๋, ์ง์ญ์ฝ๋, ์ฐ๋ฆฝ๋ค์ธ๋, ์ง๋ฒ, ๋
, ์, ์ผ, ์ ์ฉ๋ฉด์ , ๊ฑด์ถ๋
๋, ์ธต, ๊ฑฐ๋๊ธ์ก]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"์ง์ญ์ฝ๋",
"๋ฒ์ ๋",
"๊ฑฐ๋์ผ",
"์ฐ๋ฆฝ๋ค์ธ๋",
"์ง๋ฒ",
"์ ์ฉ๋ฉด์ ",
"๊ฑด์ถ๋
๋",
"์ธต",
"๊ฑฐ๋๊ธ์ก",
]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๊ฑฐ๋๊ธ์ก"] = pd.to_numeric(df["๊ฑฐ๋๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์ฐ๋ฆฝ๋ค์ธ๋", "์ง๋ฒ"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def RHRent(self, LAWD_CD, DEAL_YMD):
"""
08 ์ฐ๋ฆฝ๋ค์ธ๋ ์ ์์ธ ์ค๊ฑฐ๋์๋ฃ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlRHRent + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"๋ฒ์ ๋",
"์ง์ญ์ฝ๋",
"์ฐ๋ฆฝ๋ค์ธ๋",
"์ง๋ฒ",
"๋
",
"์",
"์ผ",
"์ ์ฉ๋ฉด์ ",
"๊ฑด์ถ๋
๋",
"์ธต",
"๋ณด์ฆ๊ธ์ก",
"์์ธ๊ธ์ก",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[
๋ฒ์ ๋, ์ง์ญ์ฝ๋, ์ฐ๋ฆฝ๋ค์ธ๋, ์ง๋ฒ, ๋
, ์, ์ผ, ์ ์ฉ๋ฉด์ , ๊ฑด์ถ๋
๋, ์ธต, ๋ณด์ฆ๊ธ์ก,
์์ธ๊ธ์ก
]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"์ง์ญ์ฝ๋",
"๋ฒ์ ๋",
"๊ฑฐ๋์ผ",
"์ฐ๋ฆฝ๋ค์ธ๋",
"์ง๋ฒ",
"์ ์ฉ๋ฉด์ ",
"๊ฑด์ถ๋
๋",
"์ธต",
"๋ณด์ฆ๊ธ์ก",
"์์ธ๊ธ์ก",
]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๋ณด์ฆ๊ธ์ก"] = pd.to_numeric(df["๋ณด์ฆ๊ธ์ก"].str.replace(",", ""))
df["์์ธ๊ธ์ก"] = pd.to_numeric(df["์์ธ๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์ฐ๋ฆฝ๋ค์ธ๋", "์ง๋ฒ"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def DHTrade(self, LAWD_CD, DEAL_YMD):
"""
09 ๋จ๋
/๋ค๊ฐ๊ตฌ ๋งค๋งค ์ค๊ฑฐ๋ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlDHTrade + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"๋ฒ์ ๋",
"์ง์ญ์ฝ๋",
"์ฃผํ์ ํ",
"๋
",
"์",
"์ผ",
"๋์ง๋ฉด์ ",
"์ฐ๋ฉด์ ",
"๊ฑด์ถ๋
๋",
"๊ฑฐ๋๊ธ์ก",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[๋ฒ์ ๋, ์ง์ญ์ฝ๋, ์ฃผํ์ ํ, ๋
, ์, ์ผ, ๋์ง๋ฉด์ , ์ฐ๋ฉด์ , ๊ฑด์ถ๋
๋, ๊ฑฐ๋๊ธ์ก]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"์ง์ญ์ฝ๋", "๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์ฃผํ์ ํ", "๋์ง๋ฉด์ ", "์ฐ๋ฉด์ ", "๊ฑด์ถ๋
๋", "๊ฑฐ๋๊ธ์ก"
]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๊ฑฐ๋๊ธ์ก"] = pd.to_numeric(df["๊ฑฐ๋๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์ฃผํ์ ํ"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def DHRent(self, LAWD_CD, DEAL_YMD):
"""
10 ๋จ๋
/๋ค๊ฐ๊ตฌ ์ ์์ธ ์๋ฃ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlDHRent + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = ["๋ฒ์ ๋", "์ง์ญ์ฝ๋", "๋
", "์", "์ผ", "๊ณ์ฝ๋ฉด์ ", "๋ณด์ฆ๊ธ์ก", "์์ธ๊ธ์ก"]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame([[๋ฒ์ ๋, ์ง์ญ์ฝ๋, ๋
, ์, ์ผ, ๊ณ์ฝ๋ฉด์ , ๋ณด์ฆ๊ธ์ก, ์์ธ๊ธ์ก]],
columns=variables)
df = pd.concat([df, data])
# Set Columns
colNames = ["์ง์ญ์ฝ๋", "๋ฒ์ ๋", "๊ฑฐ๋์ผ", "๊ณ์ฝ๋ฉด์ ", "๋ณด์ฆ๊ธ์ก", "์์ธ๊ธ์ก"]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๋ณด์ฆ๊ธ์ก"] = pd.to_numeric(df["๋ณด์ฆ๊ธ์ก"].str.replace(",", ""))
df["์์ธ๊ธ์ก"] = pd.to_numeric(df["์์ธ๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def LandTrade(self, LAWD_CD, DEAL_YMD):
"""
11 ํ ์ง ๋งค๋งค ์ ๊ณ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlLandTrade + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"๋ฒ์ ๋",
"์ง์ญ์ฝ๋",
"์๊ตฐ๊ตฌ",
"์ฉ๋์ง์ญ",
"์ง๋ชฉ",
"๋
",
"์",
"์ผ",
"์ง๋ถ๊ฑฐ๋๊ตฌ๋ถ",
"๊ฑฐ๋๋ฉด์ ",
"๊ฑฐ๋๊ธ์ก",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[๋ฒ์ ๋, ์ง์ญ์ฝ๋, ์๊ตฐ๊ตฌ, ์ฉ๋์ง์ญ, ์ง๋ชฉ, ๋
, ์, ์ผ, ์ง๋ถ๊ฑฐ๋๊ตฌ๋ถ, ๊ฑฐ๋๋ฉด์ , ๊ฑฐ๋๊ธ์ก]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"์ง์ญ์ฝ๋",
"๋ฒ์ ๋",
"๊ฑฐ๋์ผ",
"์๊ตฐ๊ตฌ",
"์ฉ๋์ง์ญ",
"์ง๋ชฉ",
"์ง๋ถ๊ฑฐ๋๊ตฌ๋ถ",
"๊ฑฐ๋๋ฉด์ ",
"๊ฑฐ๋๊ธ์ก",
]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๊ฑฐ๋๊ธ์ก"] = pd.to_numeric(df["๊ฑฐ๋๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(
["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์๊ตฐ๊ตฌ", "์ฉ๋์ง์ญ", "์ง๋ชฉ", "์ง๋ถ๊ฑฐ๋๊ตฌ๋ถ"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def BizTrade(self, LAWD_CD, DEAL_YMD):
"""
12 ์์
์
๋ฌด์ฉ ๋ถ๋์ฐ ๋งค๋งค ์ ๊ณ ์๋ฃ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlBizTrade + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"๊ฑฐ๋๊ธ์ก",
"๊ฑด๋ฌผ๋ฉด์ ",
"๊ฑด๋ฌผ์ฃผ์ฉ๋",
"๊ฑด์ถ๋
๋",
"๊ตฌ๋ถ",
"๋
",
"์",
"์ผ",
"๋์ง๋ฉด์ ",
"๋ฒ์ ๋",
"์๊ตฐ๊ตฌ",
"์ฉ๋์ง์ญ",
"์ ํ",
"์ง์ญ์ฝ๋",
"์ธต",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[
๊ฑฐ๋๊ธ์ก,
๊ฑด๋ฌผ๋ฉด์ ,
๊ฑด๋ฌผ์ฃผ์ฉ๋,
๊ฑด์ถ๋
๋,
๊ตฌ๋ถ,
๋
,
์,
์ผ,
๋์ง๋ฉด์ ,
๋ฒ์ ๋,
์๊ตฐ๊ตฌ,
์ฉ๋์ง์ญ,
์ ํ,
์ง์ญ์ฝ๋,
์ธต,
]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"์ง์ญ์ฝ๋",
"๋ฒ์ ๋",
"๊ฑฐ๋์ผ",
"์๊ตฐ๊ตฌ",
"์ฉ๋์ง์ญ",
"์ ํ",
"๋์ง๋ฉด์ ",
"๊ตฌ๋ถ",
"๊ฑด๋ฌผ๋ฉด์ ",
"๊ฑด๋ฌผ์ฃผ์ฉ๋",
"๊ฑด์ถ๋
๋",
"์ธต",
"๊ฑฐ๋๊ธ์ก",
]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๊ฑฐ๋๊ธ์ก"] = pd.to_numeric(df["๊ฑฐ๋๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(
["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์๊ตฐ๊ตฌ", "์ฉ๋์ง์ญ", "์ ํ", "๊ฑด๋ฌผ์ฃผ์ฉ๋"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
class Building:
"""
๊ฑด์ถ๋ฌผ๋์ฅ์ ๋ณด ์๋น์ค
"""
def __init__(self, serviceKey):
"""
๊ณต๊ณต ๋ฐ์ดํฐ ํฌํธ์์ ๋ฐ๊ธ๋ฐ์ Service Key๋ฅผ ์
๋ ฅ๋ฐ์ ์ด๊ธฐํํฉ๋๋ค.
"""
# Open API ์๋น์ค ํค ์ด๊ธฐํ
self.serviceKey = serviceKey
# ServiceKey ์ ํจ์ฑ ๊ฒ์ฌ
self.baseUrl = "http://apis.data.go.kr/1613000/BldRgstService_v2/"
self.url_getBrBasisOulnInfo = (self.baseUrl + "getBrBasisOulnInfo" +
f"?serviceKey={self.serviceKey}")
self.url_getBrRecapTitleInfo = (self.baseUrl + "getBrRecapTitleInfo" +
f"?serviceKey={self.serviceKey}")
self.url_getBrTitleInfo = (self.baseUrl + "getBrTitleInfo" +
f"?serviceKey={self.serviceKey}")
self.url_getBrFlrOulnInfo = (self.baseUrl + "getBrFlrOulnInfo" +
f"?serviceKey={self.serviceKey}")
self.url_getBrAtchJibunInfo = (self.baseUrl + "getBrAtchJibunInfo" +
f"?serviceKey={self.serviceKey}")
self.url_getBrExposPubuseAreaInfo = (self.baseUrl +
"getBrExposPubuseAreaInfo" +
f"?serviceKey={self.serviceKey}")
self.url_getBrWclfInfo = (self.baseUrl + "getBrWclfInfo" +
f"?serviceKey={self.serviceKey}")
self.url_getBrHsprcInfo = (self.baseUrl + "getBrHsprcInfo" +
f"?serviceKey={self.serviceKey}")
self.url_getBrExposInfo = (self.baseUrl + "getBrExposInfo" +
f"?serviceKey={self.serviceKey}")
self.url_getBrJijiguInfo = (self.baseUrl + "getBrJijiguInfo" +
f"?serviceKey={self.serviceKey}")
# Open API URL Dict
        urlDict = {
            "Building register: basic outline": self.url_getBrBasisOulnInfo,
            "Building register: recap title section": self.url_getBrRecapTitleInfo,
            "Building register: title section": self.url_getBrTitleInfo,
            "Building register: floor outline": self.url_getBrFlrOulnInfo,
            "Building register: attached lot numbers": self.url_getBrAtchJibunInfo,
            "Building register: exclusive/common-use area": self.url_getBrExposPubuseAreaInfo,
            "Building register: sewage treatment facility": self.url_getBrWclfInfo,
            "Building register: housing price": self.url_getBrHsprcInfo,
            "Building register: exposure section": self.url_getBrExposInfo,
            "Building register: zoning districts": self.url_getBrJijiguInfo,
        }
# 서비스 정상 작동 여부 확인
for serviceName, url in urlDict.items():
result = requests.get(url, verify=False)
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
te = xmlsoup.findAll("header")
if te[0].find("resultCode").text == "00":
print(f">>> {serviceName} 서비스가 정상 작동합니다.")
else:
print(f">>> {serviceName} 서비스키 미등록 오류입니다.")
# 지역 코드 초기화
# 법정동 코드 출처 : https://code.go.kr
path_code = "https://raw.githubusercontent.com/WooilJeong/PublicDataReader/f14e4de3410cc0f798a83ee5934070d651cbd67b/docs/%EB%B2%95%EC%A0%95%EB%8F%99%EC%BD%94%EB%93%9C%20%EC%A0%84%EC%B2%B4%EC%9E%90%EB%A3%8C.txt"
code = pd.read_csv(path_code, encoding="cp949", sep="\t")
code = code.loc[code["폐지여부"] == "존재"]
code["법정구코드"] = list(map(lambda a: str(a)[:5], list(code["법정동코드"])))
self.code = code
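# Usage sketch (hedged): the service key below is a placeholder, not a real credential.
# Instantiating the class prints one status line per service listed in urlDict above.
# api = Building("YOUR_SERVICE_KEY")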
def CodeFinder(self, name):
"""
๊ตญํ ๊ตํต๋ถ ์ค๊ฑฐ๋๊ฐ ์ ๋ณด ์คํAPI๋ ๋ฒ์ ๋์ฝ๋ 10์๋ฆฌ ์ค ์ 5์๋ฆฌ์ธ ๊ตฌ๋ฅผ ๋ํ๋ด๋ ์ง์ญ์ฝ๋๋ฅผ ์ฌ์ฉํฉ๋๋ค.
API์ ์ฌ์ฉํ ๊ตฌ ๋ณ ์ฝ๋๋ฅผ ์กฐํํ๋ ๋ฉ์๋์ด๋ฉฐ, ๋ฌธ์์ด ์ง์ญ ๋ช
์ ์
๋ ฅ๋ฐ๊ณ , ์กฐํ ๊ฒฐ๊ณผ๋ฅผ Pandas DataFrameํ์์ผ๋ก ์ถ๋ ฅํฉ๋๋ค.
"""
result = self.code[self.code["법정동명"].str.contains(name)][[
"법정동명", "법정구코드"
]]
result.index = range(len(result))
return result
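# Example (hypothetical district name; the returned DataFrame carries the two columns
# selected above, 법정동명 and 법정구코드):
# api.CodeFinder("분당구")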
def ChangeCols(self, df, operationName):
"""
์๋ฌธ ์ปฌ๋ผ๋ช
์ ๊ตญ๋ฌธ ์ปฌ๋ผ๋ช
์ผ๋ก ๋ณ๊ฒฝ
"""
if operationName == "getBrBasisOulnInfo":
self.colDict = {
"bjdongCd": "법정동코드",
"bldNm": "건물명",
"block": "블록",
"bun": "번",
"bylotCnt": "외필지수",
"crtnDay": "생성일자",
"guyukCd": "구역코드",
"guyukCdNm": "구역코드명",
"ji": "지",
"jiguCd": "지구코드",
"jiguCdNm": "지구코드명",
"jiyukCd": "지역코드",
"jiyukCdNm": "지역코드명",
"lot": "로트",
"mgmBldrgstPk": "관리건축물대장PK",
"mgmUpBldrgstPk": "관리상위건축물대장PK",
"naBjdongCd": "새주소법정동코드",
"naMainBun": "새주소본번",
"naRoadCd": "새주소도로코드",
"naSubBun": "새주소부번",
"naUgrndCd": "새주소지상지하코드",
"newPlatPlc": "도로명대지위치",
"platGbCd": "대지구분코드",
"platPlc": "대지위치",
"regstrGbCd": "대장구분코드",
"regstrGbCdNm": "대장구분코드명",
"regstrKindCd": "대장종류코드",
"regstrKindCdNm": "대장종류코드명",
"rnum": "순번",
"sigunguCd": "시군구코드",
"splotNm": "특수지명",
}
elif operationName == "getBrRecapTitleInfo":
self.colDict = {
"archArea": "건축면적",
"atchBldArea": "부속건축물면적",
"atchBldCnt": "부속건축물수",
"bcRat": "건폐율",
"bjdongCd": "법정동코드",
"bldNm": "건물명",
"block": "블록",
"bun": "번",
"bylotCnt": "외필지수",
"crtnDay": "생성일자",
"engrEpi": "EPI점수",
"engrGrade": "에너지효율등급",
"engrRat": "에너지절감율",
"etcPurps": "기타용도",
"fmlyCnt": "가구수",
"gnBldCert": "친환경건축물인증점수",
"gnBldGrade": "친환경건축물등급",
"hhldCnt": "세대수",
"hoCnt": "호수",
"indrAutoArea": "옥내자주식면적",
"indrAutoUtcnt": "옥내자주식대수",
"indrMechArea": "옥내기계식면적",
"indrMechUtcnt": "옥내기계식대수",
"itgBldCert": "지능형건축물인증점수",
"itgBldGrade": "지능형건축물등급",
"ji": "지",
"lot": "로트",
"mainBldCnt": "주건축물수",
"mainPurpsCd": "주용도코드",
"mainPurpsCdNm": "주용도코드명",
"mgmBldrgstPk": "관리건축물대장PK",
"naBjdongCd": "새주소법정동코드",
"naMainBun": "새주소본번",
"naRoadCd": "새주소도로코드",
"naSubBun": "새주소부번",
"naUgrndCd": "새주소지상지하코드",
"newOldRegstrGbCd": "신구대장구분코드",
"newOldRegstrGbCdNm": "신구대장구분코드명",
"newPlatPlc": "도로명대지위치",
"oudrAutoArea": "옥외자주식면적",
"oudrAutoUtcnt": "옥외자주식대수",
"oudrMechArea": "옥외기계식면적",
"oudrMechUtcnt": "옥외기계식대수",
"platArea": "대지면적",
"platGbCd": "대지구분코드",
"platPlc": "대지위치",
"pmsDay": "허가일",
"pmsnoGbCd": "허가번호구분코드",
"pmsnoGbCdNm": "허가번호구분코드명",
"pmsnoKikCd": "허가번호기관코드",
"pmsnoKikCdNm": "허가번호기관코드명",
"pmsnoYear": "허가번호년",
"regstrGbCd": "대장구분코드",
"regstrGbCdNm": "대장구분코드명",
"regstrKindCd": "대장종류코드",
"regstrKindCdNm": "대장종류코드명",
"rnum": "순번",
"sigunguCd": "시군구코드",
"splotNm": "특수지명",
"stcnsDay": "착공일",
"totArea": "연면적",
"totPkngCnt": "총주차수",
"useAprDay": "사용승인일",
"vlRat": "용적률",
"vlRatEstmTotArea": "용적률산정연면적",
}
elif operationName == "getBrTitleInfo":
self.colDict = {
"archArea": "건축면적",
"atchBldArea": "부속건축물면적",
"atchBldCnt": "부속건축물수",
"bcRat": "건폐율",
"bjdongCd": "법정동코드",
"bldNm": "건물명",
"block": "블록",
"bun": "번",
"bylotCnt": "외필지수",
"crtnDay": "생성일자",
"dongNm": "동명칭",
"emgenUseElvtCnt": "비상용승강기수",
"engrEpi": "EPI점수",
"engrGrade": "에너지효율등급",
"engrRat": "에너지절감율",
"etcPurps": "기타용도",
"etcRoof": "기타지붕",
"etcStrct": "기타구조",
"fmlyCnt": "가구수",
"gnBldCert": "친환경건축물인증점수",
"gnBldGrade": "친환경건축물등급",
"grndFlrCnt": "지상층수",
"heit": "높이",
"hhldCnt": "세대수",
"hoCnt": "호수",
"indrAutoArea": "옥내자주식면적",
"indrAutoUtcnt": "옥내자주식대수",
"indrMechArea": "옥내기계식면적",
"indrMechUtcnt": "옥내기계식대수",
"itgBldCert": "지능형건축물인증점수",
"itgBldGrade": "지능형건축물등급",
"ji": "지",
"lot": "로트",
"mainAtchGbCd": "주부속구분코드",
"mainAtchGbCdNm": "주부속구분코드명",
"mainPurpsCd": "주용도코드",
"mainPurpsCdNm": "주용도코드명",
"mgmBldrgstPk": "관리건축물대장PK",
"naBjdongCd": "새주소법정동코드",
"naMainBun": "새주소본번",
"naRoadCd": "새주소도로코드",
"naSubBun": "새주소부번",
"naUgrndCd": "새주소지상지하코드",
"newPlatPlc": "도로명대지위치",
"oudrAutoArea": "옥외자주식면적",
"oudrAutoUtcnt": "옥외자주식대수",
"oudrMechArea": "옥외기계식면적",
"oudrMechUtcnt": "옥외기계식대수",
"platArea": "대지면적",
"platGbCd": "대지구분코드",
"platPlc": "대지위치",
"pmsDay": "허가일",
"pmsnoGbCd": "허가번호구분코드",
"pmsnoGbCdNm": "허가번호구분코드명",
"pmsnoKikCd": "허가번호기관코드",
"pmsnoKikCdNm": "허가번호기관코드명",
"pmsnoYear": "허가번호년",
"regstrGbCd": "대장구분코드",
"regstrGbCdNm": "대장구분코드명",
"regstrKindCd": "대장종류코드",
"regstrKindCdNm": "대장종류코드명",
"rideUseElvtCnt": "승용승강기수",
"rnum": "순번",
"roofCd": "지붕코드",
"roofCdNm": "지붕코드명",
"rserthqkAblty": "내진 능력",
"rserthqkDsgnApplyYn": "내진 설계 적용 여부",
"sigunguCd": "시군구코드",
"splotNm": "특수지명",
"stcnsDay": "착공일",
"strctCd": "구조코드",
"strctCdNm": "구조코드명",
"totArea": "연면적",
"totDongTotArea": "총동연면적",
"ugrndFlrCnt": "지하층수",
"useAprDay": "사용승인일",
"vlRat": "용적률",
"vlRatEstmTotArea": "용적률산정연면적",
}
elif operationName == "getBrFlrOulnInfo":
self.colDict = {
"area": "면적",
"areaExctYn": "면적제외여부",
"bjdongCd": "법정동코드",
"bldNm": "건물명",
"block": "블록",
"bun": "번",
"crtnDay": "생성일자",
"dongNm": "동명칭",
"etcPurps": "기타용도",
"etcStrct": "기타구조",
"flrGbCd": "층구분코드",
"flrGbCdNm": "층구분코드명",
"flrNo": "층번호",
"flrNoNm": "층번호명",
"ji": "지",
"lot": "로트",
"mainAtchGbCd": "주부속구분코드",
"mainAtchGbCdNm": "주부속구분코드명",
"mainPurpsCd": "주용도코드",
"mainPurpsCdNm": "주용도코드명",
"mgmBldrgstPk": "관리건축물대장PK",
"naBjdongCd": "새주소법정동코드",
"naMainBun": "새주소본번",
"naRoadCd": "새주소도로코드",
"naSubBun": "새주소부번",
"naUgrndCd": "새주소지상지하코드",
"newPlatPlc": "도로명대지위치",
"platGbCd": "대지구분코드",
"platPlc": "대지위치",
"rnum": "순번",
"sigunguCd": "시군구코드",
"splotNm": "특수지명",
"strctCd": "구조코드",
"strctCdNm": "구조코드명",
}
elif operationName == "getBrAtchJibunInfo":
self.colDict = {
"atchBjdongCd": "부속법정동코드",
"atchBlock": "부속블록",
"atchBun": "부속번",
"atchEtcJibunNm": "부속기타지번명",
"atchJi": "부속지",
"atchLot": "부속로트",
"atchPlatGbCd": "부속대지구분코드",
"atchRegstrGbCd": "부속대장구분코드",
"atchRegstrGbCdNm": "부속대장구분코드명",
"atchSigunguCd": "부속시군구코드",
"atchSplotNm": "부속특수지명",
"bjdongCd": "법정동코드",
"bldNm": "건물명",
"block": "블록",
"bun": "번",
"crtnDay": "생성일자",
"ji": "지",
"lot": "로트",
"mgmBldrgstPk": "관리건축물대장PK",
"naBjdongCd": "새주소법정동코드",
"naMainBun": "새주소본번",
"naRoadCd": "새주소도로코드",
"naSubBun": "새주소부번",
"naUgrndCd": "새주소지상지하코드",
"newPlatPlc": "도로명대지위치",
"platGbCd": "대지구분코드",
"platPlc": "대지위치",
"regstrGbCd": "대장구분코드",
"regstrGbCdNm": "대장구분코드명",
"regstrKindCd": "대장종류코드",
"regstrKindCdNm": "대장종류코드명",
"rnum": "순번",
"sigunguCd": "시군구코드",
"splotNm": "특수지명",
}
elif operationName == "getBrExposPubuseAreaInfo":
self.colDict = {
"area": "면적",
"bjdongCd": "법정동코드",
"bldNm": "건물명",
"block": "블록",
"bun": "번",
"crtnDay": "생성일자",
"dongNm": "동명칭",
"etcPurps": "기타용도",
"etcStrct": "기타구조",
"exposPubuseGbCd": "전유공용구분코드",
"exposPubuseGbCdNm": "전유공용구분코드명",
"flrGbCd": "층구분코드",
"flrGbCdNm": "층구분코드명",
"flrNo": "층번호",
"flrNoNm": "층번호명",
"hoNm": "호명칭",
"ji": "지",
"lot": "로트",
"mainAtchGbCd": "주부속구분코드",
"mainAtchGbCdNm": "주부속구분코드명",
"mainPurpsCd": "주용도코드",
"mainPurpsCdNm": "주용도코드명",
"mgmBldrgstPk": "관리건축물대장PK",
"naBjdongCd": "새주소법정동코드",
"naMainBun": "새주소본번",
"naRoadCd": "새주소도로코드",
"naSubBun": "새주소부번",
"naUgrndCd": "새주소지상지하코드",
"newPlatPlc": "도로명대지위치",
"platGbCd": "대지구분코드",
"platPlc": "대지위치",
"regstrGbCd": "대장구분코드",
"regstrGbCdNm": "대장구분코드명",
"regstrKindCd": "대장종류코드",
"regstrKindCdNm": "대장종류코드명",
"rnum": "순번",
"sigunguCd": "시군구코드",
"splotNm": "특수지명",
"strctCd": "구조코드",
"strctCdNm": "구조코드명",
}
elif operationName == "getBrWclfInfo":
self.colDict = {
"bjdongCd": "법정동코드",
"bldNm": "건물명",
"block": "블록",
"bun": "번",
"capaLube": "용량(루베)",
"capaPsper": "용량(인용)",
"crtnDay": "생성일자",
"etcMode": "기타형식",
"ji": "지",
"lot": "로트",
"mgmBldrgstPk": "관리건축물대장PK",
"modeCd": "형식코드",
"modeCdNm": "형식코드명",
"naBjdongCd": "새주소법정동코드",
"naMainBun": "새주소본번",
"naRoadCd": "새주소도로코드",
"naSubBun": "새주소부번",
"naUgrndCd": "새주소지상지하코드",
"newPlatPlc": "도로명대지위치",
"platGbCd": "대지구분코드",
"platPlc": "대지위치",
"regstrGbCd": "대장구분코드",
"regstrGbCdNm": "대장구분코드명",
"regstrKindCd": "대장종류코드",
"regstrKindCdNm": "대장종류코드명",
"rnum": "순번",
"sigunguCd": "시군구코드",
"splotNm": "특수지명",
"unitGbCd": "단위구분코드",
"unitGbCdNm": "단위구분코드명",
}
elif operationName == "getBrHsprcInfo":
self.colDict = {
"bjdongCd": "법정동코드",
"bldNm": "건물명",
"block": "블록",
"bun": "번",
"bylotCnt": "외필지수",
"crtnDay": "생성일자",
"hsprc": "주택가격",
"ji": "지",
"lot": "로트",
"mgmBldrgstPk": "관리건축물대장PK",
"naBjdongCd": "새주소법정동코드",
"naMainBun": "새주소본번",
"naRoadCd": "새주소도로코드",
"naSubBun": "새주소부번",
"naUgrndCd": "새주소지상지하코드",
"newPlatPlc": "도로명대지위치",
"platGbCd": "대지구분코드",
"platPlc": "대지위치",
"regstrGbCd": "대장구분코드",
"regstrGbCdNm": "대장구분코드명",
"regstrKindCd": "대장종류코드",
"regstrKindCdNm": "대장종류코드명",
"rnum": "순번",
"sigunguCd": "시군구코드",
"splotNm": "특수지명",
}
elif operationName == "getBrExposInfo":
self.colDict = {
"bjdongCd": "법정동코드",
"bldNm": "건물명",
"block": "블록",
"bun": "번",
"crtnDay": "생성일자",
"dongNm": "동명칭",
"flrGbCd": "층구분코드",
"flrGbCdNm": "층구분코드명",
"flrNo": "층번호",
"hoNm": "호명칭",
"ji": "지",
"lot": "로트",
"mgmBldrgstPk": "관리건축물대장PK",
"naBjdongCd": "새주소법정동코드",
"naMainBun": "새주소본번",
"naRoadCd": "새주소도로코드",
"naSubBun": "새주소부번",
"naUgrndCd": "새주소지상지하코드",
"newPlatPlc": "도로명대지위치",
"platGbCd": "대지구분코드",
"platPlc": "대지위치",
"regstrGbCd": "대장구분코드",
"regstrGbCdNm": "대장구분코드명",
"regstrKindCd": "대장종류코드",
"regstrKindCdNm": "대장종류코드명",
"rnum": "순번",
"sigunguCd": "시군구코드",
"splotNm": "특수지명",
}
elif operationName == "getBrJijiguInfo":
self.colDict = {
"bjdongCd": "법정동코드",
"block": "블록",
"bun": "번",
"crtnDay": "생성일자",
"etcJijigu": "기타지역지구구역",
"ji": "지",
"jijiguCd": "지역지구구역코드",
"jijiguCdNm": "지역지구구역코드명",
"jijiguGbCd": "지역지구구역구분코드",
"jijiguGbCdNm": "지역지구구역구분코드명",
"lot": "로트",
"mgmBldrgstPk": "관리건축물대장PK",
"newPlatPlc": "도로명대지위치",
"platGbCd": "대지구분코드",
"platPlc": "대지위치",
"reprYn": "대표여부",
"rnum": "순번",
"sigunguCd": "시군구코드",
"splotNm": "특수지명",
}
df = df.rename(columns=self.colDict)
return df
def getBrBasisOulnInfo(
self,
sigunguCd_,
bjdongCd_,
platGbCd_="",
bun_="",
ji_="",
startDate_="",
endDate_="",
):
"""
01 ๊ฑด์ถ๋ฌผ๋์ฅ ๊ธฐ๋ณธ๊ฐ์ ์กฐํ
์
๋ ฅ: ์๊ตฐ๊ตฌ์ฝ๋, ๋ฒ์ ๋์ฝ๋, ๋์ง๊ตฌ๋ถ์ฝ๋, ๋ฒ, ์ง
"""
# URL
url = f"{self.url_getBrBasisOulnInfo}&sigunguCd={sigunguCd_}&bjdongCd={bjdongCd_}&platGbCd={platGbCd_}&bun={bun_}&ji={ji_}&startDate={startDate_}&endDate={endDate_}&numOfRows=99999"
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"bjdongCd",
"bldNm",
"block",
"bun",
"bylotCnt",
"crtnDay",
"guyukCd",
"guyukCdNm",
"ji",
"jiguCd",
"jiguCdNm",
"jiyukCd",
"jiyukCdNm",
"lot",
"mgmBldrgstPk",
"mgmUpBldrgstPk",
"naBjdongCd",
"naMainBun",
"naRoadCd",
"naSubBun",
"naUgrndCd",
"newPlatPlc",
"platGbCd",
"platPlc",
"regstrGbCd",
"regstrGbCdNm",
"regstrKindCd",
"regstrKindCdNm",
"rnum",
"sigunguCd",
"splotNm",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[
bjdongCd,
bldNm,
block,
bun,
bylotCnt,
crtnDay,
guyukCd,
guyukCdNm,
ji,
jiguCd,
jiguCdNm,
jiyukCd,
jiyukCdNm,
lot,
mgmBldrgstPk,
mgmUpBldrgstPk,
naBjdongCd,
naMainBun,
naRoadCd,
naSubBun,
naUgrndCd,
newPlatPlc,
platGbCd,
platPlc,
regstrGbCd,
regstrGbCdNm,
regstrKindCd,
regstrKindCdNm,
rnum,
sigunguCd,
splotNm,
]],
columns=variables,
)
df = pd.concat([df, data])
df.index = range(len(df))
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# 정상 요청 시 에러 발생 -> Python 코드 에러
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API 서비스 제공처 오류
else:
print(">>> Open API Error: {}".format(te[0].find("resultMsg").text))
def getBrRecapTitleInfo(
self,
sigunguCd_,
bjdongCd_,
platGbCd_="",
bun_="",
ji_="",
startDate_="",
endDate_="",
):
"""
02 ๊ฑด์ถ๋ฌผ๋์ฅ ์ด๊ดํ์ ๋ถ ์กฐํ
์
๋ ฅ: ์๊ตฐ๊ตฌ์ฝ๋, ๋ฒ์ ๋์ฝ๋, ๋์ง๊ตฌ๋ถ์ฝ๋, ๋ฒ, ์ง, ๊ฒ์์์์ผ, ๊ฒ์์ข
๋ฃ์ผ
"""
# URL
url = f"{self.url_getBrRecapTitleInfo}&sigunguCd={sigunguCd_}&bjdongCd={bjdongCd_}&platGbCd={platGbCd_}&bun={bun_}&ji={ji_}&startDate={startDate_}&endDate={endDate_}&numOfRows=99999"
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(timedelta_range('1 day', periods=3))
from datetime import datetime
import operator
import numpy as np
import pytest
from pandas import DataFrame, Index, Series, bdate_range
import pandas._testing as tm
from pandas.core import ops
class TestSeriesLogicalOps:
@pytest.mark.parametrize("bool_op", [operator.and_, operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
expected[mask] = False
tm.assert_series_equal(result, expected)
def test_logical_operators_bool_dtype_with_empty(self):
# GH#9016: support bitwise op for integer types
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_empty = Series([], dtype=object)
res = s_tft & s_empty
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_dtype(self):
# GH#9016: support bitwise op for integer types
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype="int64")
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_0123 & s_3333
expected = Series(range(4), dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype="int64")
tm.assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype="int8")
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype="int32")
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_scalar(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
res = s_0123 & 0
expected = Series([0] * 4)
tm.assert_series_equal(res, expected)
res = s_0123 & 1
expected = Series([0, 1, 0, 1])
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_float(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_0123 & np.NaN
with pytest.raises(TypeError, match=msg):
s_0123 & 3.14
msg = "unsupported operand type.+for &:"
with pytest.raises(TypeError, match=msg):
s_0123 & [0.1, 4, 3.14, 2]
with pytest.raises(TypeError, match=msg):
s_0123 & np.array([0.1, 4, 3.14, 2])
with pytest.raises(TypeError, match=msg):
s_0123 & Series([0.1, 4, -3.14, 2])
def test_logical_operators_int_dtype_with_str(self):
s_1111 = Series([1] * 4, dtype="int8")
msg = "Cannot perform 'and_' with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_1111 & "a"
with pytest.raises(TypeError, match="unsupported operand.+for &"):
s_1111 & ["a", "b", "c", "d"]
def test_logical_operators_int_dtype_with_bool(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
expected = Series([False] * 4)
result = s_0123 & False
tm.assert_series_equal(result, expected)
result = s_0123 & [False]
tm.assert_series_equal(result, expected)
result = s_0123 & (False,)
tm.assert_series_equal(result, expected)
result = s_0123 ^ False
expected = Series([False, True, True, True])
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_object(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
result = s_0123 & Series([False, np.NaN, False, False])
expected = Series([False] * 4)
tm.assert_series_equal(result, expected)
s_abNd = Series(["a", "b", np.NaN, "d"])
with pytest.raises(TypeError, match="unsupported.* 'int' and 'str'"):
s_0123 & s_abNd
def test_logical_operators_bool_dtype_with_int(self):
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
res = s_tft & 0
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft & 1
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_ops_bool_dtype_with_ndarray(self):
# make sure we operate on ndarray the same as Series
left = Series([True, True, True, False, True])
right = [True, False, None, True, np.nan]
expected = Series([True, False, False, False, False])
result = left & right
tm.assert_series_equal(result, expected)
result = left & np.array(right)
tm.assert_series_equal(result, expected)
result = left & Index(right)
tm.assert_series_equal(result, expected)
result = left & Series(right)
tm.assert_series_equal(result, expected)
expected = Series([True, True, True, True, True])
result = left | right
tm.assert_series_equal(result, expected)
result = left | np.array(right)
tm.assert_series_equal(result, expected)
result = left | Index(right)
tm.assert_series_equal(result, expected)
result = left | Series(right)
tm.assert_series_equal(result, expected)
expected = Series([False, True, True, True, True])
result = left ^ right
tm.assert_series_equal(result, expected)
result = left ^ np.array(right)
tm.assert_series_equal(result, expected)
result = left ^ Index(right)
tm.assert_series_equal(result, expected)
result = left ^ Series(right)
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_bool_dtype_and_reindex(self):
# GH#9016: support bitwise op for integer types
# with non-matching indexes, logical operators will cast to object
# before operating
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_tff = Series([True, False, False], index=index)
s_0123 = Series(range(4), dtype="int64")
# s_0123 will be all false now because of reindexing like s_tft
expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])
result = s_tft & s_0123
tm.assert_series_equal(result, expected)
expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])
result = s_0123 & s_tft
tm.assert_series_equal(result, expected)
s_a0b1c0 = Series([1], list("b"))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list("abc"))
tm.assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list("abc"))
tm.assert_series_equal(res, expected)
def test_scalar_na_logical_ops_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s & datetime(2005, 1, 1)
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
result = s & list(s)
tm.assert_series_equal(result, expected)
def test_scalar_na_logical_ops_corners_aligns(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
d = DataFrame({"A": s})
expected = DataFrame(False, index=range(9), columns=["A"] + list(range(9)))
result = s & d
tm.assert_frame_equal(result, expected)
result = d & s
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("op", [operator.and_, operator.or_, operator.xor])
def test_logical_ops_with_index(self, op):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Series([op(ser[n], idx1[n]) for n in range(len(ser))])
result = op(ser, idx1)
tm.assert_series_equal(result, expected)
expected = Series([op(ser[n], idx2[n]) for n in range(len(ser))], dtype=bool)
result = op(ser, idx2)
tm.assert_series_equal(result, expected)
def test_reversed_xor_with_index_returns_index(self):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Index.symmetric_difference(idx1, ser)
with tm.assert_produces_warning(FutureWarning):
result = idx1 ^ ser
tm.assert_index_equal(result, expected)
expected = Index.symmetric_difference(idx2, ser)
with tm.assert_produces_warning(FutureWarning):
result = idx2 ^ ser
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"op",
[
pytest.param(
ops.rand_,
marks=pytest.mark.xfail(
reason="GH#22092 Index __and__ returns Index intersection",
raises=AssertionError,
strict=True,
),
),
pytest.param(
ops.ror_,
marks=pytest.mark.xfail(
reason="GH#22092 Index __or__ returns Index union",
raises=AssertionError,
strict=True,
),
),
],
)
def test_reversed_logical_op_with_index_returns_series(self, op):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Series(op(idx1.values, ser.values))
with tm.assert_produces_warning(FutureWarning):
result = op(ser, idx1)
tm.assert_series_equal(result, expected)
expected = Series(op(idx2.values, ser.values))
with tm.assert_produces_warning(FutureWarning):
result = op(ser, idx2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"op, expected",
[
(ops.rand_, Index([False, True])),
(ops.ror_, Index([False, True])),
(ops.rxor, Index([])),
],
)
def test_reverse_ops_with_index(self, op, expected):
# https://github.com/pandas-dev/pandas/pull/23628
# multi-set Index ops are buggy, so let's avoid duplicates...
ser = Series([True, False])
idx = Index([False, True])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# behaving as set ops is deprecated, will become logical ops
result = op(ser, idx)
tm.assert_index_equal(result, expected)
def test_logical_ops_label_based(self):
# GH#4947
# logical ops should be label based
a = Series([True, False, True], list("bca"))
b = Series([False, True, False], list("abc"))
expected = Series([False, True, False], list("abc"))
result = a & b
tm.assert_series_equal(result, expected)
expected = Series([True, True, False], list("abc"))
result = a | b
tm.assert_series_equal(result, expected)
expected = Series([True, False, False], list("abc"))
result = a ^ b
tm.assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list("bca"))
b = Series([False, True, False, True], list("abcd"))
expected = Series([False, True, False, False], list("abcd"))
result = a & b
tm.assert_series_equal(result, expected)
"""
@file
@brief Command line about validation of prediction runtime.
"""
import os
from logging import getLogger
import warnings
import json
from multiprocessing import Pool
from pandas import DataFrame
from sklearn.exceptions import ConvergenceWarning
def validate_runtime(verbose=1, opset_min=-1, opset_max="",
check_runtime=True, runtime='python', debug=False,
models=None, out_raw="model_onnx_raw.xlsx",
out_summary="model_onnx_summary.xlsx",
dump_folder=None, dump_all=False, benchmark=False,
catch_warnings=True, assume_finite=True,
versions=False, skip_models=None,
extended_list=True, separate_process=False,
time_kwargs=None, n_features=None, fLOG=print,
out_graph=None, force_return=False,
dtype=None, skip_long_test=False,
number=1, repeat=1, time_kwargs_fact='lin',
time_limit=4, n_jobs=0):
"""
Walks through most of :epkg:`scikit-learn` operators
or model or predictor or transformer, tries to convert
them into :epkg:`ONNX` and computes the predictions
with a specific runtime.
:param verbose: integer from 0 (None) to 2 (full verbose)
:param opset_min: tries every conversion from this minimum opset,
-1 to get the current opset
:param opset_max: tries every conversion up to maximum opset,
-1 to get the current opset
:param check_runtime: to check the runtime
and not only the conversion
:param runtime: runtime to check, python,
onnxruntime1 to check :epkg:`onnxruntime`,
onnxruntime2 to check every *ONNX* node independently
with onnxruntime, many runtime can be checked at the same time
if the value is a comma separated list
:param models: comma separated list of models to test or empty
string to test them all
:param skip_models: models to skip
:param debug: stops whenever an exception is raised,
only if *separate_process* is False
:param out_raw: output raw results into this file (excel format)
:param out_summary: output an aggregated view into this file (excel format)
:param dump_folder: folder where to dump information (pickle)
in case of mismatch
:param dump_all: dumps all models, not only the failing ones
:param benchmark: run benchmark
:param catch_warnings: catch warnings
:param assume_finite: See `config_context
<https://scikit-learn.org/stable/modules/generated/sklearn.config_context.html>`_,
If True, validation for finiteness will be skipped, saving time, but leading
to potential crashes. If False, validation for finiteness will be performed,
avoiding error.
:param versions: add columns with versions of used packages,
:epkg:`numpy`, :epkg:`scikit-learn`, :epkg:`onnx`, :epkg:`onnxruntime`,
:epkg:`sklearn-onnx`
:param extended_list: extends the list of :epkg:`scikit-learn` converters
with converters implemented in this module
:param separate_process: run every model in a separate process,
this option must be used to run all model in one row
even if one of them is crashing
:param time_kwargs: a dictionary which defines the number of rows and
the parameter *number* and *repeat* when benchmarking a model,
the value must follow :epkg:`json` format
:param n_features: change the default number of features for
a specific problem, it can also be a comma separated list
:param force_return: forces the function to return the results,
used when the results are produces through a separate process
:param out_graph: image name, to output a graph which summarizes
a benchmark in case it was run
:param dtype: '32' or '64' or None for both,
limits the test to one specific number types
:param skip_long_test: skips tests for high values of N if
they seem too long
:param number: to multiply number values in *time_kwargs*
:param repeat: to multiply repeat values in *time_kwargs*
:param time_kwargs_fact: to multiply number and repeat in
*time_kwargs* depending on the model
(see :func:`_multiply_time_kwargs <mlprodict.onnxrt.validate.validate_helper._multiply_time_kwargs>`)
:param time_limit: to stop benchmarking after this limit of time
:param n_jobs: force the number of jobs to have this value,
by default, it is equal to the number of CPU
:param fLOG: logging function
.. cmdref::
:title: Validate a runtime against scikit-learn
:cmd: -m mlprodict validate_runtime --help
:lid: l-cmd-validate_runtime
The command walks through all scikit-learn operators,
tries to convert them, checks the predictions,
and produces a report.
Example::
python -m mlprodict validate_runtime --models LogisticRegression,LinearRegression
Following example benchmarks models
:epkg:`sklearn:ensemble:RandomForestRegressor`,
:epkg:`sklearn:tree:DecisionTreeRegressor`, it compares
:epkg:`onnxruntime` against :epkg:`scikit-learn` for opset 10.
::
python -m mlprodict validate_runtime -v 1 -o 10 -op 10 -c 1 -r onnxruntime1
-m RandomForestRegressor,DecisionTreeRegressor -out bench_onnxruntime.xlsx -b 1
Parameter ``--time_kwargs`` may be used to reduce or increase
benchmark precision. The following value tells the function
to run benchmarks with datasets of 1 or 10 rows, and to repeat
each prediction *number* times per row.
The total time is divided by :math:`number \\times repeat`.
Parameter ``--time_kwargs_fact`` may be used to increase these
number for some specific models. ``'lin'`` multiplies
by 10 number when the model is linear.
::
-t "{\\"1\\":{\\"number\\":10,\\"repeat\\":10},\\"10\\":{\\"number\\":5,\\"repeat\\":5}}"
The following example dumps every model in the list:
::
python -m mlprodict validate_runtime --out_raw raw.csv --out_summary sum.csv
--models LinearRegression,LogisticRegression,DecisionTreeRegressor,DecisionTreeClassifier
-r python,onnxruntime1 -o 10 -op 10 -v 1 -b 1 -dum 1
-du model_dump -n 20,100,500 --out_graph benchmark.png --dtype 32
The command line generates a graph produced by function
:func:`plot_validate_benchmark
<mlprodict.onnxrt.validate.validate_graph.plot_validate_benchmark>`.
"""
if separate_process:
return _validate_runtime_separate_process(
verbose=verbose, opset_min=opset_min, opset_max=opset_max,
check_runtime=check_runtime, runtime=runtime, debug=debug,
models=models, out_raw=out_raw,
out_summary=out_summary, dump_all=dump_all,
dump_folder=dump_folder, benchmark=benchmark,
catch_warnings=catch_warnings, assume_finite=assume_finite,
versions=versions, skip_models=skip_models,
extended_list=extended_list, time_kwargs=time_kwargs,
n_features=n_features, fLOG=fLOG, force_return=True,
out_graph=None, dtype=dtype, skip_long_test=skip_long_test,
time_kwargs_fact=time_kwargs_fact, time_limit=time_limit,
n_jobs=n_jobs)
from ..onnxrt.validate import enumerate_validated_operator_opsets # pylint: disable=E0402
if not isinstance(models, list):
models = (None if models in (None, "")
else models.strip().split(','))
if not isinstance(skip_models, list):
skip_models = ({} if skip_models in (None, "")
else skip_models.strip().split(','))
if verbose <= 1:
logger = getLogger('skl2onnx')
logger.disabled = True
if not dump_folder:
dump_folder = None
if dump_folder and not os.path.exists(dump_folder):
os.mkdir(dump_folder) # pragma: no cover
if dump_folder and not os.path.exists(dump_folder):
raise FileNotFoundError( # pragma: no cover
"Cannot find dump_folder '{0}'.".format(
dump_folder))
# handling parameters
if opset_max == "":
opset_max = None # pragma: no cover
if isinstance(opset_min, str):
opset_min = int(opset_min) # pragma: no cover
if isinstance(opset_max, str):
opset_max = int(opset_max)
if isinstance(verbose, str):
verbose = int(verbose) # pragma: no cover
if isinstance(extended_list, str):
extended_list = extended_list in (
'1', 'True', 'true') # pragma: no cover
if time_kwargs in (None, ''):
time_kwargs = None
if isinstance(time_kwargs, str):
time_kwargs = json.loads(time_kwargs)
# json only allows string as keys
time_kwargs = {int(k): v for k, v in time_kwargs.items()}
if isinstance(n_jobs, str):
n_jobs = int(n_jobs)
if n_jobs == 0:
n_jobs = None
if time_kwargs is not None and not isinstance(time_kwargs, dict):
raise ValueError( # pragma: no cover
"time_kwargs must be a dictionary not {}\n{}".format(
type(time_kwargs), time_kwargs))
if not isinstance(n_features, list):
if n_features in (None, ""):
n_features = None
elif ',' in n_features:
n_features = list(map(int, n_features.split(',')))
else:
n_features = int(n_features)
if not isinstance(runtime, list) and ',' in runtime:
runtime = runtime.split(',')
def fct_filter_exp(m, s):
return str(m) not in skip_models
if dtype in ('', None):
fct_filter = fct_filter_exp
elif dtype == '32':
def fct_filter_exp2(m, p):
return fct_filter_exp(m, p) and '64' not in p
fct_filter = fct_filter_exp2
elif dtype == '64': # pragma: no cover
def fct_filter_exp3(m, p):
return fct_filter_exp(m, p) and '64' in p
fct_filter = fct_filter_exp3
else:
raise ValueError( # pragma: no cover
"dtype must be empty, 32, 64 not '{}'.".format(dtype))
# time_kwargs
if benchmark:
if time_kwargs is None:
from ..onnxrt.validate.validate_helper import default_time_kwargs # pylint: disable=E0402
time_kwargs = default_time_kwargs()
for _, v in time_kwargs.items():
v['number'] *= number
v['repeat'] *= repeat
if verbose > 0:
fLOG("time_kwargs=%r" % time_kwargs)
# body
def build_rows(models_):
rows = list(enumerate_validated_operator_opsets(
verbose, models=models_, fLOG=fLOG, runtime=runtime, debug=debug,
dump_folder=dump_folder, opset_min=opset_min, opset_max=opset_max,
benchmark=benchmark, assume_finite=assume_finite, versions=versions,
extended_list=extended_list, time_kwargs=time_kwargs, dump_all=dump_all,
n_features=n_features, filter_exp=fct_filter,
skip_long_test=skip_long_test, time_limit=time_limit,
time_kwargs_fact=time_kwargs_fact, n_jobs=n_jobs))
return rows
def catch_build_rows(models_):
if catch_warnings:
with warnings.catch_warnings():
warnings.simplefilter("ignore",
(UserWarning, ConvergenceWarning,
RuntimeWarning, FutureWarning))
rows = build_rows(models_)
else:
rows = build_rows(models_) # pragma: no cover
return rows
rows = catch_build_rows(models)
res = _finalize(rows, out_raw, out_summary,
verbose, models, out_graph, fLOG)
return res if (force_return or verbose >= 2) else None
def _finalize(rows, out_raw, out_summary, verbose, models, out_graph, fLOG):
from ..onnxrt.validate import summary_report # pylint: disable=E0402
from ..tools.cleaning import clean_error_msg # pylint: disable=E0402
# Drops data which cannot be serialized.
for row in rows:
keys = []
for k in row:
if 'lambda' in k:
keys.append(k)
for k in keys:
del row[k]
df = DataFrame(rows)
# This script is used to read the binary file produced by the DCA1000 and Mmwave Studio
import numpy as np
import pandas as pd
def readTIdata(filename,csvname):
"""
Reads in a binary file and outputs the iq complex data to a csv file specified by csvname.
Parameter:
filename: str
file name of binary file.
csvname: str
csv file name that stores the iq data from binary file.
Example:
>>> readTIdata('TIdata.bin','TIdata')
>>> 'converted'
Return:
Readable csv file containing complex data.
"""
# global variables
# change based on sensor config
numADCSamples = 256 # number of ADC samples per chirp
numADCBits = 16 # number of ADC bits per sample
numRX = 4 # number of receivers
numLanes = 2 # do not change. number of lanes is always 2
isReal = False # set to True if real only data, False if complex data
# read file
# read .bin file
with open(filename, 'rb') as f:
adcData = np.fromfile(f, dtype='int16', count=-1)
adcData = np.transpose([adcData])
# if 12 or 14 bits ADC per sample compensate for sign extension
if numADCBits != 16:
l_max = 2**(numADCBits-1)-1
# values above l_max have wrapped around; subtract 2**numADCBits to restore the sign
for index, val in enumerate(adcData):
if adcData[index] > l_max:
adcData[index] -= 2**(numADCBits)
fileSize = len(adcData)
# real data reshape, filesize = numADCSamples*numChirps
if isReal:
numChirps = int(fileSize/numADCSamples/numRX)
LVDS = np.zeros((1, fileSize), dtype='int16')
# each row is data from one chirp
LVDS = np.reshape(adcData, (numChirps, numADCSamples*numRX))
else:
# for complex data
# filesize = 2 * numADCSamples*numChirps
numChirps = int(fileSize/2/numADCSamples/numRX)
LVDS = np.zeros((1, int(fileSize/2)), dtype='complex')
# combine real and imaginary part into complex data
# read in file: 2I is followed by 2Q
counter = 0
for i in range(0, fileSize-2, 4):
# LVDS[0, i] = adcData[i, 0]
# LVDS[0, i+1] = adcData[i+2, 0]
LVDS[0, counter] = complex(adcData[i], adcData[i+2])
LVDS[0, counter+1] = complex(adcData[i+1], adcData[i+3])
counter += 2
# each row is data from one chirp
LVDS = np.reshape(LVDS, (numChirps, numADCSamples*numRX))
# organize data per RX
adcData = np.zeros((numRX, numChirps * numADCSamples), dtype='complex')
for row in range(0, numRX):
for i in range(0, numChirps):
adcData[row, i * numADCSamples:(i + 1) * numADCSamples] = LVDS[i, row * numADCSamples:(row + 1) * numADCSamples]
data = pd.DataFrame(adcData)
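# Reconstruction of the truncated tail (hedged): the docstring only promises a CSV
# named after csvname and the return value 'converted'; column labels are left as defaults.
data.to_csv(csvname + ".csv", index=False)
return 'converted'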
# -*- coding: utf-8 -*-
'''
McFlyin API example.
Take data from Python to send to an API in Python to transform data in Python to receive in Python to transform in Python.
But you can take data from ___ to send to an API in Python to transform data in Python to recieve in ____ to transform in ____
'''
import pandas as pd
import requests
import json
#Some DataFrame transformations for convenience
def single_df(response):
'''Convert single item response to DataFrame'''
key = list(response.keys())[0]
index = pd.to_datetime(response[key]['time'])
df = pd.DataFrame({key: response[key]['data']}, index=index)
return df
def multi_df(response):
'''Convert multi-item response to DataFrame'''
concat = []
for day, data in response.items():
concat.append(pd.DataFrame({day: data['data']}, index=data['time']))
df = pd.concat(concat, axis=1)
return df
#Read data, turn into single list of timestamps
data = pd.read_csv('AllPandas.csv')
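# Continuation sketch (hedged): the column name 'time', the local endpoint, and the payload
# shape below are assumptions for illustration; adapt them to the actual McFlyin server.
timestamps = data['time'].tolist()
payload = {'data': json.dumps({'data': timestamps})}
# response = requests.post('http://127.0.0.1:5000/resample', data=payload)  # hypothetical route
# df = single_df(json.loads(response.text))  # back to a DataFrame for further transforms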
import asyncio
import sys
import random as rand
import os
from .integration_test_utils import setup_teardown_test, _generate_table_name, V3ioHeaders, V3ioError
from storey import build_flow, CSVSource, CSVTarget, SyncEmitSource, Reduce, Map, FlatMap, AsyncEmitSource, ParquetTarget, ParquetSource, \
DataframeSource, ReduceToDataFrame
import pandas as pd
import aiohttp
import pytest
import v3io
import uuid
import datetime
@pytest.fixture()
def v3io_create_csv():
# Setup
file_path = _generate_table_name('bigdata/csv_test')
asyncio.run(_write_test_csv(file_path))
# Test runs
yield file_path
# Teardown
asyncio.run(_delete_file(file_path))
@pytest.fixture()
def v3io_teardown_file():
# Setup
file_path = _generate_table_name('bigdata/csv_test')
# Test runs
yield file_path
# Teardown
asyncio.run(_delete_file(file_path))
async def _write_test_csv(file_path):
connector = aiohttp.TCPConnector()
v3io_access = V3ioHeaders()
client_session = aiohttp.ClientSession(connector=connector)
try:
data = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
await client_session.put(f'{v3io_access._webapi_url}/{file_path}', data=data,
headers=v3io_access._get_put_file_headers, ssl=False)
finally:
await client_session.close()
async def _delete_file(path):
connector = aiohttp.TCPConnector()
v3io_access = V3ioHeaders()
client_session = aiohttp.ClientSession(connector=connector)
try:
response = await client_session.delete(f'{v3io_access._webapi_url}/{path}',
headers=v3io_access._get_put_file_headers, ssl=False)
if response.status >= 300 and response.status != 404 and response.status != 409:
body = await response.text()
raise V3ioError(f'Failed to delete item at {path}. Response status code was {response.status}: {body}')
finally:
await client_session.close()
def test_csv_reader_from_v3io(v3io_create_csv):
controller = build_flow([
CSVSource(f'v3io:///{v3io_create_csv}', header=True),
FlatMap(lambda x: x),
Map(lambda x: int(x)),
Reduce(0, lambda acc, x: acc + x),
]).run()
termination_result = controller.await_termination()
assert termination_result == 495
def test_csv_reader_from_v3io_error_on_file_not_found():
controller = build_flow([
CSVSource('v3io:///bigdatra/tests/idontexist.csv', header=True),
]).run()
try:
controller.await_termination()
assert False
except FileNotFoundError:
pass
async def async_test_write_csv_to_v3io(v3io_teardown_csv):
controller = build_flow([
AsyncEmitSource(),
CSVTarget(f'v3io:///{v3io_teardown_csv}', columns=['n', 'n*10'], header=True)
]).run()
for i in range(10):
await controller.emit([i, 10 * i])
await controller.terminate()
await controller.await_termination()
v3io_access = V3ioHeaders()
v3io_client = v3io.aio.dataplane.Client(endpoint=v3io_access._webapi_url, access_key=v3io_access._access_key)
try:
container, path = v3io_teardown_csv.split('/', 1)
result = await v3io_client.object.get(container, path)
finally:
await v3io_client.close()
expected = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result.body.decode("utf-8") == expected
def test_write_csv_to_v3io(v3io_teardown_file):
asyncio.run(async_test_write_csv_to_v3io(v3io_teardown_file))
def test_write_csv_with_dict_to_v3io(v3io_teardown_file):
file_path = f'v3io:///{v3io_teardown_file}'
controller = build_flow([
SyncEmitSource(),
CSVTarget(file_path, columns=['n', 'n*10'], header=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i})
controller.terminate()
controller.await_termination()
v3io_access = V3ioHeaders()
v3io_client = v3io.dataplane.Client(endpoint=v3io_access._webapi_url, access_key=v3io_access._access_key)
try:
container, path = v3io_teardown_file.split('/', 1)
result = v3io_client.object.get(container, path)
finally:
v3io_client.close()
expected = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result.body.decode("utf-8") == expected
def test_write_csv_infer_columns_without_header_to_v3io(v3io_teardown_file):
file_path = f'v3io:///{v3io_teardown_file}'
controller = build_flow([
SyncEmitSource(),
CSVTarget(file_path)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i})
controller.terminate()
controller.await_termination()
v3io_access = V3ioHeaders()
v3io_client = v3io.dataplane.Client(endpoint=v3io_access._webapi_url, access_key=v3io_access._access_key)
try:
container, path = v3io_teardown_file.split('/', 1)
result = v3io_client.object.get(container, path)
finally:
v3io_client.close()
expected = "0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert result.body.decode("utf-8") == expected
def test_write_csv_from_lists_with_metadata_and_column_pruning_to_v3io(v3io_teardown_file):
file_path = f'v3io:///{v3io_teardown_file}'
controller = build_flow([
SyncEmitSource(),
CSVTarget(file_path, columns=['event_key=$key', 'n*10'], header=True)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i}, key=f'key{i}')
controller.terminate()
controller.await_termination()
v3io_access = V3ioHeaders()
v3io_client = v3io.dataplane.Client(endpoint=v3io_access._webapi_url, access_key=v3io_access._access_key)
try:
container, path = v3io_teardown_file.split('/', 1)
result = v3io_client.object.get(container, path)
finally:
v3io_client.close()
expected = "event_key,n*10\nkey0,0\nkey1,10\nkey2,20\nkey3,30\nkey4,40\nkey5,50\nkey6,60\nkey7,70\nkey8,80\nkey9,90\n"
assert result.body.decode("utf-8") == expected
def test_write_to_parquet_to_v3io(setup_teardown_test):
out_dir = f'v3io:///{setup_teardown_test}'
columns = ['my_int', 'my_string']
controller = build_flow([
SyncEmitSource(),
ParquetTarget(out_dir, partition_cols='my_int', columns=columns, max_events=1)
]).run()
expected = []
for i in range(10):
controller.emit([i, f'this is {i}'])
expected.append([i, f'this is {i}'])
expected_in_pyarrow1 = pd.DataFrame(expected, columns=columns)
expected_in_pyarrow3 = expected_in_pyarrow1.copy()
expected_in_pyarrow1['my_int'] = expected_in_pyarrow1['my_int'].astype('int32')
expected_in_pyarrow3['my_int'] = expected_in_pyarrow3['my_int'].astype('category')
controller.terminate()
controller.await_termination()
read_back_df = pd.read_parquet(out_dir, columns=columns)
assert read_back_df.equals(expected_in_pyarrow1) or read_back_df.equals(expected_in_pyarrow3)
def test_write_to_parquet_to_v3io_single_file_on_termination(setup_teardown_test):
out_file = f'v3io:///{setup_teardown_test}/out.parquet'
columns = ['my_int', 'my_string']
controller = build_flow([
SyncEmitSource(),
ParquetTarget(out_file, columns=columns)
]).run()
expected = []
for i in range(10):
controller.emit([i, f'this is {i}'])
expected.append([i, f'this is {i}'])
expected = pd.DataFrame(expected, columns=columns, dtype='int64')
controller.terminate()
controller.await_termination()
read_back_df = pd.read_parquet(out_file, columns=columns)
assert read_back_df.equals(expected), f"{read_back_df}\n!=\n{expected}"
# ML-775
def test_write_to_parquet_key_hash_partitioning(setup_teardown_test):
out_dir = f'v3io:///{setup_teardown_test}/test_write_to_parquet_default_partitioning{uuid.uuid4().hex}/'
controller = build_flow([
SyncEmitSource(key_field=1),
ParquetTarget(out_dir, columns=['my_int', 'my_string'], partition_cols=[('$key', 4)])
]).run()
expected = []
expected_buckets = [3, 0, 1, 3, 0, 3, 1, 1, 1, 2]
for i in range(10):
controller.emit([i, f'this is {i}'])
expected.append([i, f'this is {i}', expected_buckets[i]])
expected = pd.DataFrame(expected, columns=['my_int', 'my_string', 'hash4_key'])
controller.terminate()
controller.await_termination()
read_back_df = pd.read_parquet(out_dir)
read_back_df['hash4_key'] = read_back_df['hash4_key'].astype('int64')
read_back_df.sort_values('my_int', inplace=True)
read_back_df.reset_index(inplace=True, drop=True)
assert read_back_df.equals(expected), f"{read_back_df}\n!=\n{expected}"
# ML-701
def test_write_to_parquet_to_v3io_force_string_to_timestamp(setup_teardown_test):
out_file = f'v3io:///{setup_teardown_test}/out.parquet'
columns = ['time']
controller = build_flow([
SyncEmitSource(),
ParquetTarget(out_file, columns=[('time', 'datetime')])
]).run()
expected = []
for i in range(10):
t = '2021-03-02T19:45:00'
controller.emit([t])
expected.append([datetime.datetime.fromisoformat(t)])
expected = pd.DataFrame(expected, columns=columns)
controller.terminate()
controller.await_termination()
read_back_df = pd.read_parquet(out_file, columns=columns)
assert read_back_df.equals(expected)
def test_write_to_parquet_to_v3io_with_indices(setup_teardown_test):
out_file = f'v3io:///{setup_teardown_test}/test_write_to_parquet_with_indices{uuid.uuid4().hex}.parquet'
controller = build_flow([
SyncEmitSource(),
ParquetTarget(out_file, index_cols='event_key=$key', columns=['my_int', 'my_string'])
]).run()
expected = []
for i in range(10):
controller.emit([i, f'this is {i}'], key=f'key{i}')
expected.append([f'key{i}', i, f'this is {i}'])
columns = ['event_key', 'my_int', 'my_string']
expected = pd.DataFrame(expected, columns=columns, dtype='int64')
expected.set_index(['event_key'], inplace=True)
controller.terminate()
controller.await_termination()
read_back_df = pd.read_parquet(out_file, columns=columns)
assert read_back_df.equals(expected), f"{read_back_df}\n!=\n{expected}"
# ML-602
def test_write_to_parquet_to_v3io_with_nulls(setup_teardown_test):
out_dir = f'v3io:///{setup_teardown_test}/test_write_to_parquet_to_v3io_with_nulls{uuid.uuid4().hex}/'
flow = build_flow([
SyncEmitSource(),
ParquetTarget(out_dir, columns=[('key=$key', 'str'), ('my_int', 'int'), ('my_string', 'str'), ('my_datetime', 'datetime')],
partition_cols=[], max_events=1)
])
expected = []
my_time = datetime.datetime(2021, 1, 1, tzinfo=datetime.timezone(datetime.timedelta(hours=5)))
controller = flow.run()
controller.emit({'my_int': 0, 'my_string': 'hello', 'my_datetime': my_time}, key=f'key1')
# TODO: Expect correct time zone. Can be done in _Writer, but requires fix for ARROW-10511, which is pyarrow>=3.
expected.append(['key1', 0, 'hello', my_time.astimezone(datetime.timezone(datetime.timedelta())).replace(tzinfo=None)])
controller.terminate()
controller.await_termination()
controller = flow.run()
controller.emit({}, key='key2')
expected.append(['key2', None, None, None])
controller.terminate()
controller.await_termination()
read_back_df = pd.read_parquet(out_dir)
read_back_df.sort_values('key', inplace=True)
read_back_df.reset_index(inplace=True, drop=True)
expected = pd.DataFrame(expected, columns=['key', 'my_int', 'my_string', 'my_datetime'])
assert read_back_df.compare(expected).empty
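# Reduce helper: accumulates every event it receives into a list, so the tests below can
# inspect exactly what was read back from a source.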
def append_and_return(lst, x):
lst.append(x)
return lst
def test_filter_before_after_non_partitioned(setup_teardown_test):
columns = ['my_string', 'my_time']
df = pd.DataFrame([['good', pd.Timestamp('2018-05-07 13:52:37')],
['hello', pd.Timestamp('2019-01-26 14:52:37')],
['world', pd.Timestamp('2020-05-11 13:52:37')]],
columns=columns)
df.set_index('my_string')
out_file = f'v3io:///{setup_teardown_test}/before_after_non_partioned/'
controller = build_flow([
DataframeSource(df),
ParquetTarget(out_file, columns=columns, partition_cols=[]),
]).run()
controller.await_termination()
before = pd.Timestamp('2019-12-01 00:00:00')
after = pd.Timestamp('2019-01-01 23:59:59.999999')
controller = build_flow([
ParquetSource(out_file, end_filter=before, start_filter=after, filter_column='my_time'),
Reduce([], append_and_return)
]).run()
read_back_result = controller.await_termination()
expected = [{'my_string': 'hello', 'my_time': pd.Timestamp('2019-01-26 14:52:37')}]
assert read_back_result == expected, f"{read_back_result}\n!=\n{expected}"
def test_filter_before_after_partitioned_random(setup_teardown_test):
low_limit = pd.Timestamp('2018-01-01')
high_limit = | pd.Timestamp('2020-12-31 23:59:59.999999') | pandas.Timestamp |
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
import sys
from yellowbrick.cluster import KElbowVisualizer
import numpy as np
def my_tokenizer(text):
tokens = text.split(",")
return tokens
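# my_tokenizer is meant as a custom comma-splitting tokenizer for TfidfVectorizer,
# e.g. (sketch, not the definitive configuration): TfidfVectorizer(tokenizer=my_tokenizer, lowercase=False)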
def cluster_synsetframe_communities(filtered_enriched_synsetframe_csv, clusters_output_csv):
colnames = ['community', 'doc', 'annotatedtext', 'synsetframe', 'filteredsynsetframe', 'synsetframetriplet', 'nounverbwoframe', 'adjadvwoframe', 'annotatedsynsetframe']
data = | pd.read_csv(filtered_enriched_synsetframe_csv, skiprows=[0], names=colnames) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 22 09:13:58 2019
@author: rocco
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
files = [i for i in os.listdir("../data/mipas_pd")]
files = files[19:24]
classifier_type = "labels_svm_pc_rf_2"
def plot_bar(files, classifier_type, cl_max):
if cl_max == True:
cl = "cal_max_cl"
else:
cl = "caliop_class_dense"
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
year = files[0].split("_")[0]
month_b = int(files[0].split("_")[1])
month_e = int(files[-1].split("_")[1])
if classifier_type == "labels_bc":
mat_tot = np.zeros([9, 7])
else:
mat_tot = np.zeros([9, 5])
for file in files:
#load mipas df
if classifier_type == "labels_bc":
mat = np.empty([0, 7])
else:
mat = np.empty([0, 5])
df_reduced = pd.read_hdf(os.path.join('../data/mipas_pd', file),'df_reduced')
for i in range(0, 9):
ind = (pd.value_counts(df_reduced[df_reduced[cl] == i][classifier_type]).index).astype(int)
print(ind)
if classifier_type == "labels_bc":
arr = np.zeros([1, 7])
else:
arr = np.zeros([1, 5])
for j in ind:
if classifier_type == "labels_bc":
arr[0][j] = pd.value_counts(df_reduced[df_reduced[cl] == i][classifier_type])[j]
else:
arr[0][j-1] = | pd.value_counts(df_reduced[df_reduced[cl] == i][classifier_type]) | pandas.value_counts |
# import libraries
import sys
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
'''
INPUT:
'messages_filepath' : path to the messages csv file
'categories_filepath' : path to the categories csv file
OUTPUT:
merged pandas dataframe with messages and categories joined on 'id'
'''
# load datasets
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
# merge datasets
df = | pd.merge(messages, categories, on='id') | pandas.merge |
#!/usr/bin/python
# <NAME>
# The University of Sheffield
# 06.03.2021
# NOTES
# AGENT class uses some parts of https://github.com/PacktPublishing/PyTorch-1.x-Reinforcement-Learning-Cookbook/blob/master/Chapter08/chapter8/reinforce.py for REINFORCE implementation
# TODO: documentation
from env import gyMBlocks
import pickle, time
import torch
import torch.nn as nn
from tqdm.notebook import trange, tqdm
import matplotlib.pyplot as P
import pandas as pd
import numpy as np
# A simple function for calculating rolling sum
def rolling_sum(a, n=10, normalise=False):
ret = np.cumsum(a)
ret[n:] = ret[n:] - ret[:-n]
if normalise:
return ret[n - 1:]/n
else:
return ret[n - 1:]
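# Example (sketch): rolling_sum(np.array([1, 2, 3, 4, 5]), n=3) returns array([6, 9, 12]);
# with normalise=True the same call returns the 3-point moving average [2., 3., 4.].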
# REINFORCE Agent class
# The network policy is a built-in one hidden-layer MLP
class AGENT():
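"""REINFORCE agent wrapping a gyMBlocks environment with a one-hidden-layer MLP policy."""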
def __init__(self, envSize = 5, nRobot = 6 , n_hidden = 50, lr = 0.001, maxIter = 1000, rewardType = 1, randSeed = 0):
self.env = gyMBlocks(envSize, nRobot, returnIndex = True, rewardType=rewardType, maxIter = maxIter)
self.env.seed(randSeed)
nStates = len(self.env.STATES)
nAction = self.env.action_space.n
self.model = nn.Sequential(
nn.Linear(nStates, n_hidden),
nn.ReLU(),
nn.Linear(n_hidden, nAction),
nn.Softmax(dim=0),
)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr)
self.OH = torch.eye((nStates))
# self attribute
self.envSize = envSize
self.nRobot = nRobot
self.n_hidden = n_hidden
self.lr = lr
self.maxIter = maxIter
self.rewardType = rewardType
self.randSeed = randSeed
def predict(self, state):
return self.model(torch.Tensor(state))
def update(self, advantages, log_probs):
policy_gradient = []
for log_prob, Gt in zip(log_probs, advantages):
policy_gradient.append(-log_prob * Gt)
loss = torch.stack(policy_gradient).sum()
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def get_action(self, s):
probs = self.predict(s)
action = torch.multinomial(probs, 1).item()
log_prob = torch.log(probs[action])
return action, log_prob
def reinforce(self, nEpisode, gamma=0.99, returnDF=False, progressBar=False):
total_reward_episode = [0] * nEpisode
logs = []
env = self.env
for episode in trange(nEpisode,disable=not progressBar):
log_probs = []
rewards = []
state = env.reset()
while True:
action, log_prob = self.get_action(self.OH[state])
next_state, reward, is_done, _ = env.step(action)
log_probs.append(log_prob)
rewards.append(reward)
if is_done or env.iIter >= env.maxIter:
total_reward_episode[episode] += reward
Gt = 0
pw = 0
returns = []
for t in range(len(rewards)-1, -1, -1):
Gt += gamma ** pw * rewards[t]
pw += 1
returns.append(Gt)
returns = returns[::-1]
returns = torch.tensor(returns)
self.update(returns, log_probs)
break
state = next_state
logs.append([episode, is_done, reward, env.iIter, env.boundingBox[-1]])
self.df = | pd.DataFrame(logs, columns=['ep', 'done', 'reward', 'epLength', 'bbox']) | pandas.DataFrame |
'''
CIS 419/519 project: Using decision tree ensembles to infer the pathological
cause of age-related neurodegenerative changes based on clinical assessment
nadfahors: <NAME>, <NAME>, & <NAME>
This file contains code for preparing NACC data for analysis, including:
* synthesis of pathology data to create pathology class outcomes
* dropping uninformative variables from predictor set
* identifying and merging/resolving redundant clusters of variables
* identifying missing data codes and replacing with NaNs as appropriate
* creating change variables from longitudinal data
* imputation of missing data
* categorizing retained variables as interval/ratio, ordinal, or nominal
* creation of dummy variables for nominal variables
* standardizing interval/ratio and ordinal variables
* creating date variables, then converting these to useful ages or intervals
* quadratic expansion for interval/ratio variables?
'''
# Module imports
import pandas as pd
import numpy as np
import datetime
# Read in full dataset. Warning: this is about 340 MB.
fulldf = pd.read_csv('investigator_nacc48.csv')
# List of Uniform Data Set (UDS) values that will serve as potential
# predictors. Those with a "False" next to them will be excluded after data
# preparation; those with a True will be kept.
xvar = pd.read_csv('xvar.csv')
# Variables from the NACC neuropathology table that will be used to group
# individuals by pathology class:
# 1) Alzheimer's disease (AD);
# 2) frontotemporal lobar degeneration due to tauopathy (FTLD-tau)
# 3) frontotemporal lobar degeneration due to TDP-43 (FTLD-TDP)
# 4) Lewy body disease due to alpha synuclein (including Lewy body dementia and Parkinson's disease)
# 5) vascular disease
# Path classes: AD (ABC criteria); FTLD-tau; FTLD-TDP, including ALS; Lewy body disease (are PD patients captured here?); vascular
npvar = pd.DataFrame(np.array(["NPPMIH",0, # Postmortem interval--keep in as a potential confound variable?
"NPFIX",0,
"NPFIXX",0,
"NPWBRWT",0,
"NPWBRF",0,
"NACCBRNN",0,
"NPGRCCA",0,
"NPGRLA",0,
"NPGRHA",0,
"NPGRSNH",0,
"NPGRLCH",0,
"NACCAVAS",0,
"NPTAN",False,
"NPTANX",False,
"NPABAN",False,
"NPABANX",False,
"NPASAN",False,
"NPASANX",False,
"NPTDPAN",False,
"NPTDPANX",False,
"NPHISMB",False,
"NPHISG",False,
"NPHISSS",False,
"NPHIST",False,
"NPHISO",False,
"NPHISOX",False,
"NPTHAL",False,# Use for ABC scoring to create ordinal measure of AD change
"NACCBRAA",False,# Use for ABC scoring to create ordinal measure of AD change
"NACCNEUR",False,# Use for ABC scoring to create ordinal measure of AD change
"NPADNC",False,# Use for ABC scoring to create ordinal measure of AD change
"NACCDIFF",False,
"NACCVASC",False,# Vasc presence/absence
"NACCAMY",False,
"NPLINF",False,
"NPLAC",False,
"NPINF",False,# Derived variable summarizing several assessments of infarcts and lacunes
"NPINF1A",False,
"NPINF1B",False,
"NPINF1D",False,
"NPINF1F",False,
"NPINF2A",False,
"NPINF2B",False,
"NPINF2D",False,
"NPINF2F",False,
"NPINF3A",False,
"NPINF3B",False,
"NPINF3D",False,
"NPINF3F",False,
"NPINF4A",False,
"NPINF4B",False,
"NPINF4D",False,
"NPINF4F",False,
"NACCINF",False,
"NPHEM",False,
"NPHEMO",False,
"NPHEMO1",False,
"NPHEMO2",False,
"NPHEMO3",False,
"NPMICRO",False,
"NPOLD",False,
"NPOLD1",False,
"NPOLD2",False,
"NPOLD3",False,
"NPOLD4",False,
"NACCMICR",False,# Derived variable for microinfarcts
"NPOLDD",False,
"NPOLDD1",False,
"NPOLDD2",False,
"NPOLDD3",False,
"NPOLDD4",False,
"NACCHEM",False,# Derived variables for microbleeds and hemorrhages
"NACCARTE",False,
"NPWMR",False,
"NPPATH",False,# Other ischemic/vascular pathology
"NACCNEC",False,
"NPPATH2",False,
"NPPATH3",False,
"NPPATH4",False,
"NPPATH5",False,
"NPPATH6",False,
"NPPATH7",False,
"NPPATH8",False,
"NPPATH9",False,
"NPPATH10",False,
"NPPATH11",False,
"NPPATHO",False,
"NPPATHOX",False,
"NPART",False,
"NPOANG",False,
"NACCLEWY",False,# Note that limbic/transitional and amygdala-predominant are not differentiated
"NPLBOD",False,# But here they are differentiated!
"NPNLOSS",False,
"NPHIPSCL",False,
"NPSCL",False,
"NPFTDTAU",False,# FTLD-tau
"NACCPICK",False,# FTLD-tau
"NPFTDT2",False,# FTLD-tau
"NACCCBD",False,# FTLD-tau
"NACCPROG",False,# FTLD-tau
"NPFTDT5",False,# FTLD-tau
"NPFTDT6",False,# FTLD-tau
"NPFTDT7",False,# FTLD-tau
"NPFTDT8",False,# This is FTLD-tau but associated with ALS/parkinsonism--wut?
"NPFTDT9",False,# tangle-dominant disease--is this PART? Maybe exclude cases who have this as only path type.
"NPFTDT10",False,# FTLD-tau: other 3R+4R tauopathy. What is this if not AD? Maybe exclude. How many cases?
"NPFRONT",False,# FTLD-tau
"NPTAU",False,# FTLD-tau
"NPFTD",False,# FTLD-TDP
"NPFTDTDP",False,# FTLD-TDP
"NPALSMND",False,# FTLD-TDP (but exclude FUS and SOD1)
"NPOFTD",False,
"NPOFTD1",False,
"NPOFTD2",False,
"NPOFTD3",False,
"NPOFTD4",False,
"NPOFTD5",False,
"NPFTDNO",False,
"NPFTDSPC",False,
"NPTDPA",False,# In second pass, use anatomical distribution to stage
"NPTDPB",False,# In second pass, use anatomical distribution to stage
"NPTDPC",False,# In second pass, use anatomical distribution to stage
"NPTDPD",False,# In second pass, use anatomical distribution to stage
"NPTDPE",False,# In second pass, use anatomical distribution to stage
"NPPDXA",False,# Exclude?
"NPPDXB",False,# Exclude
"NACCPRIO",False,# Exclude
"NPPDXD",False,# Exclude
"NPPDXE",False,
"NPPDXF",False,
"NPPDXG",False,
"NPPDXH",False,
"NPPDXI",False,
"NPPDXJ",False,
"NPPDXK",False,
"NPPDXL",False,
"NPPDXM",False,
"NPPDXN",False,
"NACCDOWN",False,
"NACCOTHP",False,# Survey for exclusion criteria
"NACCWRI1",False,# Survey for exclusion criteria
"NACCWRI2",False,# Survey for exclusion criteria
"NACCWRI3",False,# Survey for exclusion criteria
"NACCBNKF",False,
"NPBNKB",False,
"NACCFORM",False,
"NACCPARA",False,
"NACCCSFP",False,
"NPBNKF",False,
"NPFAUT",False,
"NPFAUT1",False,
"NPFAUT2",False,
"NPFAUT3",False,
"NPFAUT4",False,
"NACCINT",False,
"NPNIT",False,
"NPCERAD",False,# What sort of variable?
"NPADRDA",False,
"NPOCRIT",False,
"NPVOTH",False,
"NPLEWYCS",False,
"NPGENE",True,# Family history--include in predictors?
"NPFHSPEC",False,# Code as dummy variables if useful.
"NPCHROM",False,# Exclusion factor? Genetic/chromosomal abnormalities
"NPPNORM",False,# Check all the following variables for redundancy with the ones above.
"NPCNORM",False,
"NPPADP",False,
"NPCADP",False,
"NPPAD",False,
"NPCAD",False,
"NPPLEWY",False,
"NPCLEWY",False,
"NPPVASC",False,
"NPCVASC",False,
"NPPFTLD",False,
"NPCFTLD",False,
"NPPHIPP",False,
"NPCHIPP",False,
"NPPPRION",False,
"NPCPRION",False,
"NPPOTH1",False,
"NPCOTH1",False,
"NPOTH1X",False,
"NPPOTH2",False,
"NPCOTH2",False,
"NPOTH2X",False,
"NPPOTH3",False,
"NPCOTH3",False,
"NPOTH3X",0]).reshape((-1,2)))
npvar.columns = ['Variable','Keep']
## Case selection process.
# Include only those with autopsy data.
aut = fulldf[fulldf.NACCAUTP == 1]
del fulldf
def table(a,b):
print(pd.crosstab(aut[a],aut[b],dropna=False,margins=True))
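# Interactive helper, e.g. table('NACCLEWY', 'NPLBOD') would print a cross-tabulation of two pathology variables.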
# Exclude for Down's, Huntington's, and other conditions.
aut = aut.loc[aut.DOWNS != 1]
aut = aut.loc[aut.HUNT != 1]
aut = aut.loc[aut.PRION != 1]
aut = aut.loc[~aut.MSAIF.isin([1,2,3])]
aut = aut.loc[~aut.NEOPIF.isin([1,2,3])]
aut = aut.loc[~aut.SCHIZOIF.isin([1,2,3])]
aut.index = list(range(aut.shape[0]))
# How many unique IDs?
# For now, keep in follow-up visits to increase our training data.
uids = aut.NACCID[~aut.NACCID.duplicated()]
#aut = aut[~aut.NACCID.duplicated()]
## Coding of pathology class outcomes.
# Create binary variables for the presence of each pathology class of interest.
# Code Alzheimer's disease pathology based on NPADNC, which implements
# ABC scoring based on Montine et al. (2012).
aut = aut.assign(ADPath = 0)
aut.loc[aut.NPADNC.isin((2,3)),'ADPath'] = 1
aut.loc[aut.NPPAD == 1,'ADPath'] = 1
# The following two commands make the ADPath variable false if the AD path
# diagnosis is as contributing, not as primary.
aut.loc[aut.NPPAD == 2,'ADPath'] = 0
aut.loc[aut.NPCAD == 1,'ADPath'] = 0
aut.loc[aut.NPPVASC == 1,'ADPath'] = 0
aut.loc[aut.NPPLEWY == 1,'ADPath'] = 0
aut.loc[aut.NPPFTLD == 1,'ADPath'] = 0
# Several variables pertain to FTLD tauopathies.
aut = aut.assign(TauPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPFTDTAU == 1,'TauPath'] = 1
aut.loc[aut.NACCPICK == 1,'TauPath'] = 1
aut.loc[aut.NACCCBD == 1,'TauPath'] = 1
aut.loc[aut.NACCPROG == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT2 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT5 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT6 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT7 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT9 == 1,'TauPath'] = 1
aut.loc[aut.NPFRONT == 1,'TauPath'] = 1
aut.loc[aut.NPTAU == 1,'TauPath'] = 1
aut.loc[aut.ADPath == 1, 'TauPath'] = 0
aut.loc[aut.NPCFTLD == 1, 'TauPath'] = 0
# Code Lewy body disease based on NPLBOD variable. Do not include amygdala-
# predominant, brainstem-predominant, or olfactory-only cases.
# See Toledo et al. (2016, Acta Neuropathol) and Irwin et al. (2018, Nat Rev
# Neuro).
aut = aut.assign(LBPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPLBOD.isin((2,3)),'LBPath'] = 1
aut.loc[aut.NPPLEWY == 1,'LBPath'] = 1
aut.loc[aut.NPPLEWY == 2,'LBPath'] = 0
aut.loc[aut.NPCLEWY == 1,'LBPath'] = 0
aut.loc[(aut.ADPath == 1) & (aut.NPPLEWY != 1), 'LBPath'] = 0
aut.loc[(aut.TauPath == 1) & (aut.NPPLEWY != 1), 'LBPath'] = 0
# Code TDP-43 pathology based on NPFTDTDP and NPALSMND, excluding FUS and SOD1
# cases.
aut = aut.assign(TDPPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPFTD == 1,'TDPPath'] = 1
aut.loc[aut.NPFTDTDP == 1,'TDPPath'] = 1
aut.loc[aut.NPALSMND == 1,'TDPPath'] = 1
aut.loc[aut.ADPath == 1, 'TDPPath'] = 0
aut.loc[aut.LBPath == 1, 'TDPPath'] = 0
aut.loc[aut.TauPath == 1, 'TDPPath'] = 0
# Code vascular disease based on relevant derived variables:
aut = aut.assign(VPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPINF == 1,'VPath'] = 1
aut.loc[aut.NACCMICR == 1,'VPath'] = 1
aut.loc[aut.NACCHEM == 1,'VPath'] = 1
aut.loc[aut.NPPATH == 1,'VPath'] = 1
aut.loc[aut.NPPVASC == 1,'VPath'] = 1
aut.loc[aut.NPPVASC == 2,'VPath'] = 0
aut.loc[aut.NPCVASC == 1,'VPath'] = 0
aut.loc[(aut.ADPath == 1) & (aut.NPPVASC != 1), 'VPath'] = 0
aut.loc[(aut.LBPath == 1) & (aut.NPPVASC != 1), 'VPath'] = 0
aut.loc[(aut.NPPFTLD == 1) & (aut.NPPVASC != 1), 'VPath'] = 0
aut.loc[(aut.TDPPath == 1) & (aut.NPPVASC != 1), 'VPath'] = 0
aut.loc[(aut.TauPath == 1) & (aut.NPPVASC != 1), 'VPath'] = 0
aut = aut.assign(Class = aut.ADPath)
aut.loc[aut.TauPath == 1,'Class'] = 2
aut.loc[aut.TDPPath == 1,'Class'] = 3
aut.loc[aut.LBPath == 1,'Class'] = 4
aut.loc[aut.VPath == 1,'Class'] = 5
aut = aut.loc[aut.Class != 0]
aut.index = list(range(aut.shape[0]))
## Predictor variable preparation: one-hot-encoding, date/age/interval operations,
# consolidating redundant variables, consolidating free-text variables.
aut = aut.assign(DOB = aut.BIRTHYR)
aut = aut.assign(DOD = aut.NACCYOD)
aut = aut.assign(VISITDATE = aut.VISITYR)
for i in range(aut.shape[0]):
aut.loc[i,'DOB'] = datetime.datetime.strptime('-'.join([str(aut.BIRTHYR.loc[i]),str(aut.BIRTHMO.loc[i]),'01']),'%Y-%m-%d')
aut.loc[i,'DOD'] = datetime.datetime.strptime('-'.join([str(aut.NACCYOD.loc[i]),str(aut.NACCMOD.loc[i]),'01']),'%Y-%m-%d')
aut.loc[i,'VISITDATE'] = datetime.datetime.strptime('-'.join([str(aut.VISITYR.loc[i]),str(aut.VISITMO.loc[i]),str(aut.VISITDAY.loc[i])]),'%Y-%m-%d')
# Some time/interval variables
aut = aut.assign(SinceQUITSMOK = aut.NACCAGE - aut.QUITSMOK) # Years since quitting smoking
aut = aut.assign(AgeStroke = aut.NACCSTYR - aut.BIRTHYR)
aut = aut.assign(AgeTIA = aut.NACCTIYR - aut.BIRTHYR)
aut = aut.assign(AgePD = aut.PDYR - aut.BIRTHYR)
aut = aut.assign(AgePDOTHR = aut.PDOTHRYR - aut.BIRTHYR)
aut = aut.assign(AgeTBI = aut.TBIYEAR - aut.BIRTHYR)
aut = aut.assign(Duration = aut.NACCAGE - aut.DECAGE)
# Hispanic origin
aut.HISPORX = aut.HISPORX.str.lower()
aut.loc[aut.HISPORX == 'spanish','HISPORX'] = 'spain'
# Race. RACESECX and RACETERX have too few values to be useful.
aut.RACEX = aut.RACEX.str.lower().str.replace(' ','').str.replace('-','')
aut.loc[aut.RACEX.isin(['hispanic','puerto rican']),'RACEX'] = 'latino'
aut.loc[aut.RACEX.isin(['guam - chamorro']),'RACEX'] = 'chamorro'
aut.loc[aut.RACEX.isin(['multi racial']),'RACEX'] = 'multiracial'
# Other language. But actually, let's just drop this and code as English/non-English.
#aut.PRIMLANX = aut.PRIMLANX.str.lower().str.replace(' ','').str.replace('-','')
# Drug list. First get a list of all the unique drug names, then code as dummy variables.
# Update as of 04/01/2020: drugs alone are going to be a huge amount of work.
# For now, just rely on the NACC derived variables for diabetes meds, cardiac drugs, etc.
drugcols = ['DRUG' + str(i) for i in range(1,41)]
drugs = aut[drugcols].stack()
# Several varieties of insulin--important to distinguish?
# drop "*not-codable"
# drop "diphtheria/hepb/pertussis,acel/polio/tetanus"
drugs = drugs.unique()
drugs = [eachdrug.lower() for eachdrug in drugs.tolist()]
drugs = pd.Series(drugs)
drug_corrections = [("multivitamin with minerals","multivitamin"),
("multivitamin, prenatal","multivitamin"),
("omega 3-6-9","omega369"),
("omega-3","omega3"),
("vitamin-d","vitamin d"),
("acetyl-l-carnitine","acetyl l carnitine"),
("levodopa","levadopa"),
("pro-stat","prostat"),
("alpha-d-galactosidase","alpha d galactosidase"),
("indium pentetate in-111","indium pentetate in111"),
("fludeoxyglucose f-18","fludeoxyglucose f18"),
("calcium with vitamins d and k", "calcium-vitamin d-vitamin k"),
("aloe vera topical", "aloe vera"),
("ammonium lactate topical", "ammonium lactate")]
for i in range(len(drug_corrections)):
oldval = drug_corrections[i][0]
newval = drug_corrections[i][1]
drugs = drugs.str.replace(pat = oldval, repl = newval)
drugs = drugs.loc[drugs != "*not codable*"]
drugs = drugs.loc[drugs != "diphtheria/hepb/pertussis,acel/polio/tetanus"]
drugs = np.unique([ss for eachdrug in drugs for ss in eachdrug.split('-')])
drugs = np.unique([ss for eachdrug in drugs for ss in eachdrug.split('/')])
drugs.sort()
## Combining redundant variables. Often this reflects a change in form or
# variable name between UDS version 2 & 3.
aut.loc[(aut.CVPACE == -4) & (aut.CVPACDEF == 0),'CVPACE'] = 0
aut.loc[(aut.CVPACE == -4) & (aut.CVPACDEF == 1),'CVPACE'] = 1
xvar.loc[xvar.Variable == 'CVPACDEF','Keep'] = False
# Combine TBIBRIEF and TRAUMBRF.
aut.loc[(aut.TBIBRIEF == -4) & (aut.TRAUMBRF.isin([0])),'TBIBRIEF'] = 0
aut.loc[(aut.TBIBRIEF == -4) & (aut.TRAUMBRF.isin([1,2])),'TBIBRIEF'] = 1
xvar.loc[xvar.Variable == 'TRAUMBRF','Keep'] = False
# More data cleaning
aut.ABRUPT = aut.ABRUPT.replace(to_replace = 2, value = 1)
aut.FOCLSYM = aut.FOCLSYM.replace(to_replace = 2, value = 1)
aut.FOCLSIGN = aut.FOCLSIGN.replace(to_replace = 2, value = 1)
# Convert language to a binary variable (English/non-English)
aut = aut.assign(English = 0)
aut.loc[aut.PRIMLANG == 1,'English'] = 1
xvar.loc[xvar.Variable == 'PRIMLANG','Keep'] = False
# Some dummy coding
vv = xvar.Variable.loc[(xvar.Keep) & (xvar.Comments == "Dummy coding for (95,96,97,98)")]
for v in vv:
aut[v + '_couldnt'] = 0
aut.loc[aut[v].isin([95,96,97,98]),v + '_couldnt'] = 1
vv = xvar.Variable.loc[xvar.Comments == "Dummy coding for (995,996,997,998)"]
for v in vv:
aut[v + '_couldnt'] = 0
aut.loc[aut[v].isin([995,996,997,998]),v + '_couldnt'] = 1
# Drop all columns where xvar.Keep == False.
aut2 = aut
xvar.loc[xvar.Variable == 'NACCID','Keep'] = True
xvar.loc[xvar.Variable == 'NACCID','Type'] = "ID"
xvar.loc[xvar.Variable == 'VISITDATE','Keep'] = True
xvar.loc[xvar.Variable == 'VISITDATE','Type'] = "ID"
aut = aut.drop(columns = xvar.Variable[~xvar.Keep])
# Fill with NA values
xvar = xvar.loc[xvar.Keep]
xvar.index = range(xvar.shape[0])
for i in range(xvar.shape[0]):
if not xvar.NaNValues.isna()[i]:
v = xvar.Variable[i]
badval = eval(xvar.NaNValues[i])
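# NaNValues holds a Python literal naming the missing-data codes (a single int or a list), hence the eval.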
#print(v,badval)
if isinstance(badval,int):
badval = [badval]
aut[v].mask(aut[v].isin(badval),inplace = True)
# Get rid of variables with very few meaningful observations.
valcounts = aut.describe().iloc[0]
aut = aut.drop(columns = valcounts.loc[valcounts < 100].index)
#aut = aut[valcounts.loc[valcounts >= 100].index]
# Find highly correlated variable pairs (|r| > 0.8) as candidates for dropping.
ac = aut.corr()
acs = ac.unstack(level = 0)
acs = acs.loc[abs(acs)>0.8]
acsind = list(acs.index)
diagnames = [ind for ind in acsind if ind[0] == ind[1]]
acs = acs.drop(labels=diagnames)
acs = pd.DataFrame(acs)
acs.columns = ['r']
acs['v1'] = acs.index
acs[['v1','v2']] = pd.DataFrame(acs['v1'].tolist(),index = acs.index)
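# Possible follow-up (sketch, not applied here): drop one member of each highly correlated pair, e.g.
#corr_drop = set(acs['v2']) - set(acs['v1'])
#aut = aut.drop(columns = [c for c in corr_drop if c in aut.columns])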
y = aut.Class
X = aut.drop(columns = npvar.Variable.loc[npvar.Variable.isin(aut.columns)])
X = X.drop(columns = ['Class','ADPath','TauPath','TDPPath','LBPath','VPath'])
xd = X.describe().iloc[0]
# Impute numeric variables with the mean.
from sklearn.impute import SimpleImputer
numvar = X.columns.intersection(xvar.Variable.loc[xvar.Type == "Numeric"])
imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
imp_mean.fit(X[numvar])
Xnumimp = imp_mean.transform(X[numvar])
Xnumimp = pd.DataFrame(Xnumimp)
Xnumimp.columns = X[numvar].columns
# Impute ordinal variables with the median.
ordvar = X.columns.intersection(xvar.Variable.loc[xvar.Type == "Ordinal"])
imp_med = SimpleImputer(missing_values=np.nan, strategy='median')
imp_med.fit(X[ordvar])
Xordimp = imp_med.transform(X[ordvar])
Xordimp = pd.DataFrame(Xordimp)
Xordimp.columns = X[ordvar].columns
# Impute boolean variables with zero.
boolvar = X.columns.intersection(xvar.Variable.loc[xvar.Type == "Boolean"])
boolenc = SimpleImputer(missing_values = np.nan, strategy = 'constant',
fill_value = 0)
boolenc.fit(X[boolvar])
Xbool = boolenc.transform(X[boolvar])
Xbool = pd.DataFrame(Xbool)
Xbool.columns = X[boolvar].columns
# One-hot encoding for nominal (not boolean, ordinal, or numeric) variables.
from sklearn.preprocessing import OneHotEncoder
nomvar = X.columns.intersection(xvar.Variable.loc[xvar.Type == "Nominal"])
enc = OneHotEncoder(handle_unknown='ignore',sparse = False)
Xfull = X[nomvar].fillna(value = 0)
enc.fit(Xfull)
Xohe = enc.transform(Xfull)
Xohe = pd.DataFrame(Xohe)
Xohe.columns = enc.get_feature_names(Xfull.columns)
# Put it all together
X = X.drop(columns = boolvar)
X = X.drop(columns = numvar)
X = X.drop(columns = ordvar)
X = pd.concat([X,Xbool,Xnumimp,Xordimp,Xohe],axis = 1)
X = X.drop(columns = nomvar)
# Create 80/20 split between data for training and final testing.
# Do data split stratified by pathology class.
from sklearn.model_selection import train_test_split
classy = aut[['Class','SEX','EDUC']]
classy = classy.assign(HighEd = classy.EDUC > 12)
classy = classy.drop(columns = ['EDUC'])
classy = classy.assign(MasterClass = classy.astype(str).apply(lambda x: '_'.join(x),axis = 1))
uclass = np.unique(classy.MasterClass)
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=666, stratify=classy.MasterClass)
# Create a further split within the training dataset for CV and for validation.
classy2 = classy.iloc[X_train.index]
X_cv, X_val, y_cv, y_val = train_test_split( X_train, y_train, test_size=0.25, random_state=666, stratify=classy2.MasterClass)
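# Net effect: roughly 60% CV / 20% validation / 20% held-out test, all stratified by Class x SEX x HighEd.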
X_cv.index = range(X_cv.shape[0])
y_cv.index = range(y_cv.shape[0])
X_val.index = range(X_val.shape[0])
y_val.index = range(y_val.shape[0])
X_test.index = range(X_test.shape[0])
y_test.index = range(y_test.shape[0])
#import pickle
#PIK = "nacc_train.pkl"
#data = [X_cv,y_cv,X_val,y_val]
#with open(PIK, "wb") as f:
# pickle.dump(data, f)
#with open(PIK, "rb") as f:
# pickle_list = pickle.load(f)
# Now load in classifier & classified data to do error analyses.
import pickle
pik = "weovr_classifier_og_data.pickle"
with open(pik, "rb") as f:
pickle_list = pickle.load(f)
# Here are the contents of the pickle:
#data = [weovr_clf, X_train, X_test, y_train, y_test, OG_X, OG_y, OG_weovr_pred]
wovr = pickle_list[0]
X_aug_train = pickle_list[1]
X_aug_val = pickle_list[2]
y_aug_train = pickle_list[3]
y_aug_val = pickle_list[4]
pikX = | pd.DataFrame(pickle_list[5]) | pandas.DataFrame |
"""
This script is for finding the optimal distribution to be used in GluonTS
"""
import warnings
import numpy as np
import pandas as pd
import streamlit as st
from scipy import stats
import statsmodels as sm
import matplotlib.pyplot as plt
import autodraft.gluonts as glu
@st.cache
def get_data(path='../../data/input/full_dataset_4_seasons.csv'):
data = pd.read_csv(path)
return data
def best_fit_distribution(data, bins=200, ax=None):
"""Model data by finding best fit distribution to data"""
# Get histogram of original data
y, x = np.histogram(data, bins=bins, density=True)
x = (x + np.roll(x, -1))[:-1] / 2.0
# Distributions to check
DISTRIBUTIONS = [stats.laplace,
stats.norm,
stats.nbinom,
stats.t,
stats.uniform
]
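# Note: stats.nbinom is a discrete distribution with no .fit() method, so the except below silently skips it.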
# Best holders
best_distribution = stats.norm
best_params = (0.0, 1.0)
best_sse = np.inf
# Estimate distribution parameters from data
for distribution in DISTRIBUTIONS:
# Try to fit the distribution
try:
# Ignore warnings from data that can't be fit
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
# fit dist to data
params = distribution.fit(data)
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Calculate fitted PDF and error with fit in distribution
pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
sse = np.sum(np.power(y - pdf, 2.0))
# if axis pass in add to plot
try:
if ax:
pd.Series(pdf, x).plot(ax=ax)
except Exception:
pass
# identify if this distribution is better
if best_sse > sse > 0:
best_distribution = distribution
best_params = params
best_sse = sse
except Exception:
pass
return (best_distribution.name, best_params)
def main():
data = get_data()
_, _, _, _, _, targets = glu.prep_df(data, column_list=['name', 'gameNumber', 'cumStatpoints'], streamlit=True, scale=True, target_output_df=True)
test_gn = targets.loc[targets.loc[:, 'gameNumber'] == 1]
st.dataframe(test_gn.head())
best_name, best_params = best_fit_distribution(test_gn.loc[:, 'cumStatpoints'])
st.write(best_name)
st.write(best_params)
dists_output = | pd.DataFrame() | pandas.DataFrame |
# Copyright (C) 2020 <NAME>, <NAME>
# Code -- Study 1 -- What Personal Information Can a Consumer Facial Image Reveal?
# https://github.com/computationalmarketing/facialanalysis/
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.ticker as mtick
import matplotlib.image as mpimg
from matplotlib import gridspec
from matplotlib import rcParams
rcParams.update({'font.size': 12})
rcParams['font.family'] = 'serif'
rcParams['font.sans-serif'] = ['Times']
import seaborn as sns
from textwrap import wrap
import torchvision.models as models
import torch
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torch.nn.functional as F
import torch.optim as optim
import os
from os import walk
from tqdm import tqdm
from sklearn.utils import class_weight
from sklearn import metrics, svm
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import TruncatedSVD, PCA
from sklearn.model_selection import KFold, GroupKFold, ShuffleSplit, GroupShuffleSplit
from sklearn.metrics import confusion_matrix
import scipy.stats
from scipy.special import softmax
import scipy.cluster.hierarchy as sch
from scipy.cluster.hierarchy import dendrogram, linkage
# ATTENTION: we disable notifications when AUC cannot be computed
from sklearn.exceptions import UndefinedMetricWarning
import warnings
warnings.filterwarnings(action='ignore', category=UndefinedMetricWarning)
warnings.filterwarnings(action='ignore', category=RuntimeWarning)
import json
import numpy as np
from torchvision import transforms
from torch.utils.data.dataset import Dataset
from PIL import Image
import pandas as pd
import pickle
'''
q_to_name_dict contains match between variable labels from the survey results file and a label of the variable
'''
q_to_name_dict = {#'Q11':'gender', #'Q12':'age', 'Q13':'race', 'Q14':'school', # these variables expanded below
'Q15':'marital_status',
#'Q16':'employment',
'Q17':'social_class', #'Q18':'religion', # NO VARIANCE, SO EXCLUDED 'Q19':'US_born',
'Q21':'body_fitness', #'Q22':'household_income', 'Q23':'zip_code',
'Q24':'orientation',
#'Q25':'political_party',
'Q26':'global_warming', 'Q27':'recycling', 'Q28':'religious',
'Q29':'offensive_ads_banned', 'Q30':'offensive_ads_brand',#'Q31':'facebook_evil',
'Q32':'NRA_support',
'Q34':'bin_family_career', 'Q35':'bin_friendship_laws', 'Q36':'bin_freedom_truth',
'Q37':'bin_pleasure_duty', 'Q38':'bin_wealth_fame', 'Q39':'bin_politeness_honesty',
'Q40':'bin_beautiful_smart', 'Q41':'bin_belonging_independence',
'Q42_1': 'lfstl_set_routine',
'Q42_4': 'lfstl_try_new_things',
'Q42_5': 'lfstl_highly_social_many_friends',
'Q42_6': 'lfstl_buy_new_before_others',
'Q42_7': 'lfstl_outgoing_soc_confident',
'Q42_8': 'lfstl_compulsive_purchases',
'Q42_10': 'lfstl_political_protest_participation',
'Q42_11': 'lfstl_donate_to_beggar',
'Q42_12': 'lfstl_like_hunting',
'Q42_13': 'lfstl_like_fishing',
'Q42_14': 'lfstl_like_hiking',
'Q42_15': 'lfstl_like_out_of_doors',
'Q42_16': 'lfstl_cabin_by_quiet_lake_spend_summer',
'Q42_17': 'lfstl_good_fixing_mechanical_things',
'Q42_18': 'lfstl_repair_my_own_car',
'Q42_19': 'lfstl_like_war_stories',
'Q42_20': 'lfstl_do_better_than_avg_fist_fight',
'Q42_21': 'lfstl_would_want_to_be_prof_football_player',
'Q42_22': 'lfstl_would_like_to_be_policeman',
'Q42_23': 'lfstl_too_much_violence_on_tv',
'Q42_24': 'lfstl_should_be_gun_in_every_home',
'Q42_25': 'lfstl_like_danger',
'Q42_26': 'lfstl_would_like_my_own_airplane',
'Q42_27': 'lfstl_like_to_play_poker',
'Q42_28': 'lfstl_smoke_too_much',
'Q42_29': 'lfstl_love_to_eat',
'Q42_30': 'lfstl_spend_money_on_myself_that_shuld_spend_on_family',
'Q42_31': 'lfstl_if_given_chance_men_would_cheat_on_spouses',
'Q42_33': 'lfstl_satisfied_with_life',
'Q42_34': 'lfstl_like_to_be_in_charge',
'Q42_35': 'lfstl_enjoy_shopping',
'Q42_36': 'lfstl_plan_spending_carefully',
'Q42_37': 'lfstl_obey_rules',
'Q43_1': 'lfstl_satisfied_with_weight',
'Q43_4': 'lfstl_regular_exercise_routine',
'Q43_5': 'lfstl_grew_up_eating_healthy_foods',
'Q43_7': 'lfstl_hard_to_be_disciplined_about_what_i_eat',
'Q43_9': 'lfstl_dont_have_to_worry_how_i_eat',
'Q43_11': 'lfstl_never_think_healthy_unhealthy_food',
'Q43_13': 'lfstl_stick_to_healthy_diet_for_family',
'Q43_14': 'lfstl_choose_snack_foods_that_give_vitamins_minerals',
'Q44_1': 'lfstl_often_prepare_sauces_dips_from_scratch',
'Q44_5': 'lfstl_dont_have_much_interest_cooking',
'Q44_6': 'lfstl_seek_out_healthy_foods',
'Q44_8': 'lfstl_read_ingreadients_list_on_the_label',
'Q44_9': 'lfstl_looking_for_new_products_when_at_grocery_store',
'Q44_11': 'lfstl_lower_priced_products_same_as_higher_priced',
'Q44_13': 'lfstl_look_for_authentic_ingredients_flavors',
'Q44_14': 'lfstl_like_ethnic_foods',
'Q44_15': 'lfstl_daring_adventurous_trying_new_foods',
'Q45_42': 'brkfst_none',
'Q45_43': 'brkfst_bar',
'Q45_44': 'brkfst_fruit',
'Q45_45': 'brkfst_nuts',
'Q45_46': 'brkfst_regular_yogurt',
'Q45_47': 'brkfst_greek_yogurt',
'Q45_48': 'brkfst_muffin_croissant',
'Q45_49': 'brkfst_cold_cereal',
'Q45_50': 'brkfst_hot_cereal_oatmeal',
'Q45_51': 'brkfst_frozen_waffle',
'Q45_52': 'brkfst_cheese_cottage_cheese',
'Q45_53': 'brkfst_sandwhich',
'Q45_54': 'brkfst_salad',
'Q45_55': 'brkfst_eggs',
'Q45_56': 'brkfst_meat',
'Q45_57': 'brkfst_chicken',
'Q45_58': 'brkfst_fish',
'Q45_59': 'brkfst_potatoes',
'Q45_60': 'brkfst_vegetables',
'Q45_61': 'brkfst_soup',
'Q45_62': 'brkfst_pasta',
'Q45_63': 'brkfst_hummus',
'Q45_64': 'brkfst_bread_toast',
'Q45_65': 'brkfst_bagel_roll',
'Q45_66': 'brkfst_chocolate_candy',
'Q45_67': 'brkfst_cake_cookies',
'Q45_68': 'brkfst_chips',
'Q45_69': 'brkfst_crackers',
'Q45_70': 'brkfst_pretzels',
'Q45_71': 'brkfst_smoothie',
'Q45_72': 'brkfst_pastry_buns_fruit_pies',
'Q45_73': 'brkfst_brownies_snack_cakes',
'Q45_74': 'brkfst_popcorn',
'Q45_75': 'brkfst_ice_cream_sorbet',
'Q45_76': 'brkfst_pudding_gelatin',
'Q45_77': 'brkfst_refrig_dip_salsa_guacamole_dairy',
'Q46_1': 'rsn_brkfst_gives_energy',
'Q46_4': 'rsn_brkfst_tide_over_next_meal',
'Q46_5': 'rsn_brkfst_great_taste',
'Q46_6': 'rsn_brkfst_satisfies_craving',
'Q46_7': 'rsn_brkfst_comforting_soothing',
'Q46_8': 'rsn_brkfst_healthy_good_guilt_free',
'Q46_9': 'rsn_brkfst_take_care_of_hunger_filling',
'Q46_10': 'rsn_brkfst_not_too_filling',
'Q46_11': 'rsn_brkfst_fits_with_who_i_am',
'Q46_12': 'rsn_brkfst_helps_relax_reduce_stress',
'Q46_13': 'rsn_brkfst_helps_control_weight',
'Q46_14': 'rsn_brkfst_helps_maintain_mental_focus',
'Q46_15': 'rsn_brkfst_keeps_from_overeating_next_meal',
'Q46_16': 'rsn_brkfst_great_texture',
'Q46_17': 'rsn_brkfst_sweet_taste',
'Q46_18': 'rsn_brkfst_tangy_savory_taste',
'Q46_19': 'rsn_brkfst_chunky_multidim_texture',
'Q46_20': 'rsn_brkfst_smooth_creamy_texture',
'Q46_21': 'rsn_brkfst_gives_protein',
'Q46_22': 'rsn_brkfst_keeps_me_going',
'Q46_23': 'rsn_brkfst_good_food_to_eat_with_others',
'Q46_24': 'rsn_brkfst_keeps_me_on_track',
'Q46_25': 'rsn_brkfst_like_ingredients',
'Q46_26': 'rsn_brkfst_refreshing_taste',
'Q47':'pay_organic', 'Q48':'alcohol', 'Q49':'credit_score',
'Q50_1':'em_happiness', 'Q50_2':'em_stress', 'Q50_3':'em_loneliness',
'Q50_4':'em_jealousy', 'Q50_5':'em_fear', 'Q50_6':'em_hopefulness',
'Q50_7':'em_regret', 'Q50_8':'em_optimism', 'Q50_9':'em_contentness',
'Q50_10':'em_gratitude', 'Q50_11':'em_guilt', 'Q50_12':'em_anger',
'Q50_13':'em_joy', 'Q50_14':'em_contempt', 'Q50_15':'em_disgust',
'Q50_16':'em_sadness', 'Q50_17':'em_surprise', 'Q50_18':'em_vulnerability',
'Q50_19':'em_curiosity', 'Q50_20':'em_warmth',
'Q51':'entertain_freq', 'Q52_1':'post_lik_pos', 'Q52_2':'post_lik_neg',
'Q53':'movie_activ_rec', 'Q54':'rec_lik_ask', 'Q55':'rec_lik_follow',
'Q56_1': 'bp_is_talkative',
'Q56_4': 'bp_tends_to_find_faults_with_others',
'Q56_5': 'bp_does_thorough_job',
'Q56_6': 'bp_is_depressed_blue',
'Q56_7': 'bp_is_original_comes_up_new_ideas',
'Q56_8': 'bp_is_helpful_unselfish',
'Q56_9': 'bp_is_relaxed_handles_stress_well',
'Q56_10': 'bp_is_curious_many_different_things',
'Q56_11': 'bp_is_full_of_energy',
'Q56_12': 'bp_starts_quarrels_with_others',
'Q56_13': 'bp_can_be_tense',
'Q56_14': 'bp_is_ingenious_deep_thinker',
'Q56_15': 'bp_has_forgiving_nature',
'Q56_16': 'bp_tends_to_be_lazy',
'Q56_17': 'bp_is_emotionally_stable_not_easily_upset',
'Q56_18': 'bp_is_inventive',
'Q56_19': 'bp_has_assertive_personality',
'Q56_20': 'bp_can_be_cold_aloof',
'Q56_21': 'bp_perserveres_until_task_finished',
'Q56_22': 'bp_can_be_moody',
'Q56_23': 'bp_values_artistic_aesthetic_experience',
'Q56_24': 'bp_is_sometimes_shy_inhibited',
'Q56_25': 'bp_is_considerate_kind_almost_everything',
'Q56_26': 'bp_does_things_efficiently',
'Q56_27': 'bp_remains_calm_in_tense_situations',
'Q56_28': 'bp_prefers_routine_work',
'Q56_29': 'bp_is_outgoing_sociable',
'Q56_30': 'bp_is_sometimes_rude_to_others',
'Q56_31': 'bp_makes_plans_follows_through',
'Q56_32': 'bp_gets_nervous_easily',
'Q56_33': 'bp_likes_to_reflect_play_with_ideas',
'Q56_39': 'bp_likes_to_cooperate_with_others',
'Q56_40': 'bp_is_easily_distracted',
'Q56_41': 'bp_is_sophisticated_arts_music_literature',
'Q56_42': 'bp_generates_enthusiasm',
'Q56_43': 'bp_is_reliable_worker',
'Q56_44': 'bp_is_reserved',
'Q56_45': 'bp_can_be_somewhat_careless',
'Q56_46': 'bp_tends_to_be_disorganized',
'Q56_47': 'bp_worries_a_lot',
'Q56_48': 'bp_has_active_imagination',
'Q56_49': 'bp_tends_to_be_quiet',
'Q56_50': 'bp_is_generally_trusting',
'Q56_52': 'bp_has_few_artistic_interests',
'Q57_1':'use_facebook', 'Q57_2':'use_twitter', 'Q57_3':'use_netflix',
'Q57_4':'use_spotify', 'Q57_5':'use_apple_music', 'Q57_6':'use_tinder',
'Q57_7':'use_pandora', 'Q57_9':'use_amazon',
'Q57_11':'use_saks', 'Q57_13':'use_dropbox',
'Q57_14':'use_gmail', 'Q57_15':'use_hotmail',
'Q57_16':'use_yahoo', 'Q57_18':'use_github',
'Q57_20':'use_shazam', 'Q57_21':'use_snapchat',
'Q57_22':'use_whatsapp', 'Q57_23':'use_instagram',
'Q57_24':'use_telegram', 'Q57_27':'use_hulu',
'Q57_30':'use_bloomingdales', 'Q57_31':'use_NYT',
'Q57_32':'use_WSJ',
'Q59' : 'netflix_frequent_viewer',
'Q60' : 'netflix_binger',
'Q61' : 'netflix_active_recommender',
'Q62' : 'netflix_intend_to_get',
'Q63':'superbowl', 'Q64_1':'TV_news_trust', 'Q64_2':'Internet_news_trust',
'Q65':'track_news_daily', 'Q66':'read_reviews', #'Q67':'sports_programming',
'Q68':'social_media_time', 'Q69':'social_media_posting', #'Q70':'video_watching',
'Q73':'bin_iphone_galaxy', 'Q74':'bin_clothing_tech', 'Q75':'bin_brand_recogn_not',
'Q76':'bin_chocolate_strawberry', 'Q77':'bin_coke_original_diet',
'Q78':'bin_coke_pepsi', 'Q79':'bin_club_book', 'Q80':'bin_beach_mountain',
'Q81':'bin_story_tell_listen', 'Q82':'bin_capitalism_socialism',
'Q83':'bin_children_not', 'Q84':'bin_thinking_acting', 'Q85':'bin_planning_spontaneity',
'Q86':'bin_trump_hillary', 'Q87':'bin_madonna_lady_gaga', 'Q88':'bin_beatles_michael_jackson',
'Q89':'ec_past_fin_better', 'Q90':'ec_fut_fin_better', 'Q91':'ec_good_times',
'Q92':'ec_depression', 'Q93':'ec_buy',
'Q94_1' : 'price_bicycle',
'Q94_4' : 'price_smartphone',
'Q94_5' : 'price_laptop',
'Q94_6' : 'price_jeans',
'Q94_7' : 'price_sneakers',
'Q94_8' : 'price_microwave',
'Q94_9' : 'price_washing_machine',
'Q94_10' : 'price_office_chair',
'Q95_1' : 'spend_savings_emergencies',
'Q95_3' : 'spend_necessities_bills',
'Q95_4' : 'spend_entertainment_gift_loved_one',
'Q97':'restaurant_ethics', 'Q99':'criminal_ethics', 'source':'data_source',
'Q11_0':'gender_0', 'Q11_1':'gender_1', 'Q11_2':'gender_2',
'Q12_0': 'age_0', 'Q12_1': 'age_1', 'Q12_2': 'age_2',
'Q13_0': 'race_0','Q13_1': 'race_1','Q13_2': 'race_2','Q13_3': 'race_3','Q13_4': 'race_4',
'Q14_0': 'school_0','Q14_1': 'school_1','Q14_2': 'school_2',
'Q16_0': 'employment_0','Q16_1': 'employment_1','Q16_2': 'employment_2',
'Q18_0': 'religion_0','Q18_1': 'religion_1','Q18_2': 'religion_2','Q18_3': 'religion_3',
'Q22_0': 'household_income_0','Q22_1': 'household_income_1', 'Q22_2': 'household_income_2',
'Q23_0': 'zip_code_0','Q23_1': 'zip_code_1', 'Q23_2':'zip_code_2','Q23_3': 'zip_code_3','Q23_4': 'zip_code_4',
'Q25_0': 'political_party_0','Q25_1': 'political_party_1','Q25_2': 'political_party_2',
'Q31_0': 'facebook_evil_0','Q31_1': 'facebook_evil_1', 'Q31_2': 'facebook_evil_2',
'Q67_0': 'sports_programming_0','Q67_1': 'sports_programming_1', 'Q67_2': 'sports_programming_2',
'Q70_0': 'video_watching_0', 'Q70_1': 'video_watching_1', 'Q70_2': 'video_watching_2',
'personality_extraversion':'personality_extraversion',
'personality_agreeableness':'personality_agreeableness',
'personality_conscientiousness':'personality_conscientiousness',
'personality_neuroticism':'personality_neuroticism',
'personality_openness':'personality_openness',
'Q71#1_1' : 'active_consumer_google_news',
'Q71#1_2' : 'active_consumer_yahoo_news',
'Q71#1_3' : 'active_consumer_new_york_times',
'Q71#1_4' : 'active_consumer_wsj',
'Q71#1_5' : 'active_consumer_boston_globe',
'Q71#1_6' : 'active_consumer_cnn',
'Q71#1_7' : 'active_consumer_huffpost',
'Q71#1_8' : 'active_consumer_foxnews',
'Q71#1_10' : 'active_consumer_vice',
'Q71#1_11' : 'active_consumer_chicago_tribune',
'Q71#1_12' : 'active_consumer_breitbart',
'Q71#1_14' : 'active_consumer_washington_post',
'Q71#1_16' : 'active_consumer_bbc_news',
'Q71#1_17' : 'active_consumer_facebook',
'Q71#1_19' : 'active_consumer_twitter',
'Q71#2_1' : 'bias_google_news',
'Q71#2_2' : 'bias_yahoo_news',
'Q71#2_3' : 'bias_new_york_times',
'Q71#2_4' : 'bias_wsj',
'Q71#2_5' : 'bias_boston_globe',
'Q71#2_6' : 'bias_cnn',
'Q71#2_7' : 'bias_huffpost',
'Q71#2_8' : 'bias_foxnews',
'Q71#2_10' : 'bias_vice',
'Q71#2_11' : 'bias_chicago_tribune',
'Q71#2_12' : 'bias_breitbart',
'Q71#2_14' : 'bias_washington_post',
'Q71#2_16' : 'bias_bbc_news',
'Q71#2_17' : 'bias_facebook',
'Q71#2_19' : 'bias_twitter',
'Q6_1_TEXT_0' : 'browser_safari_iphone',
'Q6_1_TEXT_1' : 'browser_chrome',
'Q6_1_TEXT_2' : 'browser_other',
}
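# Typical use of q_to_name_dict (sketch; 'survey_df' is a placeholder name for the loaded survey frame):
# survey_df = survey_df.rename(columns=q_to_name_dict)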
image_metrics = {
'rc' : 'red_color',
'gc' : 'green_color',
'bc' : 'blue_color',
'fwhr' : 'face_with_2_height_ratio',
'fwidth' : 'face_width',
'fheight': 'face_height',
'sideeyeratio' : 'face_to_eye_left_right_ratio',
'noseheight' : 'nose_height',
'eyehdiff' : 'eye_height_difference',
'intereyedist': 'inter_eye_difference',
'lipwidth' : 'lip_width',
}
'''
q_to_full_name_dict is similar to q_to_name_dict and contains
match between variable code from the survey results file and a full name of the variable -- used in plotting
'''
q_to_full_name_dict = {'Q15':'Marital status',
'Q17':'Social class',
'Q21':'Body fitness',
'Q24':'Sexual orientation',
'Q26':'Believes global warming is a threat',
'Q27':'Makes effort to recycle',
'Q28':'Considers himself religious',
'Q29':'Believes offensive ads should be banned',
'Q30':'Will stop buying a brand accused of offensive advertising',
'Q32':'Supports National Rifle Association (NRA)',
'Q34':'More important: Family vs. career',
'Q35':'More important: Friendship vs. laws',
'Q36':'More important: Freedom vs. truth',
'Q37':'More important: Pleasure vs. duty',
'Q38':'More important: Wealth vs. fame',
'Q39':'More important: Politeness vs. honesty',
'Q40':'More important: Being beautiful vs. being smart',
'Q41':'More important: Belonging vs. independence',
# Lifestyle
'Q42_1': 'Lifestyle: Prefers a set routine',
'Q42_4': 'Lifestyle: Likes to try new things',
'Q42_5': 'Lifestyle: Is highly social with many friends',
'Q42_6': 'Lifestyle: Buys new things before others',
'Q42_7': 'Lifestyle: Is outgoing and socially confident',
'Q42_8': 'Lifestyle: Tends to make compulsive purchases',
'Q42_10': 'Lifestyle: Is likely to participate in a political protest',
'Q42_11': 'Lifestyle: Is likely to donate to a beggar',
'Q42_12': 'Lifestyle: Likes hunting',
'Q42_13': 'Lifestyle: Likes fishing',
'Q42_14': 'Lifestyle: Likes hiking',
'Q42_15': 'Lifestyle: Likes out of doors',
'Q42_16': 'Lifestyle: Cabin by a quiet lake is a good way to spend summer',
'Q42_17': 'Lifestyle: Is good at fixing mechanical things',
'Q42_18': 'Lifestyle: Repairs his own car',
'Q42_19': 'Lifestyle: Likes war stories',
'Q42_20': 'Lifestyle: Would do better than average in a fist fight',
'Q42_21': 'Lifestyle: Would want to be a professional football player',
'Q42_22': 'Lifestyle: Would like to be policeman',
'Q42_23': 'Lifestyle: Thinks there is too much violence on TV',
'Q42_24': 'Lifestyle: Believes there should be a gun in every home',
'Q42_25': 'Lifestyle: Likes danger',
'Q42_26': 'Lifestyle: Would like his own airplane',
'Q42_27': 'Lifestyle: Likes to play poker',
'Q42_28': 'Lifestyle: Smokes too much',
'Q42_29': 'Lifestyle: Loves to eat',
'Q42_30': 'Lifestyle: Spends money on himself that should be spent on family',
'Q42_31': 'Lifestyle: Believes that if given a chance men would cheat on spouses',
'Q42_33': 'Lifestyle: Is satisfied with life',
'Q42_34': 'Lifestyle: Likes to be in charge',
'Q42_35': 'Lifestyle: Enjoys shopping',
'Q42_36': 'Lifestyle: Plans spending carefully',
'Q42_37': 'Lifestyle: Obeys rules',
'Q43_1': 'Food habits, attitudes: Is satisfied with his weight',
'Q43_4': 'Food habits, attitudes: Follows regular exercise routine',
'Q43_5': 'Food habits, attitudes: Grew up eating healthy foods',
'Q43_7': 'Food habits, attitudes: Finds it hard to be disciplined about what he eats',
'Q43_9': 'Food habits, attitudes: Does not have to worry about how he eats',
'Q43_11': 'Food habits, attitudes: Never thinks of healthy or unhealthy food',
'Q43_13': 'Food habits, attitudes: Sticks to healthy diet for his family',
'Q43_14': 'Food habits, attitudes: Chooses snack foods that give vitamins and minerals',
'Q44_1': 'Food habits, attitudes: Often prepares sauces, dips from scratch',
'Q44_5': 'Food habits, attitudes: Does not have much interest in cooking',
'Q44_6': 'Food habits, attitudes: Seeks out healthy foods',
'Q44_8': 'Food habits, attitudes: Reads ingredient list on the label',
'Q44_9': 'Food habits, attitudes: Looks for new products when at grocery store',
'Q44_11': 'Food habits, attitudes: Believes lower priced products are the same as higher priced ones',
'Q44_13': 'Food habits, attitudes: Looks for authentic ingredients and flavors',
'Q44_14': 'Food habits, attitudes: Likes ethnic foods',
'Q44_15': 'Food habits, attitudes: Is daring, adventurous in trying new foods',
'Q45_42': 'Breakfast food choice: No breakfast',
'Q45_43': 'Breakfast food choice: Bar',
'Q45_44': 'Breakfast food choice: Fruit',
'Q45_45': 'Breakfast food choice: Nuts',
'Q45_46': 'Breakfast food choice: Regular yogurt',
'Q45_47': 'Breakfast food choice: Greek yogurt',
'Q45_48': 'Breakfast food choice: Muffin or croissant',
'Q45_49': 'Breakfast food choice: Cold cereal',
'Q45_50': 'Breakfast food choice: Hot cereal or oatmeal',
'Q45_51': 'Breakfast food choice: Frozen waffle',
'Q45_52': 'Breakfast food choice: Cheese, cottage cheese',
'Q45_53': 'Breakfast food choice: Sandwich',
'Q45_54': 'Breakfast food choice: Salad',
'Q45_55': 'Breakfast food choice: Eggs',
'Q45_56': 'Breakfast food choice: Meat',
'Q45_57': 'Breakfast food choice: Chicken',
'Q45_58': 'Breakfast food choice: Fish',
'Q45_59': 'Breakfast food choice: Potatoes',
'Q45_60': 'Breakfast food choice: Vegetables',
'Q45_61': 'Breakfast food choice: Soup',
'Q45_62': 'Breakfast food choice: Pasta',
'Q45_63': 'Breakfast food choice: Hummus',
'Q45_64': 'Breakfast food choice: Bread, toast',
'Q45_65': 'Breakfast food choice: Bagel, roll',
'Q45_66': 'Breakfast food choice: Chocolate candy',
'Q45_67': 'Breakfast food choice: Cake, cookies',
'Q45_68': 'Breakfast food choice: Chips',
'Q45_69': 'Breakfast food choice: Crackers',
'Q45_70': 'Breakfast food choice: Pretzels',
'Q45_71': 'Breakfast food choice: Smoothie',
'Q45_72': 'Breakfast food choice: Pastry, buns, fruit pies',
'Q45_73': 'Breakfast food choice: Brownies, snack cakes',
'Q45_74': 'Breakfast food choice: Popcorn',
'Q45_75': 'Breakfast food choice: Ice cream, sorbet',
'Q45_76': 'Breakfast food choice: Pudding, gelatin',
'Q45_77': 'Breakfast food choice: refrigerated dip (salsa, guacamole, dairy)',
'Q46_1': 'Breakfast food choice motivations: Gives energy',
'Q46_4': 'Breakfast food choice motivations: Tides him over until next meal',
'Q46_5': 'Breakfast food choice motivations: Tastes great',
'Q46_6': 'Breakfast food choice motivations: Satisfies a craving',
'Q46_7': 'Breakfast food choice motivations: Is comforting, soothing',
'Q46_8': 'Breakfast food choice motivations: Healthy, good, guilt free',
'Q46_9': 'Breakfast food choice motivations: Takes care of hunger, is filling',
'Q46_10': 'Breakfast food choice motivations: Is not too filling',
'Q46_11': 'Breakfast food choice motivations: Fits with who he is',
'Q46_12': 'Breakfast food choice motivations: Helps relax, reduce stress',
'Q46_13': 'Breakfast food choice motivations: Helps control weight',
'Q46_14': 'Breakfast food choice motivations: Helps maintain mental focus',
'Q46_15': 'Breakfast food choice motivations: Keeps from overeating during next meal',
'Q46_16': 'Breakfast food choice motivations: Has great texture',
'Q46_17': 'Breakfast food choice motivations: Tastes sweet',
'Q46_18': 'Breakfast food choice motivations: Tastes tangy, savory',
'Q46_19': 'Breakfast food choice motivations: Has chunky, multidimensional texture',
'Q46_20': 'Breakfast food choice motivations: Has smooth, creamy texture',
'Q46_21': 'Breakfast food choice motivations: Gives protein',
'Q46_22': 'Breakfast food choice motivations: Keeps him going',
'Q46_23': 'Breakfast food choice motivations: Is good food to eat with others',
'Q46_24': 'Breakfast food choice motivations: Keeps him on track',
'Q46_25': 'Breakfast food choice motivations: Likes ingredients',
'Q46_26': 'Breakfast food choice motivations: Has refreshing taste',
'Q47':'Is ready to pay more for organic food products',
'Q48':'Is a frequent alcohol consumer',
'Q49':'Missed a credit card payment within last year',
'Q50_1':'Regularly felt emotions: Happiness',
'Q50_2':'Regularly felt emotions: Stress',
'Q50_3':'Regularly felt emotions: Loneliness',
'Q50_4':'Regularly felt emotions: Jealousy',
'Q50_5':'Regularly felt emotions: Fear',
'Q50_6':'Regularly felt emotions: Hopefulness',
'Q50_7':'Regularly felt emotions: Regret',
'Q50_8':'Regularly felt emotions: Optimism',
'Q50_9':'Regularly felt emotions: Contentness',
'Q50_10':'Regularly felt emotions: Gratitude',
'Q50_11':'Regularly felt emotions: Guilt',
'Q50_12':'Regularly felt emotions: Anger',
'Q50_13':'Regularly felt emotions: Joy',
'Q50_14':'Regularly felt emotions: Contempt',
'Q50_15':'Regularly felt emotions: Disgust',
'Q50_16':'Regularly felt emotions: Sadness',
'Q50_17':'Regularly felt emotions: Surprise',
'Q50_18':'Regularly felt emotions: Vulnerability',
'Q50_19':'Regularly felt emotions: Curiosity',
'Q50_20':'Regularly felt emotions: Warmth',
'Q51':'Frequency of entertaining others at home',
'Q52_1':'Likelihood of social media post about positive shopping experience',
'Q52_2':'Likelihood of social media post about negative shopping experience',
'Q53':'Actively recommends movies to watch to friends',
'Q54':'Likelihood of asking a friend for a movie recommendation',
'Q55':'Likelihood of following a movie recommendation from a friend',
'Q56_1': 'Big 5 variable: Is talkative',
'Q56_4': 'Big 5 variable: Tends to find faults with others (reverse)',
'Q56_5': 'Big 5 variable: Does thorough job',
'Q56_6': 'Big 5 variable: Is depressed, blue',
'Q56_7': 'Big 5 variable: Is original, comes up new ideas',
'Q56_8': 'Big 5 variable: Is helpful, unselfish',
'Q56_9': 'Big 5 variable: Is relaxed, handles stress well (reverse)',
'Q56_10': 'Big 5 variable: Is curious about many different things',
'Q56_11': 'Big 5 variable: Is full of energy',
'Q56_12': 'Big 5 variable: Starts quarrels with others (reverse)',
'Q56_13': 'Big 5 variable: Can be tense',
'Q56_14': 'Big 5 variable: Is ingenious, deep thinker',
'Q56_15': 'Big 5 variable: Has forgiving nature',
'Q56_16': 'Big 5 variable: Tends to be lazy (reverse)',
'Q56_17': 'Big 5 variable: Is emotionally stable, not easily upset (reverse)',
'Q56_18': 'Big 5 variable: Is inventive',
'Q56_19': 'Big 5 variable: Has assertive personality',
'Q56_20': 'Big 5 variable: Can be cold, aloof (reverse)',
'Q56_21': 'Big 5 variable: Perseveres until task is finished',
'Q56_22': 'Big 5 variable: Can be moody',
'Q56_23': 'Big 5 variable: Values artistic, aesthetic experience',
'Q56_24': 'Big 5 variable: Is sometimes shy, inhibited (reverse)',
'Q56_25': 'Big 5 variable: Is considerate, kind to almost everyone',
'Q56_26': 'Big 5 variable: Does things efficiently',
'Q56_27': 'Big 5 variable: Remains calm in tense situations (reverse)',
'Q56_28': 'Big 5 variable: Prefers routine work (reverse)',
'Q56_29': 'Big 5 variable: Is outgoing, sociable',
'Q56_30': 'Big 5 variable: Is sometimes rude to others (reverse)',
'Q56_31': 'Big 5 variable: Makes plans and follows through',
'Q56_32': 'Big 5 variable: Gets nervous easily',
'Q56_33': 'Big 5 variable: Likes to reflect, play with ideas',
'Q56_39': 'Big 5 variable: Likes to cooperate with others',
'Q56_40': 'Big 5 variable: Is easily distracted (reverse)',
'Q56_41': 'Big 5 variable: Is sophisticated in arts, music, literature',
'Q56_42': 'Big 5 variable: Generates enthusiasm',
'Q56_43': 'Big 5 variable: Is reliable worker',
'Q56_44': 'Big 5 variable: Is reserved (reverse)',
'Q56_45': 'Big 5 variable: Can be somewhat careless (reverse)',
'Q56_46': 'Big 5 variable: Tends to be disorganized (reverse)',
'Q56_47': 'Big 5 variable: Worries a lot',
'Q56_48': 'Big 5 variable: Has active imagination',
'Q56_49': 'Big 5 variable: Tends to be quiet (reverse)',
'Q56_50': 'Big 5 variable: Is generally trusting',
'Q56_52': 'Big 5 variable: Has few artistic interests (reverse)',
'Q57_1':'Uses Facebook', 'Q57_2':'Uses Twitter', 'Q57_3':'Uses Netflix',
'Q57_4':'Uses Spotify', 'Q57_5':'Uses Apple music', 'Q57_6':'Uses Tinder',
'Q57_7':'Uses Pandora', 'Q57_9':'Uses Amazon',
'Q57_11':'Uses Saks', 'Q57_13':'Uses Dropbox',
'Q57_14':'Uses Gmail', 'Q57_15':'Uses Hotmail',
'Q57_16':'Uses Yahoo', 'Q57_18':'Uses Github',
'Q57_20':'Uses Shazam', 'Q57_21':'Uses Snapchat',
'Q57_22':'Uses Whatsapp', 'Q57_23':'Uses Instagram',
'Q57_24':'Uses Telegram', 'Q57_27':'Uses Hulu',
'Q57_30':'Uses Bloomingdales', 'Q57_31':'Uses NYT',
'Q57_32':'Uses WSJ',
'Q59' : 'Watches Netflix 4 or more days per week',
'Q60' : 'Tends to watch more than 3 hours of Netflix at a time',
'Q61' : 'Likelihood of recommending Netflix to a friend',
'Q62' : 'Intent to get Netflix subscription within 6 months',
'Q63':'Perceived effect of Superbowl ads on choices',
'Q64_1':'Trusts TV news',
'Q64_2':'Trusts Internet news',
'Q65':'Tracks news daily',
'Q66':'Reads product review in detail before purchase', #'Q67':'sports_programming',
'Q68':'Spends 4 hours or more a day on social media',
'Q69':'Frequency of posting on social media', #'Q70':'video_watching',
'Q73':'Prefers: iPhone vs. Galaxy', 'Q74':'Prefers: Clothing vs. tech', 'Q75':'Prefers: Recognizable brand vs. not well-known brand',
'Q76':'Prefers: Chocolate ice cream vs. strawberry ice cream', 'Q77':'Prefers: Original coke vs. diet',
'Q78':'Prefers: Coke vs. Pepsi', 'Q79':'Prefers: Night in club vs. night with a book', 'Q80':'Prefers: Beach vs. mountain',
'Q81':'Prefers: Telling a story vs. listening to a story', 'Q82':'Prefers: Capitalism vs. socialism',
'Q83':'Prefers: Children vs. no children', 'Q84':'Prefers: Thinking vs. acting', 'Q85':'Prefers: Planning vs. spontaneity',
'Q86':'Prefers: Trump vs. Hillary', 'Q87':'Prefers: Madonna vs. <NAME>', 'Q88':'Prefers: Beatles vs. <NAME>',
'Q89':'Is better/ worse financially than a year before',
'Q90':'Expects to be better/ worse financially in a year',
'Q91':'Expects good/ bad times financially in the US within a year',
'Q92':'Expects economic depression in the next five years',
'Q93':'Considers it to be a good time to buy a major household item',
'Q94_1' : 'Price sensitivity: Bicycle',
'Q94_4' : 'Price sensitivity: Smartphone',
'Q94_5' : 'Price sensitivity: Laptop',
'Q94_6' : 'Price sensitivity: Jeans',
'Q94_7' : 'Price sensitivity: Sneakers',
'Q94_8' : 'Price sensitivity: Microwave',
'Q94_9' : 'Price sensitivity: Washing machine',
'Q94_10' : 'Price sensitivity: Office chair',
'Q95_1' : 'Windfall income allocation: Savings, emergencies',
'Q95_3' : 'Windfall income allocation: Necessities, bills',
'Q95_4' : 'Windfall income allocation: Gift to a loved one',
'Q97':'Ethics: What right does your friend have to expect you to go easy on her restaurant in your review?',
'Q99':'Ethics: What right does your friend have to expect you to lie in court to protect him?',
'source':'Data source: Qualtrics panel vs. MTurk',
'Q11_0': 'Gender: Male', 'Q11_1':'Gender: Female', 'Q11_2':'Gender: Other',
'Q12_0': 'Age: <=30', 'Q12_1': 'Age: (30; 50] ', 'Q12_2': 'Age: > 50',
'Q13_0': 'Race: Caucasian/ White', 'Q13_1': 'Race: Asian','Q13_2': 'Race: Hispanic/ Latino','Q13_3': 'Race: African American/ Black','Q13_4': 'Race: Other',
'Q14_0': 'Education achieved: High school or less','Q14_1': 'Education achieved: Undergraduate degree','Q14_2': 'Education achieved: Graduate degree',
'Q16_0': 'Employment: Employed/ student','Q16_1': 'Employment: Unemployed, but looking','Q16_2': 'Employment: Unemployed and not looking',
'Q18_0': 'Religious background: Christianity','Q18_1': 'Religious background: Judaism, Islam','Q18_2': 'Religious background: Other (Hinduism, Buddhism, etc.)','Q18_3': 'Religious background: No particular religion',
'Q22_0': 'Household income: <$50K','Q22_1': 'Household income: [$50K,$100K)', 'Q22_2': 'Household income: >=$100K',
'Q23_0': 'ZIP code first digit: 0, 1','Q23_1': 'ZIP code first digit: 2, 3', 'Q23_2':'ZIP code first digit: 4, 5','Q23_3': 'ZIP code first digit: 6, 7','Q23_4': 'ZIP code first digit: 8, 9',
'Q25_0': 'Political party alignment: Republican','Q25_1': 'Political party alignment: Democrat','Q25_2': 'Political party alignment: Independent',
'Q31_0': 'Facebook is good for humanity: Yes','Q31_1': 'Facebook is good for humanity: No', 'Q31_2': 'Facebook is good for humanity: Unsure',
'Q67_0': 'Sports programming hours watched per week: 0','Q67_1': 'Sports programming hours watched per week: (0,8]', 'Q67_2': 'Sports programming hours watched per week: >8',
'Q70_0': 'Prefers to watch videos: Online', 'Q70_1': 'Prefers to watch videos: TV', 'Q70_2': 'Prefers to watch videos: Does not watch videos',
'personality_extraversion':'Big 5 personality: Extraversion',
'personality_agreeableness':'Big 5 personality: Agreeableness',
'personality_conscientiousness':'Big 5 personality: Conscientiousness',
'personality_neuroticism':'Big 5 personality: Neuroticism',
'personality_openness':'Big 5 personality: Openness',
'Q71#1_1' : 'Active consumer: Google news',
'Q71#1_2' : 'Active consumer: Yahoo news',
'Q71#1_3' : 'Active consumer: New York Times',
'Q71#1_4' : 'Active consumer: WSJ',
'Q71#1_5' : 'Active consumer: Boston Globe',
'Q71#1_6' : 'Active consumer: CNN',
'Q71#1_7' : 'Active consumer: Huffpost',
'Q71#1_8' : 'Active consumer: FoxNews',
'Q71#1_10' : 'Active consumer: Vice',
'Q71#1_11' : 'Active consumer: Chicago Tribune',
'Q71#1_12' : 'Active consumer: Breitbart',
'Q71#1_14' : 'Active consumer: Washington Post',
'Q71#1_16' : 'Active consumer: BBC News',
'Q71#1_17' : 'Active consumer: Facebook',
'Q71#1_19' : 'Active consumer: Twitter',
'Q71#2_1' : 'Perception of bias: Google News',
'Q71#2_2' : 'Perception of bias: Yahoo News',
'Q71#2_3' : 'Perception of bias: New York Times',
'Q71#2_4' : 'Perception of bias: WSJ',
'Q71#2_5' : 'Perception of bias: Boston Globe',
'Q71#2_6' : 'Perception of bias: CNN',
'Q71#2_7' : 'Perception of bias: Huffpost',
'Q71#2_8' : 'Perception of bias: FoxNews',
'Q71#2_10' : 'Perception of bias: Vice',
'Q71#2_11' : 'Perception of bias: Chicago Tribune',
'Q71#2_12' : 'Perception of bias: Breitbart',
'Q71#2_14' : 'Perception of bias: Washington Post',
'Q71#2_16' : 'Perception of bias: BBC News',
'Q71#2_17' : 'Perception of bias: Facebook',
'Q71#2_19' : 'Perception of bias: Twitter',
'Q6_1_TEXT_0' : 'Browser: Safari iPhone',
'Q6_1_TEXT_1' : 'Browser: Chrome',
'Q6_1_TEXT_2' : 'Browser: Other',
# 'rc' : 'Color channel: Red',
# 'gc' : 'Color channel: Green',
# 'bc' : 'Color channel: Blue',
# 'fwhr' : 'Face width-to-height ratio',
# 'fwidth' : 'Face width',
# 'fheight': 'Face height',
# 'sideeyeratio' : 'Face-edge to eye distance, left to right ratio',
# 'noseheight' : 'Nose height',
# 'eyehdiff' : 'Eye height difference',
# 'intereyedist': 'Inter-eye difference',
# 'lipwidth' : 'Lip width',
}
'''
var_groups contains a grouping of variables by the categories we identified.
Some variables, such as the data source indicator (Qualtrics vs. MTurk), are not included in any group.
'''
var_groups = {
'demographics_biological' : [
'Q11_1', # gender
'Q12_0', 'Q12_1', # age
'Q13_0','Q13_1', 'Q13_2','Q13_3', # race
'Q21', # body fitness
'Q24',# orientation
# 'rc', 'gc', 'bc',# avg. face color
# 'fwhr', 'fwidth', 'fheight',
# 'sideeyeratio', 'noseheight', 'eyehdiff', 'intereyedist', 'lipwidth'
],
'demographics_socio_economic' : [
'Q15', # :'marital_status'
'Q17', #:'social_class'
'Q14_0', 'Q14_1', # school level
'Q16_0', 'Q16_1', # employment status
'Q18_0','Q18_1','Q18_2', # religious
'Q22_0', 'Q22_1', # household income
'Q23_0','Q23_1', 'Q23_2','Q23_3', # zip code
'Q25_0', 'Q25_1'], # political party
'personality' : ['personality_extraversion',
'personality_agreeableness',
'personality_conscientiousness',
'personality_neuroticism',
'personality_openness'
],
'character_ethics' : [
'Q97', #'restaurant_ethics'
'Q99', #'criminal_ethics'
'Q49', #'credit_score',
'Q48', #'alcohol',
],
'lifestyle' : [
'Q42_1',#: 'lfstl_set_routine',
'Q42_4',#: 'lfstl_try_new_things',
'Q42_5',#: 'lfstl_highly_social_many_friends',
'Q42_6',#: 'lfstl_buy_new_before_others',
'Q42_7',#: 'lfstl_outgoing_soc_confident',
'Q42_8',#: 'lfstl_compulsive_purchases',
'Q42_10',#: 'lfstl_political_protest_participation',
'Q42_11',#: 'lfstl_donate_to_beggar',
'Q42_12',#: 'lfstl_like_hunting',
'Q42_13',#: 'lfstl_like_fishing',
'Q42_14',#: 'lfstl_like_hiking',
'Q42_15',#: 'lfstl_like_out_of_doors',
'Q42_16',#: 'lfstl_cabin_by_quiet_lake_spend_summer',
'Q42_17',#: 'lfstl_good_fixing_mechanical_things',
'Q42_18',#: 'lfstl_repair_my_own_car',
'Q42_19',#: 'lfstl_like_war_stories',
'Q42_20',#: 'lfstl_do_better_than_avg_fist_fight',
'Q42_21',#: 'lfstl_would_want_to_be_prof_football_player',
'Q42_22',#: 'lfstl_would_like_to_be_policeman',
'Q42_23',#: 'lfstl_too_much_violence_on_tv',
'Q42_24',#: 'lfstl_should_be_gun_in_every_home',
'Q42_25',#: 'lfstl_like_danger',
'Q42_26',#: 'lfstl_would_like_my_own_airplane',
'Q42_27',#: 'lfstl_like_to_play_poker',
'Q42_28',#: 'lfstl_smoke_too_much',
'Q42_29',#: 'lfstl_love_to_eat',
'Q42_30',#: 'lfstl_spend_money_on_myself_that_shuld_spend_on_family',
'Q42_31',#: 'lfstl_if_given_chance_men_would_cheat_on_spouses',
'Q42_33',#: 'lfstl_satisfied_with_life',
'Q42_34',#: 'lfstl_like_to_be_in_charge',
'Q42_35',#: 'lfstl_enjoy_shopping',
'Q42_36',#: 'lfstl_plan_spending_carefully',
'Q42_37',#: 'lfstl_obey_rules',
],
'food_habits_and_attitudes' : [
'Q43_1',#: 'lfstl_satisfied_with_weight',
'Q43_4',#: 'lfstl_regular_exercise_routine',
'Q43_5',#: 'lfstl_grew_up_eating_healthy_foods',
'Q43_7',#: 'lfstl_hard_to_be_disciplined_about_what_i_eat',
'Q43_9',#: 'lfstl_dont_have_to_worry_how_i_eat',
'Q43_11',#: 'lfstl_never_think_healthy_unhealthy_food',
'Q43_13',#: 'lfstl_stick_to_healthy_diet_for_family',
'Q43_14',#: 'lfstl_choose_snack_foods_that_give_vitamins_minerals',
'Q44_1',#: 'lfstl_often_prepare_sauces_dips_from_scratch',
'Q44_5',#: 'lfstl_dont_have_much_interest_cooking',
'Q44_6',#: 'lfstl_seek_out_healthy_foods',
'Q44_8',#: 'lfstl_read_ingreadients_list_on_the_label',
'Q44_9',#: 'lfstl_looking_for_new_products_when_at_grocery_store',
'Q44_11',#: 'lfstl_lower_priced_products_same_as_higher_priced',
'Q44_13',#: 'lfstl_look_for_authentic_ingredients_flavors',
'Q44_14',#: 'lfstl_like_ethnic_foods',
'Q44_15',#: 'lfstl_daring_adventurous_trying_new_foods',
'Q47',#:'pay_organic',
],
'emotional_state' : [
'Q50_1',#:'em_happiness',
'Q50_2',#:'em_stress',
'Q50_3',#:'em_loneliness',
'Q50_4',#:'em_jealousy',
'Q50_5',#:'em_fear',
'Q50_6',#:'em_hopefulness',
'Q50_7',#:'em_regret',
'Q50_8',#:'em_optimism',
'Q50_9',#:'em_contentness',
'Q50_10',#:'em_gratitude',
'Q50_11',#:'em_guilt',
'Q50_12',#:'em_anger',
'Q50_13',#:'em_joy',
'Q50_14',#:'em_contempt',
'Q50_15',#:'em_disgust',
'Q50_16',#:'em_sadness',
'Q50_17',#:'em_surprise',
'Q50_18',#:'em_vulnerability',
'Q50_19',#:'em_curiosity',
'Q50_20',#:'em_warmth'
],
'values_and_beliefs' : [
'Q26',#:'global_warming',
'Q27',#:'recycling',
'Q28',#:'religious',
'Q29',#:'offensive_ads_banned',
'Q30',#:'offensive_ads_brand',
'Q32',#:'NRA_support',
'Q31_0',#: 'facebook_evil_0',
'Q31_1',#: 'facebook_evil_1',
'Q31_2',#: 'facebook_evil_2',
'Q34',#:'bin_family_career',
'Q35',#:'bin_friendship_laws',
'Q36',#:'bin_freedom_truth',
'Q37',#:'bin_pleasure_duty',
'Q38',#:'bin_wealth_fame',
'Q39',#:'bin_politeness_honesty',
'Q40',#:'bin_beautiful_smart',
'Q41',#:'bin_belonging_independence',
],
'price_sensitivity' : [
'Q94_1',# : 'price_bicycle',
'Q94_4',# : 'price_smartphone',
'Q94_5',# : 'price_laptop',
'Q94_6',# : 'price_jeans',
'Q94_7',# : 'price_sneakers',
'Q94_8',# : 'price_microwave',
'Q94_9',# : 'price_washing_machine',
'Q94_10',# : 'price_office_chair',
],
'breakfast_food_choice' : [
'Q45_42',#: 'brkfst_none',
'Q45_43',#: 'brkfst_bar',
'Q45_44',#: 'brkfst_fruit',
'Q45_45',#: 'brkfst_nuts',
'Q45_46',#: 'brkfst_regular_yogurt',
'Q45_47',#: 'brkfst_greek_yogurt',
'Q45_48',#: 'brkfst_muffin_croissant',
'Q45_49',#: 'brkfst_cold_cereal',
'Q45_50',#: 'brkfst_hot_cereal_oatmeal',
'Q45_51',#: 'brkfst_frozen_waffle',
'Q45_52',#: 'brkfst_cheese_cottage_cheese',
'Q45_53',#: 'brkfst_sandwhich',
'Q45_54',#: 'brkfst_salad',
'Q45_55',#: 'brkfst_eggs',
'Q45_56',#: 'brkfst_meat',
'Q45_57',#: 'brkfst_chicken',
'Q45_58',#: 'brkfst_fish',
'Q45_59',#: 'brkfst_potatoes',
'Q45_60',#: 'brkfst_vegetables',
'Q45_61',#: 'brkfst_soup',
'Q45_62',#: 'brkfst_pasta',
'Q45_63',#: 'brkfst_hummus',
'Q45_64',#: 'brkfst_bread_toast',
'Q45_65',#: 'brkfst_bagel_roll',
'Q45_66',#: 'brkfst_chocolate_candy',
'Q45_67',#: 'brkfst_cake_cookies',
'Q45_68',#: 'brkfst_chips',
'Q45_69',#: 'brkfst_crackers',
'Q45_70',#: 'brkfst_pretzels',
'Q45_71',#: 'brkfst_smoothie',
'Q45_72',#: 'brkfst_pastry_buns_fruit_pies',
'Q45_73',#: 'brkfst_brownies_snack_cakes',
'Q45_74',#: 'brkfst_popcorn',
'Q45_75',#: 'brkfst_ice_cream_sorbet',
'Q45_76',#: 'brkfst_pudding_gelatin',
'Q45_77',#: 'brkfst_refrig_dip_salsa_guacamole_dairy',
],
'breakfast_motivations' : [
'Q46_1',#: 'rsn_brkfst_gives_energy',
'Q46_4',#: 'rsn_brkfst_tide_over_next_meal',
'Q46_5',#: 'rsn_brkfst_great_taste',
'Q46_6',#: 'rsn_brkfst_satisfies_craving',
'Q46_7',#: 'rsn_brkfst_comforting_soothing',
'Q46_8',#: 'rsn_brkfst_healthy_good_guilt_free',
'Q46_9',#: 'rsn_brkfst_take_care_of_hunger_filling',
'Q46_10',#: 'rsn_brkfst_not_too_filling',
'Q46_11',#: 'rsn_brkfst_fits_with_who_i_am',
'Q46_12',#: 'rsn_brkfst_helps_relax_reduce_stress',
'Q46_13',#: 'rsn_brkfst_helps_control_weight',
'Q46_14',#: 'rsn_brkfst_helps_maintain_mental_focus',
'Q46_15',#: 'rsn_brkfst_keeps_from_overeating_next_meal',
'Q46_16',#: 'rsn_brkfst_great_texture',
'Q46_17',#: 'rsn_brkfst_sweet_taste',
'Q46_18',#: 'rsn_brkfst_tangy_savory_taste',
'Q46_19',#: 'rsn_brkfst_chunky_multidim_texture',
'Q46_20',#: 'rsn_brkfst_smooth_creamy_texture',
'Q46_21',#: 'rsn_brkfst_gives_protein',
'Q46_22',#: 'rsn_brkfst_keeps_me_going',
'Q46_23',#: 'rsn_brkfst_good_food_to_eat_with_others',
'Q46_24',#: 'rsn_brkfst_keeps_me_on_track',
'Q46_25',#: 'rsn_brkfst_like_ingredients',
'Q46_26',#: 'rsn_brkfst_refreshing_taste',
],
'product_preferences' : [
'Q73',#:'bin_iphone_galaxy',
'Q74',#:'bin_clothing_tech',
'Q75',#:'bin_brand_recogn_not',
'Q76',#:'bin_chocolate_strawberry',
'Q77',#:'bin_coke_original_diet',
'Q78',#:'bin_coke_pepsi',
'Q79',#:'bin_club_book',
'Q80',#:'bin_beach_mountain',
'Q81',#:'bin_story_tell_listen',
'Q82',#:'bin_capitalism_socialism',
'Q83',#:'bin_children_not',
'Q84',#:'bin_thinking_acting',
'Q85',#:'bin_planning_spontaneity',
'Q86',#:'bin_trump_hillary',
'Q87',#:'bin_madonna_lady_gaga',
'Q88',#:'bin_beatles_michael_jackson',
],
'online_service_usage' : [
'Q57_1',#:'use_facebook',
'Q57_2',#:'use_twitter',
'Q57_3',#:'use_netflix',
'Q57_4',#:'use_spotify',
'Q57_5',#:'use_apple_music',
'Q57_6',#:'use_tinder',
'Q57_7',#:'use_pandora',
'Q57_9',#:'use_amazon',
'Q57_11',#:'use_saks',
'Q57_13',#:'use_dropbox',
'Q57_14',#:'use_gmail',
'Q57_15',#:'use_hotmail',
'Q57_16',#:'use_yahoo',
'Q57_18',#:'use_github',
'Q57_20',#:'use_shazam',
'Q57_21',#:'use_snapchat',
'Q57_22',#:'use_whatsapp',
'Q57_23',#:'use_instagram',
'Q57_24',#:'use_telegram',
'Q57_27',#:'use_hulu',
'Q57_30',#:'use_bloomingdales',
'Q57_31',#:'use_NYT',
'Q57_32',#:'use_WSJ',
],
'browser' : [
'Q6_1_TEXT_0', #: 'Browser: Safari iPhone',
'Q6_1_TEXT_1', #: 'Browser: Chrome',
'Q6_1_TEXT_2', #: 'Browser: Other',
],
'media_source' : [
'Q71#1_1',# : 'active_consumer_google_news',
'Q71#1_2',# : 'active_consumer_yahoo_news',
'Q71#1_3',# : 'active_consumer_new_york_times',
'Q71#1_4',# : 'active_consumer_wsj',
'Q71#1_5',# : 'active_consumer_boston_globe',
'Q71#1_6',# : 'active_consumer_cnn',
'Q71#1_7',# : 'active_consumer_huffpost',
'Q71#1_8',# : 'active_consumer_foxnews',
'Q71#1_10',# : 'active_consumer_vice',
'Q71#1_11',# : 'active_consumer_chicago_tribune',
'Q71#1_12',# : 'active_consumer_breitbart',
'Q71#1_14',# : 'active_consumer_washington_post',
'Q71#1_16',# : 'active_consumer_bbc_news',
'Q71#1_17',# : 'active_consumer_facebook',
'Q71#1_19',# : 'active_consumer_twitter',
],
'media_trust' : [
'Q71#2_1',# : 'bias_google_news',
'Q71#2_2',# : 'bias_yahoo_news',
'Q71#2_3',# : 'bias_new_york_times',
'Q71#2_4',# : 'bias_wsj',
'Q71#2_5',# : 'bias_boston_globe',
'Q71#2_6',# : 'bias_cnn',
'Q71#2_7',# : 'bias_huffpost',
'Q71#2_8',# : 'bias_foxnews',
'Q71#2_10',# : 'bias_vice',
'Q71#2_11',# : 'bias_chicago_tribune',
'Q71#2_12',# : 'bias_breitbart',
'Q71#2_14',# : 'bias_washington_post',
'Q71#2_16',# : 'bias_bbc_news',
'Q71#2_17',# : 'bias_facebook',
'Q71#2_19',# : 'bias_twitter',
'Q64_1',#:'TV_news_trust',
'Q64_2',#:'Internet_news_trust',
],
'economic_outlook' : [
'Q89',#:'ec_past_fin_better',
'Q90',#:'ec_fut_fin_better',
'Q91',#:'ec_good_times',
'Q92',#:'ec_depression',
],
'spend_intentions' :[
'Q93',#:'ec_buy',
'Q95_1',# : 'spend_savings_emergencies',
'Q95_3',# : 'spend_necessities_bills',
'Q95_4',# : 'spend_entertainment_gift_loved_one',
'Q62', #: 'netflix_intend_to_get',
],
'media_consumption_intensity' : [
'Q65',#:'track_news_daily',
'Q68',#:'social_media_time',
'Q69',#:'social_media_posting',
'Q67_0',#: 'sports_programming_0',
'Q67_1',#: 'sports_programming_1',
'Q67_2',#: 'sports_programming_2',
'Q70_0',#: 'video_watching_0',
'Q70_1',#: 'video_watching_1',
'Q70_2',#: 'video_watching_2',
'Q59', #: 'netflix_frequent_viewer',
'Q60', #: 'netflix_binger',
],
'follower_characteristics' : [
'Q63',#:'superbowl',
'Q66',#:'read_reviews',
'Q55',#:'rec_lik_follow'
'Q54',#:'rec_lik_ask',
],
'influencer_characteristics' : [
'Q52_1',#:'post_lik_pos',
'Q52_2',#:'post_lik_neg',
'Q53',#:'movie_activ_rec',
'Q51',#:'entertain_freq'
'Q61', # : 'netflix_active_recommender',
],
}
'''
meta_groups contains labels for the buckets of the variable groups
'''
meta_groups = [
('Demographics', '', 'Biological characteristics', 'demographics_biological'),
('Demographics', '', 'Socio-economic status', 'demographics_socio_economic'),
('General psychographics', '', 'Values and beliefs', 'values_and_beliefs'),
('General psychographics', '', 'Big 5 personalities', 'personality'),
('General psychographics', '', 'Regularly felt emotions', 'emotional_state'),
('General psychographics', '', 'Character and ethical choices', 'character_ethics'),
('General psychographics', '', 'Lifestyle', 'lifestyle'),
('Consumer psychographics', 'Products and services', 'Product preferences', 'product_preferences'),
('Consumer psychographics', 'Products and services', 'Online service use', 'online_service_usage'),
('Consumer psychographics', 'Products and services', 'Browser', 'browser'),
('Consumer psychographics', 'Media', 'Media choice', 'media_source'),
('Consumer psychographics', 'Media', 'Media consumption intensity', 'media_consumption_intensity'),
('Consumer psychographics', 'Media', 'Media trust', 'media_trust'),
('Consumer psychographics', 'Influence', 'Influencer characteristics', 'influencer_characteristics'),
('Consumer psychographics', 'Influence', 'Follower characteristics', 'follower_characteristics'),
('Consumer psychographics', 'Economics', 'Spend intentions', 'spend_intentions'),
('Consumer psychographics', 'Economics', 'Price sensitivity', 'price_sensitivity'),
('Consumer psychographics', 'Economics', 'Economic outlook', 'economic_outlook'),
('Consumer psychographics', 'Food', 'Food habits and attitudes', 'food_habits_and_attitudes'),
('Consumer psychographics', 'Food', 'Breakfast food choice', 'breakfast_food_choice'),
('Consumer psychographics', 'Food', 'Breakfast food choice motivations', 'breakfast_motivations'),
]
meta_groups = pd.DataFrame(meta_groups)
meta_groups.columns = ['l0', 'l1', 'l2', 'l3']
'''
CustomDataset supplies one observation at a time (image tensor, labels, image metrics).
It also performs image preprocessing, such as per-channel normalization.
For training, it additionally applies random transformations -- horizontal flips, resized crops, rotations, and color jitter -- to expand the observation pool.
'''
class CustomDataset(Dataset):
def __init__(self, data, tr = True, cropped=False):
self.data = data
if not cropped:
self.paths = self.data['img_path'].values.astype('str')
else:
self.paths = self.data['img_path_face_only'].values.astype('str')
self.data_len = self.data.shape[0]
self.labels = self.data[q_list].values.astype('int32')
self.image_metrics = self.data[im_list].values.astype('float32')
# transforms
if tr:
self.transforms = transforms.Compose([
transforms.Resize(224),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomApply([
transforms.RandomResizedCrop(224),
transforms.RandomRotation(20),
transforms.ColorJitter(brightness=0.1,contrast=0.1,saturation=0.1,hue=0.1)], p=0.75),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])])
else:
self.transforms = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])])
def __getitem__(self, index):
img_path = PATH + '/'+ self.paths[index]
img = Image.open(img_path)
img_tensor = self.transforms(img)
label = self.labels[index]
image_metric = self.image_metrics[index]
return (img_tensor, label, image_metric)
def __len__(self):
return self.data_len
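# Illustrative usage sketch (hypothetical): assumes `data` has an 'img_path' column, the q_list label
# columns, and the im_list image-metric columns, with image files stored under PATH:
#   ds = CustomDataset(data, tr=False)
#   img_tensor, labels, image_metrics = ds[0]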
#get pretrained resnet50 model
def get_pretrained():
model = models.resnet50(pretrained=True)
return model
#replace last layer
def prepare_for_finetuning(model):
# freeze all pretrained parameters; the layers to optimize are selected explicitly in finetune_and_save
for param in model.parameters():
param.requires_grad = False
# replacing last layer with a new fully connected head sized to the total number of output levels (n_outs); its parameters are trainable by default
model.fc = torch.nn.Linear(model.fc.in_features, n_outs)
return
# create an object that uses CustomDataset object from above to load multiple observations in parallel
def create_dataloader(data,rand=True, cropped=False):
if rand: # shuffle observations
dataset = CustomDataset(data, tr=True, cropped=cropped)
loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=10, drop_last=False)
else: # load observations in the original order from data
dataset = CustomDataset(data, tr=False, cropped=cropped)
loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, sampler = torch.utils.data.sampler.SequentialSampler(dataset), num_workers=10, drop_last=False)
return loader
#finetune and save neural net model
def finetune_and_save(loader_train, loader_test):
# loading pretrained model and preparing it for finetuning
model = get_pretrained()
prepare_for_finetuning(model)
if CUDA:
model.cuda()
# optimize only last six layers
layers = list(model.children())
params = [p for layer in reversed(layers[-6:]) for p in layer.parameters()] # parameters of the last six child modules
optimizer = optim.Adamax(params=params, lr=0.001)
hist = {}
hist['d_labs'] = q_list
hist['train_loss'] = []
hist['val_loss'] = []
hist['train_loss_d'] = []
hist['val_loss_d'] = []
hist['train_auc_d'] = []
hist['val_auc_d'] = []
# train and evaluate
for epoch in range(N_EPOCHS):
train_loss, train_loss_d, train_auc_d = run_epoch(model, loss_f, optimizer, loader_train, update_model = True) # training
eval_loss, eval_loss_d, eval_auc_d = run_epoch(model, loss_f, optimizer, loader_test, update_model = False) # evaluation
#print('epoch: {} \ttrain loss: {:.6f} \tvalidation loss: {:.6f}'.format(epoch, train_loss, eval_loss))
hist['train_loss'].append(train_loss)
hist['val_loss'].append(eval_loss)
hist['train_loss_d'].append(train_loss_d)
hist['val_loss_d'].append(eval_loss_d)
hist['train_auc_d'].append(train_auc_d)
hist['val_auc_d'].append(eval_auc_d)
# # optional: per-variable train/validation AUC printout each epoch
# for i in range(len(q_list)):
# print('variable: {}\t {} \ttrain auc: {:.6f} \tvalidation auc: {:.6f}'.format(
# q_list[i], q_to_name_dict[q_list[i]], train_auc_d[i], eval_auc_d[i]))
with open(RESULTS+'/eval_record.json', 'w') as fjson:
json.dump(hist, fjson)
# saving model
torch.save(model, RESULTS+"/finetuned_model")
return
# function that performs training (or evaluation) over one epoch (a full pass through the data set)
def run_epoch(model, loss_f, optimizer, loader, update_model = False):
if update_model:
model.train()
else:
model.eval()
loss_hist = []
loss_hist_detailed = []
auc_hist_detailed = []
for batch_i, var in tqdm(enumerate(loader)):
loss, loss_detailed, auc_detailed = loss_f(model, var)
if update_model:
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_hist.append(loss.data.item())
loss_hist_detailed.append(loss_detailed)
auc_hist_detailed.append(auc_detailed)
loss_detailed = pd.DataFrame(loss_hist_detailed)
loss_detailed.columns = q_list
auc_detailed = pd.DataFrame(auc_hist_detailed)
auc_detailed.columns = q_list
return np.mean(loss_hist).item(), loss_detailed.mean(0).values.tolist(), auc_detailed.mean(0).values.tolist()
# function to compute loss from a batch data
def loss_f(model, var):
data, target, _ = var
# data [n, 3, 224, 224]
# target [n, 349]
# image metrics [n, 11]
data, target = Variable(data), Variable(target)
if CUDA:
data, target = data.cuda(), target.cuda()
output = model(data) # [n, 2*349=698]
loss = 0
loss_detailed = []
auc_detailed = []
for i in range(len(q_d_list)):
# load class weight for variable i
w = torch.FloatTensor(class_weights[i])
if CUDA:
w = w.cuda()
# output contains scores for each level of every predicted variable
# q_d_list[i] is number of levels to variable i
# q_d_list_cumsum[i] is a cumulative sum over number of levels for variable i and all variables before it
# all variables ordered as in q_list
# (q_d_list_cumsum[i]-q_d_list[i]):q_d_list_cumsum[i] then gives exact coordinates of the scores for variable i
# among all scores in the output
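# worked example (hypothetical numbers): if q_d_list = [2, 2, 3], then q_d_list_cumsum = [2, 4, 7],
# so variable 0 uses output[:, 0:2], variable 1 uses output[:, 2:4], and variable 2 uses output[:, 4:7]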
temp = F.cross_entropy(output[:,(q_d_list_cumsum[i]-q_d_list[i]):q_d_list_cumsum[i]], target[:,i].long(), weight=w)
loss_detailed.append(temp.data.item())
loss += temp
# now we calculate AUC
y_true = target[:,i].detach().cpu().numpy() # true label
y_score = output[:,(q_d_list_cumsum[i]-q_d_list[i]):q_d_list_cumsum[i]].detach().cpu().numpy()[:,1] # score corresponding to level 1
fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score)
auc_detailed.append(metrics.auc(fpr, tpr))
return loss, loss_detailed, auc_detailed
# building class balancing weights as in
# https://datascience.stackexchange.com/questions/13490/how-to-set-class-weights-for-imbalanced-classes-in-keras
def calculate_class_weights(X):
class_weights = []
for i in q_list:
class_weights.append(
class_weight.compute_class_weight(class_weight='balanced', classes=np.unique(X[i].values), y=X[i].values))
return class_weights
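# e.g., for a binary variable with 25% positives, 'balanced' weights are n_samples/(n_classes*count) ~= [0.67, 2.0],
# so the rarer class is upweighted in the cross-entropy loss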
# extract data from a dataloader as a matrix of image features X and the corresponding labels y (plus image metrics z)
# can also black out a specified area of the loaded images before extracting the image features -- this is used in our experiments
# when the data loader is deterministic, it will load the same data in the same order on every pass
def extract_data(loader, modelred, blackout=None):
X = []
y = []
z = []
for batch_i, var in tqdm(enumerate(loader)):
data, target, immetr = var
if blackout is not None:
data[:, :, blackout[0]:blackout[1], blackout[2]:blackout[3]] = 0.0
data, target, immetr = Variable(data), Variable(target), Variable(immetr)
if CUDA:
data, target, immetr = data.cuda(), target.cuda(), immetr.cuda()
data_out = modelred(data)
X.append(data_out.detach().cpu().numpy())
y.append(target.detach().cpu().numpy())
z.append(immetr.detach().cpu().numpy())
X = np.vstack(X).squeeze()
y = np.vstack(y)
z = np.vstack(z)
return X, y, z
# function to evaluate a set of trained classifiers using the AUC metric
# 'models' contains classifiers in the order of the binary variables to be predicted -- which are contained in Y
# X is a matrix of covariates
def analytics_lin(models, X, Y):
auc = {}
for i in tqdm(range(Y.shape[1])):
y_true = Y[:,i]
mod = models[i]
# auc
y_prob = mod.predict_proba(X)[:,1]
fpr, tpr, thresholds = metrics.roc_curve(y_true, y_prob)
auc[q_list[i]] = metrics.auc(fpr, tpr)
return auc
# sequentially yield coordinates for blackout in an image
def sliding_window(image_shape, stepSize, windowSize):
# slide a window across the image
for yc in range(0, image_shape[0], stepSize):
for xc in range(0, image_shape[1], stepSize):
# yield the current window
yield (yc, yc + windowSize[1], xc, xc + windowSize[0])
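# with image_shape=(224, 224), stepSize=28, and windowSize=(28, 28) this yields an 8x8 grid of 64 patches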
# calculating decrease in AUC when blocking a particular area of an image -- over 8x8 grid placed over the image
def img_area_importance(modelred, models, svd, dat, auc_true):
patch_importance = {}
for (y0, y1, x0, x1) in sliding_window(image_shape=(224,224), stepSize = 28, windowSize=(28,28)):
loader = create_dataloader(dat,rand=False)
# X_modified_raw contains image features extracted from images with a portion of the image blocked
X_modified_raw, Y, _ = extract_data(loader, modelred, (y0, y1, x0, x1))
# image features reduced to 500 via svd
X_modified = svd.transform(X_modified_raw)
auc = analytics_lin(models, X_modified, Y)
patch_importance_q = {} # decrease in AUC after blocking this portion of the image (auc_true - auc), per variable
for q in q_list:
patch_importance_q[q] = auc_true[q] - auc[q]
patch_importance[(y0, y1, x0, x1)] = patch_importance_q # decrease in auc across all variables -- for the given blocked portion of the image
return patch_importance
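# patch_importance maps each blocked region (y0, y1, x0, x1) to a dict {variable: AUC drop caused by blocking it}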
# START OF THE RUN
torch.set_num_threads(1)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
N_EPOCHS = 20
FINETUNE = True
CUDA = torch.cuda.is_available()
batch_size=10
PATH = './data'
RESULTS = './results'
os.makedirs(RESULTS, exist_ok=True)
#finetune model just by running this script
data = pd.read_csv(PATH+'/data.csv')
# data summary stats
# data size
data.shape # observations
data['randomID'].unique().shape # users
data[data['source']==1].shape # observations - qualtrics
data['randomID'][data['source']==1].unique().shape # users - qualtrics
data[data['source']==0].shape # observations - mturk
data['randomID'][data['source']==0].unique().shape # users - mturk
# female Q11_1 stats by data source
data['Q11_1'].mean()
data['Q11_1'][data['source']==1].mean() # qualtrics
data['Q11_1'][data['source']==0].mean() # mturk
# Generating a set of useful global constants
# sorted list of variables
q_list = sorted(list(q_to_name_dict.keys()))
q_to_d_dict = {} # number of levels per variable (portions of the code were originally written to support multinomial, not only binary, variables)
random_threshold = {} # random guess threshold
prop = {} # proportion of class 1 in the data (vs. 0)
for i in q_list:
q_to_d_dict[i] = np.unique(data[i]).shape[0]
random_threshold[i] = 1.0/q_to_d_dict[i]
prop[i] = data[i].sum()/data.shape[0]
q_d_list = [q_to_d_dict[q] for q in q_list] # vector containing number of levels per variable -- where variables are ordered as in q_list
q_d_list_cumsum = np.cumsum(q_d_list) # cumulative sum over variable levels
# total number of levels across variables
n_outs=q_d_list_cumsum[-1]
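# e.g., with 349 binary variables (2 levels each; cf. the shape comments in loss_f), n_outs = 2*349 = 698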
# image metrics
im_list = sorted(list(image_metrics.keys()))
# logistic regression wrapper (fits one binary outcome ytr on covariates Xtr)
def logistic_regression(Xtr, ytr):
return LogisticRegression(penalty='l2', C=0.05, random_state=0, tol=1e-6, max_iter=int(1e7),
solver='lbfgs', class_weight='balanced').fit(Xtr, ytr)
# train many regressions
def train_eval_regressions(Xtr, Ytr, Xts, Yts):
lin_models = []
for i in tqdm(range(len(q_list))):
clf = logistic_regression(Xtr, Ytr[:,i])
lin_models.append(clf)
auc = analytics_lin(lin_models, Xts, Yts)
return auc, lin_models
# TRAINING
np.random.seed(999)
torch.manual_seed(999)
# load a pretrained resnet-50 network
model = get_pretrained()
# modelred is a subset of model that outputs a vector of image features per image
modelred = torch.nn.Sequential(*list(model.children())[:-1])
modelred.eval()
if CUDA:
modelred.cuda()
n_reps = 20 # number of repeats of 5-fold cross-validation
gkf = KFold(n_splits=5)
results_auc = []
results_patch_importance = []
results_auc_cropped = []
results_auc_demographics = []
results_auc_browser = []
results_auc_shallowfacemetrics = []
results_auc_browser_demographics = []
results_auc_browser_shallowfacemetrics = []
results_auc_demographics_shallowfacemetrics = []
results_auc_browser_demographics_shallowfacemetrics = []
results_auc_all_plus_img = []
results_auc_all_plus_img_cropped = []
# individual IDs
IDs = data['randomID'].unique()
for rep in tqdm(range(n_reps)):
# shuffling every repetition to get new folds via cv procedure
np.random.shuffle(IDs)
data_shuffled = data.sample(frac=1.0) # shuffling observations too
for trainID, testID in tqdm(gkf.split(IDs)):
# extracting split data
data_train = data_shuffled[data_shuffled['randomID'].isin(IDs[trainID])]
data_test = data_shuffled[data_shuffled['randomID'].isin(IDs[testID])]
# calculating class weights to balance data -- in order of q_list
class_weights = calculate_class_weights(data_train)
# creating data loaders
loader_train = create_dataloader(data_train,rand=False)
if FINETUNE:
loader_train_rand = create_dataloader(data_train,rand=True)
loader_test = create_dataloader(data_test,rand=False)
# finetuning model
if FINETUNE:
finetune_and_save(loader_train_rand, loader_test) # saves to RESULTS+"/finetuned_model"
model = torch.load(RESULTS+"/finetuned_model")
modelred = torch.nn.Sequential(*list(model.children())[:-1])
modelred.eval()
if CUDA:
modelred.cuda()
# extracting image features, labels, and ratios calculated from images (used as control)
X_train_raw, Y_train, Z_train = extract_data(loader_train, modelred)
X_test_raw, Y_test, Z_test = extract_data(loader_test, modelred)
# reducing number of features
svd = TruncatedSVD(n_components=500, random_state=0, n_iter=100).fit(X_train_raw)
X_train = svd.transform(X_train_raw)
X_test = svd.transform(X_test_raw)
# creating data loaders - CROPPED
loader_train_cropped = create_dataloader(data_train,rand=False,cropped=True)
loader_test_cropped = create_dataloader(data_test,rand=False,cropped=True)
# extracting image features and labels
X_train_raw_cropped, _, _ = extract_data(loader_train_cropped, modelred)
X_test_raw_cropped, _, _ = extract_data(loader_test_cropped, modelred)
# reducing number of features
svd_cropped = TruncatedSVD(n_components=500, random_state=0, n_iter=100).fit(X_train_raw_cropped)
X_train_cropped = svd_cropped.transform(X_train_raw_cropped)
X_test_cropped = svd_cropped.transform(X_test_raw_cropped)
# variables
demographic_vars = ['Q11_1','Q11_2','Q12_1','Q12_2','Q13_1','Q13_2','Q13_3','Q13_4']
browser_vars = ['Q6_1_TEXT_0', 'Q6_1_TEXT_1']
demographic_index = [ i for i in range(len(q_list)) if q_list[i] in demographic_vars]
browser_index = [ i for i in range(len(q_list)) if q_list[i] in browser_vars]
demographic_browser_index = [ i for i in range(len(q_list)) if q_list[i] in (demographic_vars+browser_vars)]
# TRAINING
# deep image features
auc, lin_models = train_eval_regressions(X_train, Y_train, X_test, Y_test)
results_auc.append(auc)
# heat maps - image area importance
patch_importance = img_area_importance(modelred, lin_models, svd, data_test, auc)
results_patch_importance.append(patch_importance)
# deep image features CROPPED
auc, lin_models = train_eval_regressions(X_train_cropped, Y_train, X_test_cropped, Y_test)
results_auc_cropped.append(auc)
# demographics
auc, lin_models = train_eval_regressions(Y_train[:,demographic_index], Y_train, Y_test[:,demographic_index], Y_test)
results_auc_demographics.append(auc)
# browser
auc, lin_models = train_eval_regressions(Y_train[:,browser_index], Y_train, Y_test[:,browser_index], Y_test)
results_auc_browser.append(auc)
# manual (shallow) facial metrics
auc, lin_models = train_eval_regressions(Z_train, Y_train, Z_test, Y_test)
results_auc_shallowfacemetrics.append(auc)
# browser + demographics
auc, lin_models = train_eval_regressions(Y_train[:,demographic_browser_index], Y_train, Y_test[:,demographic_browser_index], Y_test)
results_auc_browser_demographics.append(auc)
# browser + manual facial metrics
auc, lin_models = train_eval_regressions(np.concatenate([Y_train[:,browser_index], Z_train],1), Y_train,
np.concatenate([Y_test[:,browser_index], Z_test],1), Y_test)
results_auc_browser_shallowfacemetrics.append(auc)
# demographics + manual facial metrics
auc, lin_models = train_eval_regressions(np.concatenate([Y_train[:,demographic_index], Z_train],1), Y_train,
np.concatenate([Y_test[:,demographic_index], Z_test],1), Y_test)
results_auc_demographics_shallowfacemetrics.append(auc)
# browser + demographics + manual facial metrics
auc, lin_models = train_eval_regressions(np.concatenate([Y_train[:,demographic_browser_index], Z_train],1), Y_train,
np.concatenate([Y_test[:,demographic_browser_index], Z_test],1), Y_test)
results_auc_browser_demographics_shallowfacemetrics.append(auc)
# browser + demographics + manual facial metrics + deep image features
auc, lin_models = train_eval_regressions(np.concatenate([X_train, Y_train[:,demographic_browser_index], Z_train],1), Y_train,
np.concatenate([X_test, Y_test[:,demographic_browser_index], Z_test],1), Y_test)
results_auc_all_plus_img.append(auc)
auc, lin_models = train_eval_regressions(np.concatenate([X_train_cropped, Y_train[:,demographic_browser_index], Z_train],1), Y_train,
np.concatenate([X_test_cropped, Y_test[:,demographic_browser_index], Z_test],1), Y_test)
results_auc_all_plus_img_cropped.append(auc)
# saving results
pd.DataFrame(results_auc).to_csv(RESULTS+'/crossvalidation_auc.csv', index=False)
pd.DataFrame(results_auc_cropped).to_csv(RESULTS+'/crossvalidation_auc_cropped.csv', index=False)
pd.DataFrame(results_auc_demographics).to_csv(RESULTS+'/crossvalidation_auc_demographics.csv', index=False)
pd.DataFrame(results_auc_browser).to_csv(RESULTS+'/crossvalidation_auc_browser.csv', index=False)
pd.DataFrame(results_auc_shallowfacemetrics).to_csv(RESULTS+'/crossvalidation_auc_shallowfacemetrics.csv', index=False)
pd.DataFrame(results_auc_browser_demographics).to_csv(RESULTS+'/crossvalidation_auc_browser_demographics.csv', index=False)
pd.DataFrame(results_auc_browser_shallowfacemetrics).to_csv(RESULTS+'/crossvalidation_auc_browser_shallowfacemetrics.csv', index=False)
pd.DataFrame(results_auc_demographics_shallowfacemetrics).to_csv(RESULTS+'/crossvalidation_auc_demographics_shallowfacemetrics.csv', index=False)
pd.DataFrame(results_auc_browser_demographics_shallowfacemetrics).to_csv(RESULTS+'/crossvalidation_auc_browser_demographics_shallowfacemetrics.csv', index=False)
pd.DataFrame(results_auc_all_plus_img).to_csv(RESULTS+'/crossvalidation_auc_all_plus_img.csv', index=False)
pd.DataFrame(results_auc_all_plus_img_cropped).to_csv(RESULTS+'/crossvalidation_auc_all_plus_img_cropped.csv', index=False)
# saving patch_importance
patch_importance = {}
for q in q_list:
arr = np.zeros((224,224))
for (y0, y1, x0, x1) in sliding_window(image_shape=(224,224), stepSize = 28, windowSize=(28,28)):
arr[y0:y1, x0:x1] = np.mean([i[(y0, y1, x0, x1)][q] for i in results_patch_importance])
patch_importance[q] = arr.tolist()
with open(RESULTS+'/patch_importance.json', 'w') as fjson:
json.dump(patch_importance, fjson)
# VISUALIZATIONS
colors = ['#e6194B', '#3cb44b', '#ffe119', '#4363d8', '#f58231',
'#911eb4', '#42d4f4', '#f032e6', '#bfef45', '#fabebe',
'#469990', '#e6beff', '#9A6324', '#fffac8', '#800000',
'#aaffc3', '#808000', '#ffd8b1', '#000075', '#a9a9a9', '#ffffff', '#000000']
# extracting auc data for each fold of crossvalidation (cv) and each variable
results_auc = pd.read_csv(RESULTS+'/crossvalidation_auc.csv')
# checking normality of AUC distribution using Shapiro-Wilk test
h0_normal = np.array([scipy.stats.shapiro(results_auc[x].dropna())[1] for x in results_auc.columns])>0.05
sum(h0_normal)/h0_normal.shape[0] # 91% of variables
results_auc = results_auc.stack().reset_index()
results_auc.columns = ['cv_fold', 'var_name', 'auc']
results_auc['var_name_full'] = [q_to_full_name_dict[i] for i in results_auc['var_name']]
# calculating the mean and sd of AUC across cv folds for each variable
results_auc = results_auc[['var_name_full','var_name', 'auc']].groupby(['var_name_full','var_name'],sort=False).agg(['mean','std']).reset_index()
results_auc.columns = results_auc.columns.map('_'.join).str.strip('_')
# calculating a confidence interval on AUC for each variable
results_auc['auc_l'] = results_auc['auc_mean'] - 2*results_auc['auc_std']
results_auc['auc_u'] = results_auc['auc_mean'] + 2*results_auc['auc_std']
# mean value of the variable in the full data
temp = data[q_list].mean().reset_index()
temp.columns = ['index', 'var_mean']
results_auc = results_auc.merge(temp, left_on='var_name', right_on='index')
results_auc = results_auc.drop(columns='index')
# p-values: probability that AUC <= 0.5 under a normal approximation across cv folds
results_auc['p_val'] = [scipy.stats.norm(results_auc['auc_mean'].iloc[i], results_auc['auc_std'].iloc[i]).cdf(0.5) for i in range(results_auc.shape[0])]
results_auc['p_val'] = results_auc['p_val'].fillna(0.0) # for variables predicted perfectly with variance 0 - clearly, significantly predicted
# save auc analysis
results_auc.to_csv(RESULTS+'/results_auc.csv')
# analysis by group
results_auc_g = results_auc.copy()
results_auc_g['group_name'] = np.nan
for gr in var_groups.keys():
ind = results_auc_g['var_name'].isin(var_groups[gr])
results_auc_g.loc[ind,'group_name'] = gr
# drop variables without specified groups (e.g., data source)
results_auc_g = results_auc_g.dropna()
# merge with nice group names
results_auc_g = meta_groups.merge(results_auc_g, how='right', left_on='l3', right_on='group_name', sort=False)
results_auc_g_full = results_auc_g.copy()
# calculating group-level means of AUC (and its CI bounds) by variable group
results_auc_g = results_auc_g[['l0', 'l2', 'group_name', 'auc_mean', 'auc_l', 'auc_u']].groupby(['l0', 'l2', 'group_name'],sort=False).mean().reset_index()
results_auc_g.to_csv(RESULTS+'/results_auc_by_group.csv')
results_auc_g = results_auc_g.sort_values('auc_mean', ascending=False)
# GROUP MEANS
# Func to draw line segment
def newline(p1, p2, linewidth =1.0, color='firebrick'):
ax = plt.gca()
l = mlines.Line2D([p1[0],p2[0]], [p1[1],p2[1]], linewidth = linewidth, color=color)
ax.add_line(l)
return l
# plot group results as group chart with error bars
plt.figure(figsize=(6,8), dpi=300)
# sets vertical index
plt.hlines(y=results_auc_g['l2'].tolist(), xmin=0, xmax=1, color='gray', alpha=0.0, linewidth=.5, linestyles='dashdot')
# plots dots
plt.scatter(results_auc_g_full['auc_mean'].values, results_auc_g_full['l2'].tolist(), marker='o', s = 75., edgecolors='gray', c='w', alpha=0.3)
plt.scatter(results_auc_g['auc_mean'].values, results_auc_g['l2'].tolist(), marker='o', s = 75., color='firebrick')
plt.axvline(x=0.5, color='k', linestyle=':')
plt.xlim([0.4,1])
plt.xlabel('AUC')
plt.gca().invert_yaxis()
#plt.gca().xaxis.grid(True, alpha=.4, linewidth=.1)
#plt.legend(loc='center right')
gray_patch = plt.plot([],[], marker="o", ms=10, ls="", mec=None, markerfacecolor='w', markeredgecolor='gray', label="Variable AUC", alpha=0.3)
red_patch = plt.plot([],[], marker="o", ms=10, ls="", mec=None, color='firebrick', label="Group mean AUC")
leg = plt.legend(handles=[gray_patch[0], red_patch[0]], loc='lower right', bbox_to_anchor=(1., -0.15), ncol=2, fontsize=11.)
plt.gca().spines["top"].set_visible(False)
plt.gca().spines["bottom"].set_visible(False)
plt.gca().spines["right"].set_visible(False)
plt.gca().spines["left"].set_visible(False)
plt.grid(axis='both', alpha=.4, linewidth=.1)
plt.savefig(RESULTS+'/group_auc.pdf', bbox_inches='tight', transparent=True)
plt.close()
# INDIVIDUAL VARIABLE MEANS
results_auc = results_auc.sort_values('p_val', ascending=True)
results_auc_filtered = results_auc[results_auc['auc_l']>0.5]
# number of variables with significant AUC
results_auc_filtered.shape[0]
# % variables with significant AUC
results_auc_filtered.shape[0]/results_auc.shape[0]
# FALSE DISCOVERY RATE CONTROL (BENJAMINI-HOCHBERG PROCEDURE)
alpha = 0.05 # desired control level for FDR
plt.figure(figsize=(10,10))
plt.scatter(list(range(results_auc['p_val'].shape[0])), results_auc['p_val'], color='black')
axes = plt.gca()
x_vals = np.array(axes.get_xlim())
slope = alpha/results_auc.shape[0]
y_vals = slope * x_vals
bhline, = plt.plot(x_vals, y_vals, '--', color='red')
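# the dashed line is the Benjamini-Hochberg boundary k*alpha/m (m = number of variables); BH rejects all
# hypotheses up to the largest k whose sorted p-value falls at or below this line, controlling FDR at alpha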
plt.xlabel('k')
plt.ylabel('p-value')
plt.savefig(RESULTS+'/fdr.pdf', bbox_inches='tight', transparent=True)
plt.close()
# FDRc under Empirical Bayes view
below = results_auc['p_val'].values <= slope * np.array(list(range(1,1+results_auc['p_val'].shape[0])))
max_below = np.max(np.where(below)[0])
pth = results_auc['p_val'].values[max_below]
print('Threshold p_i:', pth) # 0.00699
results_auc[results_auc['p_val']<=pth]
results_auc[results_auc['p_val']<=pth].shape[0]
tot_fdr = max_below + 1
# confirmed results match those in
# from statsmodels.stats.multitest import multipletests
# multipletests(results_auc['p_val'].values, alpha=0.05, method='fdr_bh', is_sorted=False, returnsorted=False)[0]
import seaborn as sns # needed for the clustermap below
df = data[q_list].copy()
# correlation matrix
Xcorr = df.corr().values
# distances based on sign-less correlation matrix
d = sch.distance.squareform(1-np.abs(Xcorr))
# hierarchical clustering linkage
L = sch.linkage(d, method='single')
sns_plot = sns.clustermap(Xcorr, figsize=(40, 40), row_linkage=L, col_linkage=L, xticklabels=25, yticklabels=25, linewidths=0, rasterized=True)
ax = sns_plot.ax_heatmap
cols = [df.columns[i] for i in list(sns_plot.data2d.columns)]
vl = [cols.index(q) for q in results_auc_filtered['var_name'].values[:tot_fdr]]
vl1 = [cols.index(q) for q in results_auc_filtered['var_name'].values[tot_fdr:]]
for v in vl:
ax.axvline(x=v+0.5, ymin=0, ymax=(sns_plot.data2d.shape[1]-v-0.5)/sns_plot.data2d.shape[1], color='#42d4f4', linewidth=2)
for v in vl1:
ax.axvline(x=v+0.5, ymin=0, ymax=(sns_plot.data2d.shape[1]-v-0.5)/sns_plot.data2d.shape[1], color='#42d4f4', linewidth=2, ls='--')
# ax.set_xticklabels([q_to_full_name_dict[i] for i in cols], fontsize = 7) #ax.get_xmajorticklabels()
# ax.set_yticklabels([q_to_full_name_dict[i] for i in cols], fontsize = 7)
ax.set_xticklabels(list(range(0,len(cols),25)), fontsize = 20) #ax.get_xmajorticklabels()
ax.set_yticklabels(list(range(0,len(cols),25)), fontsize = 20)
sns_plot.fig.axes[-1].tick_params(labelsize=25)
sns_plot.savefig(RESULTS+'/var_corr1.pdf')
plt.close()
pd.DataFrame.from_dict({'Variable':[q_to_full_name_dict[i] for i in cols],
'Question': cols}).reset_index().to_csv(RESULTS+'/var_corr1_order.csv',index=False)
# calculating the mean and sd of each variable across respondents (in clustermap column order)
temp = df[cols].stack().reset_index()
temp.columns = ['respondent', 'var_name', 'value']
temp['var_name_full'] = [q_to_full_name_dict[q] for q in temp['var_name'].tolist()]
temp = temp[['var_name_full', 'var_name', 'value']].groupby(['var_name_full', 'var_name'],sort=False).agg(['mean','std']).reset_index()
temp.to_csv(RESULTS+'/var_corr1_order_summary.csv')
# PCA ANALYSIS
pca = PCA().fit(data[q_list])
# scree plot
plt.figure(figsize=(10, 10))
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('Number of components')
plt.ylabel('Cumulative explained variance')
plt.savefig(RESULTS+'/pca_scree.pdf', bbox_inches='tight', transparent=True)
plt.close()
# min number of factors that explain 50% of variance -- 47
sum(np.cumsum(pca.explained_variance_ratio_)<0.5)+1
# INDIVIDUAL VARIABLES - PART 1
# plot group results as group chart with error bars
plt.figure(figsize=(6,16), dpi=300)
# sets vertical index
plt.hlines(y=results_auc_filtered['var_name_full'].tolist()[:tot_fdr], xmin=0, xmax=1, color='gray', alpha=0.0, linewidth=.5, linestyles='dashdot')
# plots dots
plt.scatter(results_auc_filtered['auc_mean'].values[:tot_fdr], results_auc_filtered['var_name_full'].tolist()[:tot_fdr], marker='o', s = 75., color='firebrick')
# line segments
for i, p1, p2 in zip(results_auc_filtered['var_name_full'][:tot_fdr],
results_auc_filtered['auc_l'].values[:tot_fdr],
results_auc_filtered['auc_u'].values[:tot_fdr]):
newline([p1, i], [p2, i])
plt.axvline(x=0.5, color='k', linestyle=':')
plt.xlim([0.4,1])
plt.xlabel('AUC')
plt.gca().invert_yaxis()
#plt.gca().xaxis.grid(True, alpha=.4, linewidth=.1)
#plt.legend(loc='center right')
red_patch = plt.plot([],[], marker="o", ms=10, ls="", mec=None, color='firebrick', label="AUC")
red_line = mlines.Line2D([0], [0], linewidth = 1.0, color='firebrick', label="[AUC-2SE : AUC+2SE]")
leg = plt.legend(handles=[red_patch[0], red_line], loc='lower right', bbox_to_anchor=(1., -0.1), ncol=2, fontsize=11.)
plt.gca().spines["top"].set_visible(False)
plt.gca().spines["bottom"].set_visible(False)
plt.gca().spines["right"].set_visible(False)
plt.gca().spines["left"].set_visible(False)
plt.grid(axis='both', alpha=.4, linewidth=.1)
plt.savefig(RESULTS+'/variable_auc.pdf', bbox_inches='tight', transparent=True)
plt.close()
# INDIVIDUAL VARIABLES - PART 2
# plot group results as group chart with error bars
plt.figure(figsize=(6,16), dpi=300)
# sets vertical index
plt.hlines(y=results_auc_filtered['var_name_full'].tolist()[tot_fdr:], xmin=0, xmax=1, color='gray', alpha=0.0, linewidth=.5, linestyles='dashdot')
# plots dots
plt.scatter(results_auc_filtered['auc_mean'].values[tot_fdr:], results_auc_filtered['var_name_full'].tolist()[tot_fdr:], marker='o', s = 75., color='firebrick')
# line segments
for i, p1, p2 in zip(results_auc_filtered['var_name_full'][tot_fdr:],
results_auc_filtered['auc_l'].values[tot_fdr:],
results_auc_filtered['auc_u'].values[tot_fdr:]):
newline([p1, i], [p2, i])
plt.axvline(x=0.5, color='k', linestyle=':')
plt.xlim([0.4,1])
plt.xlabel('AUC')
plt.gca().invert_yaxis()
#plt.gca().xaxis.grid(True, alpha=.4, linewidth=.1)
#plt.legend(loc='center right')
red_patch = plt.plot([],[], marker="o", ms=10, ls="", mec=None, color='firebrick', label="AUC")
red_line = mlines.Line2D([0], [0], linewidth = 1.0, color='firebrick', label="[AUC-2SE : AUC+2SE]")
leg = plt.legend(handles=[red_patch[0], red_line], loc='lower right', bbox_to_anchor=(1., -0.1), ncol=2, fontsize=11.)
plt.gca().spines["top"].set_visible(False)
plt.gca().spines["bottom"].set_visible(False)
plt.gca().spines["right"].set_visible(False)
plt.gca().spines["left"].set_visible(False)
plt.grid(axis='both', alpha=.4, linewidth=.1)
plt.savefig(RESULTS+'/variable_auc_2.pdf', bbox_inches='tight', transparent=True)
plt.close()
# BENCHMARK PLOT
def benchmark(ref, target, saved):
# reference model
# extracting auc data for each fold of crossvalidation (cv) and each variable
results_reference = pd.read_csv(ref)
results_reference = results_reference.stack().reset_index()
results_reference.columns = ['cv_fold', 'var_name', 'auc']
# calculating the mean and sd of AUC across cv folds for each variable
results_reference = results_reference[['var_name', 'auc']].groupby(['var_name'],sort=False).agg(['mean','std']).reset_index()
results_reference.columns = results_reference.columns.map('_'.join).str.strip('_')
# calculating a confidence interval on AUC for each variable
results_reference['auc_l'] = results_reference['auc_mean'] - 2*results_reference['auc_std']
results_reference['auc_u'] = results_reference['auc_mean'] + 2*results_reference['auc_std']
# p values
results_reference['p_val'] = [scipy.stats.norm(results_reference['auc_mean'].iloc[i], results_reference['auc_std'].iloc[i]).cdf(0.5) for i in range(results_reference.shape[0])]
results_reference['p_val'] = results_reference['p_val'].fillna(0.0)
results_reference = results_reference.sort_values('p_val', ascending=True)
# significance 2SE
results_reference['significance_2se'] = 1*(results_reference['auc_l'] > 0.5)
# significance FDR (REQUIRES THAT p-values are sorted in ascending order)
alpha = 0.05 # desired control level for FDR
slope = alpha/results_reference.shape[0]
below = results_reference['p_val'].values <= slope * np.array(list(range(1,1+results_reference['p_val'].shape[0])))
results_reference['significance_fdr'] = 1*below
# reference + extra features model
results_target = pd.read_csv(target)
results_target = results_target.stack().reset_index()
results_target.columns = ['cv_fold', 'var_name', 'auc']
# calculating the mean and sd of AUC across cv folds for each variable
results_target = results_target[['var_name', 'auc']].groupby(['var_name'],sort=False).agg(['mean','std']).reset_index()
results_target.columns = results_target.columns.map('_'.join).str.strip('_')
# calculating a confidence interval on AUC for each variable
results_target['auc_l'] = results_target['auc_mean'] - 2*results_target['auc_std']
results_target['auc_u'] = results_target['auc_mean'] + 2*results_target['auc_std']
# p values
results_target['p_val'] = [scipy.stats.norm(results_target['auc_mean'].iloc[i], results_target['auc_std'].iloc[i]).cdf(0.5) for i in range(results_target.shape[0])]
results_target['p_val'] = results_target['p_val'].fillna(0.0)
results_target = results_target.sort_values('p_val', ascending=True)
# significance 2SE
results_target['significance_2se'] = 1*(results_target['auc_l'] > 0.5)
# significance FDR (REQUIRES THAT p-values are sorted in ascending order)
alpha = 0.05 # desired control level for FDR
slope = alpha/results_target.shape[0]
below = results_target['p_val'].values <= slope * np.array(list(range(1,1+results_target['p_val'].shape[0]))) # use <=, consistent with the reference model above
results_target['significance_fdr'] = 1*below
# merging
results_reference = results_reference.merge(results_target, how='outer', on='var_name', sort=False)
results_reference['improvement'] = (results_reference['auc_mean_y']/results_reference['auc_mean_x']-1)
results_reference = results_reference.sort_values('improvement', ascending=False)
results_reference['var_name_full'] = [q_to_full_name_dict[i] for i in results_reference['var_name']]
#results_reference = results_reference[results_reference['auc_l_y']>0.5]
results_reference['significance_2se_incr'] = results_reference['significance_2se_y'] > results_reference['significance_2se_x']
results_reference['significance_fdr_incr'] = results_reference['significance_fdr_y'] > results_reference['significance_fdr_x']
results_reference[['var_name_full', 'improvement', 'auc_mean_x', 'auc_mean_y', 'p_val_x', 'p_val_y', 'significance_2se_x', 'significance_2se_y', 'significance_fdr_x', 'significance_fdr_y', 'significance_2se_incr', 'significance_fdr_incr']].to_csv(saved+'.csv',index=False)
k=25
# Visualizing improvement for the top k variables
plt.figure(figsize=(6,10), dpi=300)
# plots dots
plt.scatter(results_reference['improvement'].values[:k], results_reference['var_name_full'].tolist()[:k], marker='o', s = 75., color='firebrick')
plt.gca().xaxis.set_major_formatter(mtick.PercentFormatter())
plt.rc('text', usetex=True)
for a0, a1, c, v in zip(results_reference['auc_mean_x'].values[:k],results_reference['auc_mean_y'].values[:k], results_reference['improvement'].values[:k], results_reference['var_name_full'].tolist()[:k], ):
plt.text(c+1, v, r'{} $\rightarrow$ {}'.format(round(a0,2),round(a1,2)), horizontalalignment='left', verticalalignment='center', fontdict={'size':10})
plt.rc('text', usetex=False)
#plt.xlim([0.,30])
plt.xlabel('Percent improvement in AUC')
plt.gca().invert_yaxis()
plt.gca().spines["top"].set_visible(False)
plt.gca().spines["bottom"].set_visible(False)
plt.gca().spines["right"].set_visible(False)
plt.gca().spines["left"].set_visible(False)
plt.grid(axis='both', alpha=.4, linewidth=.1)
plt.savefig(saved+'.pdf', bbox_inches='tight', transparent=True)
plt.close()
benchmark(ref=RESULTS+'/crossvalidation_auc_demographics.csv',
target=RESULTS+'/crossvalidation_auc_browser_demographics.csv',
saved=RESULTS+'/improvement_d_bd')
benchmark(ref=RESULTS+'/crossvalidation_auc_browser_demographics.csv',
target=RESULTS+'/crossvalidation_auc_browser_demographics_shallowfacemetrics.csv',
saved=RESULTS+'/improvement_bd_bdf')
benchmark(ref=RESULTS+'/crossvalidation_auc_browser_demographics_shallowfacemetrics.csv',
target=RESULTS+'/crossvalidation_auc_all_plus_img.csv',
saved=RESULTS+'/improvement_bdf_all')
benchmark(ref=RESULTS+'/crossvalidation_auc_browser_demographics_shallowfacemetrics.csv',
target=RESULTS+'/crossvalidation_auc_all_plus_img_cropped.csv',
saved=RESULTS+'/improvement_bdf_all_cropped')
benchmark(ref=RESULTS+'/crossvalidation_auc_all_plus_img_cropped.csv',
target=RESULTS+'/crossvalidation_auc_all_plus_img.csv',
saved=RESULTS+'/improvement_allcropped_all')
benchmark(ref=RESULTS+'/crossvalidation_auc_demographics.csv',
target=RESULTS+'/crossvalidation_auc.csv',
saved=RESULTS+'/improvement_d_deep')
benchmark(ref=RESULTS+'/crossvalidation_auc_demographics.csv',
target=RESULTS+'/crossvalidation_auc_cropped.csv',
saved=RESULTS+'/improvement_d_deep_cropped')
benchmark(ref=RESULTS+'/crossvalidation_auc_demographics.csv',
target=RESULTS+'/crossvalidation_auc_all_plus_img.csv',
saved=RESULTS+'/improvement_d_all')
benchmark(ref=RESULTS+'/crossvalidation_auc_cropped.csv',
target=RESULTS+'/crossvalidation_auc.csv',
saved=RESULTS+'/improvement_deepcropped_deep')
# number of significantly predictable variables by model
def waterfall(paths, model_names, saved):
res = []
for p in paths:
temp = pd.read_csv(p)
'''
This script contains examples of functions that can be used from the Pandas
module.
'''
# Series ---------------------------------------------------------------------
import pandas as pd
import numpy as np
# Creating series
pd.Series(data=[1,2,3,4]) # list
pd.Series(data=[1,2,3,4], index=['a','b','c','d']) # custom index
pd.Series(data={'a':1, 'b':2, 'c':3, 'd':4}) # dictionary
# Indexing series
ser_1 = pd.Series(data=[1,2,3,4], index=['a','b','c','d']); ser_1
ser_1['b']
ser_1[2]
# Joining Series
ser_1 = pd.Series(data=[1,2,3,4], index=['a','b','c','d']); ser_1
ser_2 = pd.Series(data=[1,2,5,4], index=['a','b','e','d']); ser_2
ser_1 + ser_2
# NOTE: Pandas joins series by INDEX. This is why there are 2 NaN values.
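# Supplying a fill value treats unmatched index entries as 0 instead of producing NaN
ser_1.add(ser_2, fill_value=0)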
# DataFrames - Basics --------------------------------------------------------
import pandas as pd
import numpy as np
df_a = pd.DataFrame({'A': list(range(43))[-6:],
'B': [pd.Timestamp('20180725')] * 5 + [None],
'C': ['cat', 'dog', 'fish', None, 'bird', 'snail'],
'D': [2/3, 1/2, None, 8/3, 1/9, 6/2]})
df_b = pd.DataFrame(data=np.random.randn(6, 4),
index=pd.date_range(start = '20180621', periods = 6),
columns=['col{}'.format(num) for num in list('1234')])
df_a
df_b
# Column types
df_a.dtypes
df_b.dtypes
df_a.head()
df_a.tail()
df_a.index
df_b.index
df_b.reset_index()
df_a.columns
df_b.columns
df_a.values
df_b.values
df_b.shift(periods = 1)
df_b.sub(100)
df_b.add(100)
df_a.info()
df_a.describe() # Summary Metrics
df_a.T # Transpose
df_a.transpose() # Same thing as T
df_b.sort_index(axis = 1, ascending = False) # Sort column or row order
df_b.sort_values(by = 'col2', ascending = True) # Sort rows
# DataFrames - Selecting -----------------------------------------------------
import pandas as pd
import numpy as np
# Select Columns
df_b.col1 # NOTE: Avoid attribute-style access -- column names can clash with DataFrame methods/attributes.
df_b['col1']
df_b[['col1', 'col3']]
# Select Rows
df_b[:3]
df_b['20180623':'20180625']
# .loc - Select by INDEX (Label)
df_a
df_b
df_a.loc[2] # row @ index 2
df_b.loc['20180623'] # row @ index '20180623'
df_b.loc[:, ['col1', 'col3']]
df_b.loc['20180623':'20180625', ['col1', 'col3']]
df_a.loc[3, 'C']
df_a.at[3, 'C'] # .at is faster than .loc for single values
df_a.loc[df_a['D'].idxmax()]
df_a.loc[df_a['D'].idxmin()]
# .iloc - Select by POSITION
df_a.iloc[3] # Slice of 3rd row
df_a.iloc[3:5, :2]
df_a.iloc[[1, 4], [0, 2]]
df_a.iloc[1:3, :]
df_a.iloc[2, 2]
df_a.iat[2, 2] # .iat is faster than .iloc for single values
# Boolean Indexing & Filtering
df_b
df_b[df_b>0]
df_b[df_b['col1']<0]
df_b[(df_b['col1']>0) & (df_b['col2']<0)] # Filtering by 2 columns (and)
df_b[(df_b['col1']>0) | (df_b['col2']<0)] # Filtering by 2 columns (or)
df_a[df_a['C'].isin(['fish', 'bird'])]
# String Functions
series_a = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'])
series_a.str.lower()
series_a.str.upper()
# DataFrames - Sorting -------------------------------------------------------
import pandas as pd
import numpy as np
df_a
df_a.sort_values(by='C')
df_a.sort_values(by=['B','D'], axis=0, ascending=[True,False])
# DataFrames - Creating & Modifying Columns & Rows ---------------------------
# Creating Columns
df_a['E'] = df_a['A']; df_a
# Rename Columns
df_a.rename(columns = {'A':'col_a', 'B':'col_b', 'C':'col_c', 'D':'col_d'})
# Reset & Set Index
df_c = df_b.copy(); df_c
df_c.reset_index()
df_c # Reset did not set in place. Use 'inplace=True' for that.
df_c.reset_index(inplace=True); df_c
df_c.loc[:, 'States'] = pd.Series('CA NY WY OR CO TX'.split()); df_c
df_c.set_index('States')
# Dropping Columns
df_a['E'] = df_a['A']; df_a
df_a.drop(labels='E', axis=1) # Doesn't affect original table
df_a
df_a.drop(labels='E', axis=1, inplace=True); df_a # Affects original table
# Dropping Rows
df_a.drop(labels=2, axis=0) # Doesn't affect original table
df_a
df_a.drop(labels=2, axis=0, inplace=True); df_a # Affects original table
# Replace column values
df_a['C'].replace(['cat', 'dog', 'fish'], ['kittie', 'doggie', 'fishie'])
# DataFrames - Missing Values ------------------------------------------------
import pandas as pd
import numpy as np
# Create dataframe with missing values (NaN)
df_miss = pd.DataFrame({'A':[1,2,np.nan], 'B':[5,np.nan,np.nan], 'C':[1,2,3]})
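# Suggested follow-up examples for this section (added as a sketch; the original
# snippet ends right after creating df_miss):
df_miss.dropna() # drop rows containing any NaN
df_miss.dropna(axis=1) # drop columns containing any NaN
df_miss.fillna(value=0) # replace NaN with a constant
df_miss['A'].fillna(value=df_miss['A'].mean()) # replace NaN with the column mean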
import collections
import copy
import os
import random
import string
import sys
from argparse import ArgumentParser
import matplotlib
import matplotlib.colors as pltc
import numpy as np
import pandas as pd
import plotly.figure_factory as ff
import plotly.graph_objects as go
import pyranges as pr
import pysam
import scipy
from plotly.subplots import make_subplots
from scipy import stats
# ---------------------------------------------------------------------------------------------------------------------------------
## Argument Parser
# ---------------------------------------------------------------------------------------------------------------------------------
def get_args():
parser = ArgumentParser(
description="Creates html plots from mosdepth results based on user defined region(s)"
)
req = parser.add_argument_group("Required Arguments")
one_req = parser.add_argument_group("Required Argument - ONLY ONE")
req.add_argument(
"--gtf",
metavar="GTF File",
required=True,
help="(Required) Path to a gtf file with gene features",
)
one_req.add_argument(
"--region",
metavar="Genomic Region",
nargs="+",
default=None,
help="A tabix styled region. (Example: --region chr11:1234567-1234578) Multiple regions can be added, separated by a space. (Example: --region chr11:1234567-1234578 chr1:987654321:987655432). Either --region, --region-file, --gene, or --gene-file is required.",
)
one_req.add_argument(
"--region-file",
metavar="File of Genomic Region",
default=None,
help="A file of tabix styled regions. One region per line. Either --region, --region-file, --gene, or --gene-file is required.",
)
one_req.add_argument(
"--gene",
metavar="Gene Symbol",
nargs="+",
default=None,
help="A Gene symbol to get info for. (Example: --gene APEX1).Multiple genes can be added, separated by a space. (Example: --gene APEX1 ABCA1) Either --region, --region-file, --gene, or --gene-file is required.",
)
one_req.add_argument(
"--gene-file",
metavar="File of Gene Symbols",
default=None,
help="A file of gene symbols to get info for. One gene symbol per line. Either --region, --region-file, --gene, or --gene-file is required.",
)
parser.add_argument(
"-o",
"--output",
default="region_coverage.html",
help="Path and/or name of output file. Directories must exist. (Default = 'region_coverage.html')",
)
parser.add_argument(
"--tmpl",
metavar="Template HTML",
default="tmpl.html",
help="Path and/or name of the template html file. This is the template html file that is distributed with the script. (Default = 'tmpl.html')",
)
parser.add_argument(
"--combine",
action="store_true",
help="True or False, whether or not to combine all regions into a single html file or not. If '--combine' is added, all regions will be combined into a single html file. If '--combine' is not added, each region will be written to a separate html file. (NOTE: when --combine is set, the size of the html file will increase. Depending on the number of regions and the size of each region, the html file may be to large to load)",
)
parser.add_argument(
"--lch-cutoff",
metavar="Low Coverage Highlight Cutoff",
default=10,
help="A coverage value cutoff, where any coverage at or below the cutoff will be highlighted in per base region plot. (Default = 10)",
)
parser.add_argument(
"--sample-colors",
metavar="Sample Colors",
nargs="+",
help="A space separated list of colors to use for the samples while plotting. If --sample-colors is not used or the number of colors provided by --sample-colors does not match the number of samples then colors will be chosen at random. (Example --sample-colors green blue orange) (Default = random color per sample)",
)
req.add_argument(
"--input",
metavar="Input Coverage File(s)",
nargs="+",
required=True,
help="One ore more coverage bed files from mosdepth to get coverage info for. (3 sample example: --input sample1.per-base.bed.gz sample2.per-base.bed.gz sample3.per-base.bed.gz)",
)
return parser.parse_args()
# ---------------------------------------------------------------------------------------------------------------------------------
## Functions/Methods
# ---------------------------------------------------------------------------------------------------------------------------------
def get_dict_from_region_string(region_string):
"""
get_dict_from_region_string
===========================
Get a dictionary from a tabix styled region string. Keys will include "chrom", "start", and "end"
Parameters:
-----------
1) region_string: (str) A tabix styled region string. (Example: chr14:123456-124456)
Returns:
++++++++
1) (dict) A dictionary with keys: 'chrom', 'start', 'end' and genomic region info as values
"""
region_list = region_string.strip().replace(",", "").replace("-", ":").split(":")
region_dict = {
"chrom": str(region_list[0]),
"start": int(region_list[1]),
"end": int(region_list[2]),
}
return region_dict
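# Hypothetical usage (illustrative only, not part of the original script):
# get_dict_from_region_string("chr11:1,234,567-1,234,578")
# -> {'chrom': 'chr11', 'start': 1234567, 'end': 1234578}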
def get_sample_region_coverage(sample_tabix_object, region_dict):
"""
get_sample_region_coverage
==========================
Get the coverage values for a specific sample at a specific region interval.
A list containing per base coverage values will be returned.
Parameters:
-----------
    1) sample_tabix_object: (pysam Object) A pysam tabix object representing the bed file of a sample
2) region_dict: (dict) A dictionary with region information. Required keys include "chrom", "start", and "end"
Returns:
+++++++
1) (list) A list of coverage values representing a per base coverage score
"""
coverage = []
## Iterate over the overlapping coverage intervals
for i, coverage_line in enumerate(
sample_tabix_object.fetch(
region_dict["chrom"], int(region_dict["start"]), int(region_dict["end"])
)
):
## Create a dict for the current line
cov_dict = dict(
zip(
["chrom", "start", "end", "coverage_value"],
coverage_line.strip().split("\t"),
)
)
## Add the coverage value for each position defined by the current interval
coverage.extend(
[
int(cov_dict["coverage_value"])
for x in range(int(cov_dict["end"]) - int(cov_dict["start"]))
]
)
return coverage
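# Hypothetical usage sketch (the file name below is illustrative, not from the
# original script; assumes a bgzipped, tabix-indexed mosdepth per-base bed file):
# tbx = pysam.TabixFile("sample1.per-base.bed.gz")
# region = get_dict_from_region_string("chr11:1234567-1234578")
# per_base_coverage = get_sample_region_coverage(tbx, region)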
def plot_kde_per_sample(
per_base_coverage_by_sample, sample_labels, sample_color_dict, region_dict
):
"""
plot_kde_per_sample
===================
    Plot a kernel density estimate (KDE) of per-base coverage for each sample. Histogram values are used to
    fit the KDE, and the resulting density curves show the distribution of coverage per sample at the query
    region.
Parameters:
-----------
1) per_base_coverage_by_sample: (2d list) A 2d list, with each list representing the per base coverage values for a specific sample for the current query region
2) sample_labels: (list) A list of sample ids/labels, with each index associated with the index in per_base_coverage_by_sample. (That is, index 1 in
per_base_coverage_by_sample should represent the coverage for the sample at index 1 in sample_labels)
3) sample_color_dict: (dict) A dictionary with keys as sample ids/labels and value as a color. (Used for plotting colors)
4) region_dict: (dict) A dictionary with region information. Required keys include "chrom", "start", and "end". (See the 'get_dict_from_region_string' function)
Returns:
+++++++
1) (str) A plotly embedded html string for the per sample kde plot.
"""
fig = ff.create_distplot(
per_base_coverage_by_sample,
sample_labels,
# bin_size = .1,
# curve_type = "normal",
colors=[sample_color_dict[x] for x in sample_labels],
show_rug=False,
show_hist=False,
)
fig.update_layout(
title_text="Density Coverage Distribution for {}:{}-{}".format(
region_dict["chrom"], region_dict["start"], region_dict["end"]
)
)
fig.update_layout(legend_title_text="Samples")
fig.update_layout(xaxis_title="Coverage")
fig.update_layout(yaxis_title="Density")
return fig.to_html(full_html=False, include_plotlyjs="cdn")
def plot_z_score_distribution(
per_base_coverage_by_sample, sample_labels, sample_color_dict, region_dict
):
"""
plot_z_score_distribution
=========================
Plot the distribution of per base coverage scores by sample converted to z-scores. The distribution will be converted to density scores using a kernel density estimator.
    The z-scores are relative to each sample's mean. That is, each sample's z-scores are determined by that sample's own coverage distribution.
Parameters:
-----------
1) per_base_coverage_by_sample: (2d list) A 2d list, with each list representing the per base coverage values for a specific sample for the current query region
2) sample_labels: (list) A list of sample ids/labels, with each index associated with the index in per_base_coverage_by_sample. (That is, index 1 in
per_base_coverage_by_sample should represent the coverage for the sample at index 1 in sample_labels)
3) sample_color_dict: (dict) A dictionary with keys as sample ids/labels and value as a color. (Used for plotting colors)
4) region_dict: (dict) A dictionary with region information. Required keys include "chrom", "start", and "end". (See the 'get_dict_from_region_string' function)
Returns:
+++++++
1) (str) A plotly embedded html string for the per sample z-score kde plot.
2) (2d list) A 2d list representing the relative coverage corrected z-scores per sample
"""
## get z scores
z_scores = [stats.zscore(x) for x in per_base_coverage_by_sample]
fig = ff.create_distplot(
z_scores,
sample_labels,
colors=[sample_color_dict[x] for x in sample_labels],
show_rug=False,
show_hist=False,
)
fig.update_layout(
title_text="Density Z-Score Coverage Distribution for {}:{}-{}".format(
region_dict["chrom"], region_dict["start"], region_dict["end"]
)
)
fig.update_layout(legend_title_text="Samples")
fig.update_layout(xaxis_title="Relative Z-Score for Sample Coverage")
fig.update_layout(yaxis_title="Density")
return (fig.to_html(full_html=False, include_plotlyjs="cdn"), z_scores)
def get_plotly_table(per_base_coverage_by_sample, sample_labels, low_coverage_cutoff):
"""
get_plotly_table
================
Get a table that provides descriptive statistics on coverage for each sample at a specific region.
Parameters:
-----------
1) per_base_coverage_by_sample: (2d list) A 2d list, with each list representing the per base coverage values for a specific sample for the current query region
2) sample_labels: (list) A list of sample ids/labels, with each index associated with the index in per_base_coverage_by_sample. (That is, index 1 in
per_base_coverage_by_sample should represent the coverage for the sample at index 1 in sample_labels)
3) low_coverage_cutoff: (int) An int that is used to identify positions at or below low coverage
Returns:
+++++++
1) (str) A plotly embedded html string for per sample descriptive statistic table.
2) (int) The max coverage value across all samples.
    3) (bool) True or False, whether or not any of the samples has a position at or below the low coverage cutoff
"""
header = [
"<b>Samples</b>",
"<b>Mean</b>",
"<b>Median</b>",
"<b>SD</b>",
"<b>Min</b>",
"<b>Max</b>",
"<b>Range</b>",
"<b>Low Coverage Bases</b><br> coverage <= {}".format(low_coverage_cutoff),
]
samples_cell = []
mean_cell = []
median_cell = []
min_cell = []
max_cell = []
sd_cell = []
range_cell = []
low_coverage_cell = []
## get descriptive statistics
for i, cov_list in enumerate(per_base_coverage_by_sample):
samples_cell.append("<b>{}</b>".format(sample_labels[i]))
mean_cell.append("{:.3f}".format(np.mean(cov_list)))
median_cell.append("{:.3f}".format(np.median(cov_list)))
min_cell.append(np.amin(cov_list))
max_cell.append(np.amax(cov_list))
sd_cell.append("{:.3f}".format(np.std(cov_list)))
range_cell.append(np.ptp(cov_list))
low_coverage_cell.append((np.array(cov_list) <= low_coverage_cutoff).sum())
fig = go.Figure(
data=[
go.Table(
columnwidth=[100, 70, 80, 60, 60, 60, 70, 200],
header=dict(
values=header,
line_color="darkslategray",
fill_color="royalblue",
align=["left", "center"],
font=dict(color="white", size=15),
height=40,
),
cells=dict(
values=[
samples_cell,
mean_cell,
median_cell,
sd_cell,
min_cell,
max_cell,
range_cell,
low_coverage_cell,
],
line_color="darkslategray",
fill=dict(
color=[
"royalblue",
"white",
"white",
"white",
"white",
"white",
"white",
"white",
]
),
align=["left", "center"],
font=dict(
color=[
"white",
"black",
"black",
"black",
"black",
"black",
"black",
"black",
],
size=[15, 12, 12, 12, 12, 12, 12, 12],
),
height=30,
),
)
]
)
fig.update_layout(title_text="Descriptive Statistics Table")
return (
fig.to_html(full_html=False, include_plotlyjs="cdn"),
max(max_cell),
bool(max(low_coverage_cell) > 0),
)
def plot_sample_vs_sample_per_base_coverage(
per_base_coverage_by_sample, sample_labels, sample_color_dict, region_dict
):
"""
plot_sample_vs_sample_per_base_coverage
=======================================
Plot each sample's coverage vs another sample's coverage. Currently, this is limited to a max of 3 samples
Parameters:
-----------
1) per_base_coverage_by_sample: (2d list) A 2d list, with each list representing the per base coverage values for a specific sample for the current query region
2) sample_labels: (list) A list of sample ids/labels, with each index associated with the index in per_base_coverage_by_sample. (That is, index 1 in
per_base_coverage_by_sample should represent the coverage for the sample at index 1 in sample_labels)
3) sample_color_dict: (dict) A dictionary with keys as sample ids/labels and value as a color. (Used for plotting colors)
4) region_dict: (dict) A dictionary with region information. Required keys include "chrom", "start", and "end". (See the 'get_dict_from_region_string' function)
Returns:
+++++++
1) (str) A plotly embedded html string for the sample vs sample coverage plots.
"""
if len(per_base_coverage_by_sample) > 3:
print(
"\n!!WARNING!! There are more then 3 samples. Unable to plot sample vs sample"
)
return ()
fig = make_subplots(rows=3)
row_index = 0
## Subplot of per base coverage
for i, coverage_list1 in enumerate(per_base_coverage_by_sample):
for j, coverage_list2 in enumerate(per_base_coverage_by_sample):
## Skip if sample info already used
if j <= i:
continue
row_index += 1
## Get a set of unique x,y pairs
unique_pairs = set()
for cov1, cov2 in zip(coverage_list1, coverage_list2):
unique_pairs.add((cov1, cov2))
## Separate the pairs into x values and y values
x_values = []
y_values = []
for pair in unique_pairs:
x_values.append(pair[0])
y_values.append(pair[1])
fig.add_trace(
go.Scatter(x=x_values, y=y_values, showlegend=False, mode="markers"),
row=row_index,
col=1,
)
fig.update_xaxes(
title_text="{} Coverage".format(sample_labels[i]), row=row_index, col=1
)
fig.update_yaxes(
title_text="{} Coverage".format(sample_labels[j]), row=row_index, col=1
)
fig.update_layout(
title_text="Sample vs Sample per Base Coverage for region {}:{}-{}".format(
region_dict["chrom"], region_dict["start"], region_dict["end"]
)
)
return fig.to_html(full_html=False, include_plotlyjs="cdn")
def plot_proportion_coverage(
per_base_coverage_by_sample, sample_labels, sample_color_dict, region_dict
):
"""
plot_proportion_coverage
========================
Plot the proportion of bases covered in a region per each coverage cutoff for each sample.
Parameters:
-----------
1) per_base_coverage_by_sample: (2d list) A 2d list, with each list representing the per base coverage values for a specific sample for the current query region
2) sample_labels: (list) A list of sample ids/labels, with each index associated with the index in per_base_coverage_by_sample. (That is, index 1 in
per_base_coverage_by_sample should represent the coverage for the sample at index 1 in sample_labels)
3) sample_color_dict: (dict) A dictionary with keys as sample ids/labels and value as a color. (Used for plotting colors)
4) region_dict: (dict) A dictionary with region information. Required keys include "chrom", "start", and "end". (See the 'get_dict_from_region_string' function)
Returns:
+++++++
1) (str) A plotly embedded html string for the proportion coverage plot.
"""
fig = go.Figure()
for i, coverage_list in enumerate(per_base_coverage_by_sample):
x_values, y_values = get_region_coverage_proportion(coverage_list)
fig.add_trace(
go.Scatter(
x=x_values,
y=y_values,
mode="lines",
name=sample_labels[i],
line={"color": sample_color_dict[sample_labels[i]]},
)
)
fig.update_layout(
title_text="Proportion of bases covered at coverage cutoff for region {}:{}-{}".format(
region_dict["chrom"], region_dict["start"], region_dict["end"]
)
)
fig.update_xaxes(title_text="Coverage")
fig.update_yaxes(title_text="Proportion of bases")
fig.update_layout(legend_title_text="Samples")
return fig.to_html(full_html=False, include_plotlyjs="cdn")
def get_region_coverage_proportion(coverage_list):
"""
get_region_coverage_proportion
===============================
Method to get the proportion of bases covered for each coverage cutoff
Parameters:
-----------
1) coverage_list: (list) A list of coverage values
Returns:
+++++++
1) x_values: (list) A list of x axis values. (Coverage cutoffs)
2) y_values: (list) A list of y axis values. (Proportion of bases)
    NOTE: matching indices in the two lists represent an (x, y) pair
"""
## Get coverage count by coverage value
coverage_dict = collections.defaultdict(int)
for cov in coverage_list:
coverage_dict[cov] += 1
## Convert to df
rows = []
for key in sorted(coverage_dict.keys()):
rows.append([key, coverage_dict[key]])
    df = pd.DataFrame(rows, columns=["cov_value", "cov_count"])
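    ## Editor's sketch of a plausible continuation (the original snippet ends at the
    ## DataFrame above). Assumption: "proportion covered" means the fraction of bases
    ## with coverage greater than or equal to each observed cutoff.
    total_bases = df["cov_count"].sum()
    ## Reverse cumulative sum: number of bases with coverage >= each cov_value
    df["cum_count"] = df["cov_count"][::-1].cumsum()[::-1]
    x_values = df["cov_value"].tolist()
    y_values = (df["cum_count"] / total_bases).tolist()
    return x_values, y_values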
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import pandas as pd
import pandas.util.testing as pdt
import qiime2
from q2_taxa import collapse, filter_table, filter_seqs
class CollapseTests(unittest.TestCase):
def assert_index_equal(self, a, b):
# this method is derived from scikit-bio 0.5.1
pdt.assert_index_equal(a, b,
exact=True,
check_names=True,
check_exact=True)
def assert_data_frame_almost_equal(self, left, right):
# this method is derived from scikit-bio 0.5.1
pdt.assert_frame_equal(left, right,
check_dtype=True,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False)
self.assert_index_equal(left.index, right.index)
def test_collapse(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;c', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_missing_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;__', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_bad_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
with self.assertRaisesRegex(ValueError, 'of 42 is larger'):
collapse(table, taxonomy, 42)
with self.assertRaisesRegex(ValueError, 'of 0 is too low'):
collapse(table, taxonomy, 0)
def test_collapse_missing_table_ids_in_taxonomy(self):
table = pd.DataFrame([[2.0, 2.0],
[1.0, 1.0],
[9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat3'])
with self.assertRaisesRegex(ValueError, 'missing.*feat2'):
collapse(table, taxonomy, 1)
class FilterTable(unittest.TestCase):
def test_filter_no_filters(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'At least one'):
filter_table(table, taxonomy)
def test_alt_delimiter(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# include with delimiter
obs = filter_table(table, taxonomy, include='<EMAIL>',
query_delimiter='@peanut@')
pdt.assert_frame_equal(obs, table, check_like=True)
# exclude with delimiter
obs = filter_table(table, taxonomy, exclude='<EMAIL>',
query_delimiter='@peanut@')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
def test_filter_table_unknown_mode(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'Unknown mode'):
filter_table(table, taxonomy, include='bb', mode='not-a-mode')
def test_filter_table_include(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
                         index=pd.Index(['feat1', 'feat2'], name='id'),
                         columns=['Taxon']))
'''
Collect computational performance from a collection of GNU time reports.
Usage:
```
python collect_perf.py -a bt2_all.time_log -l lift.time_log -l collate.time_log \
-l to_fastq.time_log -l aln_paired.time_log -l aln_unpaired.time_log \
-l merge.time_log -l sort_all.time_log
```
<NAME>
Johns Hopkins University
2021-2022
'''
import argparse
import os
import pandas as pd
import sys
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'-a', '--aln-log',
help='Paths to the GNU time log for full alignment.')
parser.add_argument(
'-an', '--aln-name', default='aln',
help='Label of the full alignment experiment [aln].')
parser.add_argument(
'-l', '--leviosam-logs',
action='append', required=True,
help='Paths to GNU time logs for the levioSAM pipeline.')
parser.add_argument(
'-ln', '--leviosam-name', default='leviosam',
help='Label of the levioSAM experiment [leviosam].')
parser.add_argument(
'-ll', '--labels', default='',
        help=('Customized labels for the `-l` items, separated by commas. '
              'The number of labels should equal the number of `-l` arguments.'))
parser.add_argument(
'-o', '--output',
help='Path to the output TSV file.')
args = parser.parse_args()
return args
def collect_perf_core(f, ls_perf) -> None:
for line in f:
line = line.rstrip()
if line.count('Command being timed:') > 0:
cmd = line.split('"')[1].split()
program = cmd[0].split('/')[-1]
task = program + '_' + cmd[1]
elif line.count('User time (seconds):') > 0:
usr_time = float(line.split('):')[1])
elif line.count('System time (seconds):') > 0:
sys_time = float(line.split('):')[1])
elif line.count('Elapsed (wall clock) time (h:mm:ss or m:ss):') > 0:
wt = line.split('):')[1].split(':')
if len(wt) == 3:
wall_time = 60 * 60 * float(wt[0]) + 60 * float(wt[1]) + float(wt[2])
elif len(wt) == 2:
wall_time = 60 * float(wt[0]) + float(wt[1])
else:
print('error - invalid wall time format', file=sys.stderr)
exit(1)
elif line.count('Maximum resident set size (kbytes):') > 0:
max_rss = int(line.split('):')[1])
cpu_time = usr_time + sys_time
ls_perf.append([
task, usr_time, sys_time,
cpu_time, wall_time, max_rss])
return
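# Hypothetical illustration of the GNU `time -v` fields this parser relies on
# (values are made up, not taken from any real log):
#   Command being timed: "bowtie2 -x idx -U reads.fq"
#   User time (seconds): 123.45
#   System time (seconds): 6.78
#   Elapsed (wall clock) time (h:mm:ss or m:ss): 2:10.23
#   Maximum resident set size (kbytes): 3456789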
def collect_perf_list(logs_list, cols):
print(logs_list, file=sys.stderr)
ls_perf = []
for log in logs_list:
f = open(log, 'r')
# task = os.path.basename(log).split('.')[0]
collect_perf_core(f, ls_perf)
f.close()
    df = pd.DataFrame(ls_perf, columns=cols)
import pandas as pd
from sklearn import preprocessing
from scipy.sparse import coo_matrix
import numpy as np
def quora_leaky_extracting(concat):
tid1 = concat['q1_id'].values
tid2 = concat['q2_id'].values
doc_number = np.max((tid1.max(), tid2.max())) + 1
adj = coo_matrix((np.ones(len(tid1) * 2), (np.concatenate(
[tid1, tid2]), np.concatenate([tid2, tid1]))), (doc_number, doc_number))
degree = adj.sum(axis=0)
concat['q1_id_degree'] = concat['q1_id'].apply(lambda x: degree[0, x])
concat['q2_id_degree'] = concat['q2_id'].apply(lambda x: degree[0, x])
tmp = adj * adj
concat['path'] = concat.apply(
lambda row: tmp[int(row['q1_id']), int(row['q2_id'])], axis=1)
return concat
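# Editor's toy illustration (not part of the original module; an assumption about
# intent -- these look like the well-known QuoraQP "leakage" features): the 'path'
# feature counts 2-hop walks in the question co-occurrence graph, so a pair of
# questions that share a neighbour gets a non-zero value.
# _adj = coo_matrix((np.ones(4), ([0, 1, 1, 2], [1, 0, 2, 1])), (3, 3))
# (_adj * _adj).toarray()[0, 2] # == 1.0: questions 0 and 2 share question 1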
def load_quora(path='./quora'):
print('---------- Loading QuoraQP ----------')
tr = pd.read_csv(path + '/train.tsv', delimiter='\t', header=None)
tr.columns = ['is_duplicate', 'question1', 'question2', 'pair_id']
val = pd.read_csv(path + '/dev.tsv', delimiter='\t', header=None)
val.columns = ['is_duplicate', 'question1', 'question2', 'pair_id']
te = pd.read_csv(path + '/test.tsv', delimiter='\t', header=None)
te.columns = ['is_duplicate', 'question1', 'question2', 'pair_id']
data = pd.concat([tr, val, te]).fillna('')
questions = list(data['question1'].values) + list(data['question2'].values)
le = preprocessing.LabelEncoder()
le.fit(questions)
data['q1_id'] = le.transform(data['question1'].values)
data['q2_id'] = le.transform(data['question2'].values)
data = quora_leaky_extracting(data)
label = data["is_duplicate"].to_numpy()
s1_freq = data["q1_id_degree"].to_numpy()
s2_freq = data["q2_id_degree"].to_numpy()
s1s2_inter = data["path"].to_numpy()
X = pd.DataFrame({
"s1_freq": s1_freq,
"s2_freq": s2_freq,
"s1s2_inter": s1s2_inter
})
Y = label
print('Success!')
return X, Y
def load_artificial_dataset(path='./artificial_dataset'):
print('---------- Loading artificial dataset ----------')
    tr = pd.read_csv(path + '/train.tsv', delimiter='\t', header=None)
import pandas as pd
from itertools import combinations
import seaborn as sns
import matplotlib.pyplot as plt
path_to_data = '../data/preprocess.csv'
data = pd.read_csv(path_to_data)
data.utc_event_time = pd.to_datetime(data.utc_event_time)
#!/usr/bin/python
# -*- coding: utf-8 -*-
from collections import defaultdict, OrderedDict
from itertools import chain
from pathlib import Path
from typing import Dict, Tuple, List, Union, Optional
import numpy
import pandas
from pyutils.list_utils import _
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_recall_fscore_support
from sklearn.preprocessing import StandardScaler
from data_structure import GraphNode, Graph
from semantic_modeling.assembling.learning.shared_models import Example
from semantic_modeling.assembling.weak_models.multi_val_predicate import MultiValuePredicate
from semantic_modeling.config import config, get_logger
from semantic_modeling.data_io import get_semantic_models, get_cache_dir
from semantic_modeling.utilities.serializable import serialize, deserialize
def get_merged_cost(nodeA: GraphNode, nodeB: GraphNode, multi_val_predicate: MultiValuePredicate):
""""determining the merged cost from node B to node A. This one would be asymmetric"""
# make this asymmetric function
if nodeA.n_outgoing_links > nodeB.n_outgoing_links:
return 1e6
    # TODO: what about the case where their numbers of outgoing links are equal?
pseudo_outgoing_links = defaultdict(lambda: 0)
for link in chain(nodeA.iter_outgoing_links(), nodeB.iter_outgoing_links()):
pseudo_outgoing_links[link.label] += 1
total_cost = 0
for link_lbl, link_no in pseudo_outgoing_links.items():
cost = multi_val_predicate.compute_prob(link_lbl, link_no)
        # this behaves like a product of probabilities, so higher is better (more likely to merge); negate the log to turn it into a cost
if cost is None:
cost = 0 # assume that this link is always fine, then log(cost) = 0
else:
cost = -numpy.log(max(cost, 1e-6))
# cost = -max(cost, 1e-6)
total_cost += cost
return total_cost
class NodeProb(object):
logger = get_logger("app.assembling.weak_models.node_prob")
def __init__(self, example_annotator: 'ExampleAnnotator', load_classifier: bool=False):
self.example_annotator = example_annotator
self.multival_predicate = example_annotator.multival_predicate
if load_classifier:
retrain = example_annotator.training_examples is not None
self.scaler, self.classifier = self.get_classifier(retrain=retrain, train_examples=example_annotator.training_examples)
else:
self.scaler, self.classifier = None, None
def feature_extraction(self, graph: Graph, stype_score: Dict[int, Optional[float]]):
node2features = {}
for node in graph.iter_class_nodes():
prob_data_nodes = _(node.iter_outgoing_links()) \
.imap(lambda x: x.get_target_node()) \
.ifilter(lambda x: x.is_data_node()) \
.reduce(lambda a, b: a + (stype_score[b.id] or 0), 0)
similar_nodes = graph.iter_nodes_by_label(node.label)
minimum_merged_cost = min(
(get_merged_cost(node, similar_node, self.multival_predicate) for similar_node in similar_nodes))
node2features[node.id] = [
('prob_data_nodes', prob_data_nodes),
('minimum_merged_cost', minimum_merged_cost)
]
return node2features
def compute_prob(self, node2features):
X = numpy.asarray([
[p[1] for p in features]
for features in node2features.values()
])
        X = self.scaler.transform(X)  # transform returns a new array; assign it back
y_pred = self.classifier.predict_proba(X)[:, 1]
return {nid: y_pred[i] for i, nid in enumerate(node2features.keys())}
def get_classifier(self, retrain: bool, train_examples: List[Example]):
# TODO: implement this properly, currently, we have to train and save manually
cached_file = get_cache_dir(self.example_annotator.dataset, list(self.example_annotator.train_source_ids)) / "weak_models" / "node_prob_classifier.pkl"
if not cached_file.exists() or retrain:
self.logger.debug("Retrain new model")
raw_X_train = make_data(self, train_examples)
classifier = LogisticRegression(fit_intercept=True)
X_train = numpy.asarray([list(features.values())[1:] for features in raw_X_train])
X_train, y_train = X_train[:, :-1], [int(x) for x in X_train[:, -1]]
scaler = StandardScaler().fit(X_train)
            X_train = scaler.transform(X_train)  # transform returns a new array; assign it back
try:
classifier.fit(X_train, y_train)
except ValueError as e:
assert str(e).startswith("This solver needs samples of at least 2 classes in the data")
# this should be at a starter phase when we don't have any data but use ground-truth to build
X_train = numpy.vstack([X_train, [0, 0]])
y_train.append(0)
classifier.fit(X_train, y_train)
cached_file.parent.mkdir(exist_ok=True, parents=True)
serialize((scaler, classifier), cached_file)
return scaler, classifier
return deserialize(cached_file)
def make_data(node_prob, examples: List[Example]):
"""Use to create training data"""
X = []
for example in examples:
stype_score = node_prob.example_annotator.get_stype_score(example)
node2features = node_prob.feature_extraction(example.pred_sm, stype_score)
for node in example.pred_sm.iter_class_nodes():
features = OrderedDict([('provenance', '%s:%s' % (example.example_id, node.id))])
features.update(node2features[node.id])
features['label'] = example.prime2x[node.id] is not None
X.append(features)
return X
if __name__ == '__main__':
from semantic_modeling.assembling.training_workflow.mod_interface import ExampleLabelingFileInterface
from semantic_modeling.assembling.training_workflow.training_manager import WorkflowSettings
from semantic_modeling.assembling.undirected_graphical_model.model_core import ExampleAnnotator
dataset = "museum_crm"
source_models = get_semantic_models(dataset)
train_source_ids = [sm.id for sm in source_models[:12]]
# load model
workdir = Path(config.fsys.debug.as_path()) / dataset / "training_workflow"
# noinspection PyTypeChecker
settings = WorkflowSettings(
dataset,
max_iter=1,
workdir=workdir,
train_source_ids=train_source_ids,
test_source_ids=None,
model_trainer_args=None,
scenario=None)
annotator = ExampleAnnotator(dataset, train_source_ids, load_circular_dependency=False)
node_prob = NodeProb(annotator)
# make training data
train_examples, test_examples = ExampleLabelingFileInterface(settings.workdir, set()).read_examples_at_iter(0)
raw_X_train = make_data(node_prob, train_examples)
raw_X_test = make_data(node_prob, test_examples)
output_dir = Path(config.fsys.debug.as_path()) / dataset / "weak_models" / "node_prob"
output_dir.mkdir(exist_ok=True, parents=True)
pandas.DataFrame(raw_X_train).to_csv(str(output_dir / "features.train.csv"), index=False)
    pandas.DataFrame(raw_X_test).to_csv(str(output_dir / "features.test.csv"), index=False)
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64'),
columns=wrapper.columns
)
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
ma = mapped_array_nosort
ma_grouped = mapped_array_nosort_grouped
else:
ma = mapped_array
ma_grouped = mapped_array_grouped
np.testing.assert_array_equal(
ma['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
ma['a'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
np.testing.assert_array_equal(
ma['b'].id_arr,
np.array([3, 4, 5])
)
np.testing.assert_array_equal(
ma['b'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'a']].id_arr,
np.array([0, 1, 2, 0, 1, 2])
)
np.testing.assert_array_equal(
ma[['a', 'a']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'b']].id_arr,
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
ma[['a', 'b']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = ma.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped['g1'].wrapper.ndim == 2
assert ma_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert ma_grouped['g2'].wrapper.ndim == 2
assert ma_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped[['g1']].wrapper.ndim == 2
assert ma_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert ma_grouped[['g1', 'g2']].wrapper.ndim == 2
assert ma_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_magic(self):
a = vbt.MappedArray(
wrapper,
records_arr['some_field1'],
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
a_inv = vbt.MappedArray(
wrapper,
records_arr['some_field1'][::-1],
records_arr['col'][::-1],
id_arr=records_arr['id'][::-1],
idx_arr=records_arr['idx'][::-1]
)
b = records_arr['some_field2']
a_bool = vbt.MappedArray(
wrapper,
records_arr['some_field1'] > np.mean(records_arr['some_field1']),
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
b_bool = records_arr['some_field2'] > np.mean(records_arr['some_field2'])
assert a ** a == a ** 2
with pytest.raises(Exception):
_ = a * a_inv
# binary ops
# comparison ops
np.testing.assert_array_equal((a == b).values, a.values == b)
np.testing.assert_array_equal((a != b).values, a.values != b)
np.testing.assert_array_equal((a < b).values, a.values < b)
np.testing.assert_array_equal((a > b).values, a.values > b)
np.testing.assert_array_equal((a <= b).values, a.values <= b)
np.testing.assert_array_equal((a >= b).values, a.values >= b)
# arithmetic ops
np.testing.assert_array_equal((a + b).values, a.values + b)
np.testing.assert_array_equal((a - b).values, a.values - b)
np.testing.assert_array_equal((a * b).values, a.values * b)
np.testing.assert_array_equal((a ** b).values, a.values ** b)
np.testing.assert_array_equal((a % b).values, a.values % b)
np.testing.assert_array_equal((a // b).values, a.values // b)
np.testing.assert_array_equal((a / b).values, a.values / b)
# __r*__ is only called if the left object does not have an __*__ method
np.testing.assert_array_equal((10 + a).values, 10 + a.values)
np.testing.assert_array_equal((10 - a).values, 10 - a.values)
np.testing.assert_array_equal((10 * a).values, 10 * a.values)
np.testing.assert_array_equal((10 ** a).values, 10 ** a.values)
np.testing.assert_array_equal((10 % a).values, 10 % a.values)
np.testing.assert_array_equal((10 // a).values, 10 // a.values)
np.testing.assert_array_equal((10 / a).values, 10 / a.values)
# mask ops
np.testing.assert_array_equal((a_bool & b_bool).values, a_bool.values & b_bool)
np.testing.assert_array_equal((a_bool | b_bool).values, a_bool.values | b_bool)
np.testing.assert_array_equal((a_bool ^ b_bool).values, a_bool.values ^ b_bool)
np.testing.assert_array_equal((True & a_bool).values, True & a_bool.values)
np.testing.assert_array_equal((True | a_bool).values, True | a_bool.values)
np.testing.assert_array_equal((True ^ a_bool).values, True ^ a_bool.values)
# unary ops
np.testing.assert_array_equal((-a).values, -a.values)
np.testing.assert_array_equal((+a).values, +a.values)
np.testing.assert_array_equal((abs(-a)).values, abs((-a.values)))
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Mean', 'Std', 'Min', 'Median', 'Max', 'Min Index', 'Max Index'
], dtype='object')
pd.testing.assert_series_equal(
mapped_array.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
2.25, 11.777777777777779, 0.859116756396542, 11.0, 11.666666666666666, 12.666666666666666
],
index=stats_index[:-2],
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
3, 11.0, 1.0, 10.0, 11.0, 12.0, 'x', 'z'
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
6, 12.166666666666666, 1.4719601443879746, 10.0, 12.5, 14.0, 'x', 'y'
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
def test_stats_mapping(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Value Counts: test_10.0',
'Value Counts: test_11.0', 'Value Counts: test_12.0',
'Value Counts: test_13.0', 'Value Counts: test_14.0'
], dtype='object')
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
2.25, 0.5, 0.5, 0.5, 0.5, 0.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='a'),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
3, 1, 1, 1, 0, 0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
6, 1, 1, 1, 2, 1
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
mapped_array.stats(settings=dict(mapping=mapping))
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mp_mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 9)
pd.testing.assert_index_equal(stats_df.index, mp_mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# base.py ############# #
class TestRecords:
def test_config(self, tmp_path):
assert vbt.Records.loads(records['a'].dumps()) == records['a']
assert vbt.Records.loads(records.dumps()) == records
records.save(tmp_path / 'records')
assert vbt.Records.load(tmp_path / 'records') == records
def test_records(self):
pd.testing.assert_frame_equal(
records.records,
pd.DataFrame.from_records(records_arr)
)
def test_recarray(self):
np.testing.assert_array_equal(records['a'].recarray.some_field1, records['a'].values['some_field1'])
np.testing.assert_array_equal(records.recarray.some_field1, records.values['some_field1'])
def test_records_readable(self):
pd.testing.assert_frame_equal(
records.records_readable,
pd.DataFrame([
[0, 'a', 'x', 10.0, 21.0], [1, 'a', 'y', 11.0, 20.0], [2, 'a', 'z', 12.0, 19.0],
[3, 'b', 'x', 13.0, 18.0], [4, 'b', 'y', 14.0, 17.0], [5, 'b', 'z', 13.0, 18.0],
[6, 'c', 'x', 12.0, 19.0], [7, 'c', 'y', 11.0, 20.0], [8, 'c', 'z', 10.0, 21.0]
], columns=pd.Index(['Id', 'Column', 'Timestamp', 'some_field1', 'some_field2'], dtype='object'))
)
def test_is_sorted(self):
assert records.is_sorted()
assert records.is_sorted(incl_id=True)
assert not records_nosort.is_sorted()
assert not records_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert records.sort().is_sorted()
assert records.sort().is_sorted(incl_id=True)
assert records.sort(incl_id=True).is_sorted(incl_id=True)
assert records_nosort.sort().is_sorted()
assert records_nosort.sort().is_sorted(incl_id=True)
assert records_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = records['a'].values['some_field1'] >= records['a'].values['some_field1'].mean()
record_arrays_close(
records['a'].apply_mask(mask_a).values,
np.array([
(1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
mask = records.values['some_field1'] >= records.values['some_field1'].mean()
filtered = records.apply_mask(mask)
record_arrays_close(
filtered.values,
np.array([
(2, 0, 2, 12., 19.), (3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.),
(5, 1, 2, 13., 18.), (6, 2, 0, 12., 19.)
], dtype=example_dt)
)
assert records_grouped.apply_mask(mask).wrapper == records_grouped.wrapper
def test_map_field(self):
np.testing.assert_array_equal(
records['a'].map_field('some_field1').values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
records.map_field('some_field1').values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
assert records_grouped.map_field('some_field1').wrapper == \
records.map_field('some_field1', group_by=group_by).wrapper
assert records_grouped.map_field('some_field1', group_by=False).wrapper.grouper.group_by is None
def test_map(self):
@njit
def map_func_nb(record):
return record['some_field1'] + record['some_field2']
np.testing.assert_array_equal(
records['a'].map(map_func_nb).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map(map_func_nb).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map(map_func_nb).wrapper == \
records.map(map_func_nb, group_by=group_by).wrapper
assert records_grouped.map(map_func_nb, group_by=False).wrapper.grouper.group_by is None
def test_map_array(self):
arr = records_arr['some_field1'] + records_arr['some_field2']
np.testing.assert_array_equal(
records['a'].map_array(arr[:3]).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map_array(arr).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map_array(arr).wrapper == \
records.map_array(arr, group_by=group_by).wrapper
assert records_grouped.map_array(arr, group_by=False).wrapper.grouper.group_by is None
def test_apply(self):
@njit
def cumsum_apply_nb(records):
return np.cumsum(records['some_field1'])
np.testing.assert_array_equal(
records['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
records.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert records_grouped.apply(cumsum_apply_nb).wrapper == \
records.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert records_grouped.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_count(self):
assert records['a'].count() == 3
pd.testing.assert_series_equal(
records.count(),
pd.Series(
np.array([3, 3, 3, 0]),
index=wrapper.columns
).rename('count')
)
assert records_grouped['g1'].count() == 6
pd.testing.assert_series_equal(
records_grouped.count(),
pd.Series(
np.array([6, 3]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('count')
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
r = records_nosort
r_grouped = records_nosort_grouped
else:
r = records
r_grouped = records_grouped
record_arrays_close(
r['a'].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
pd.testing.assert_index_equal(
r['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
record_arrays_close(
r[['a', 'a']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(0, 1, 0, 10., 21.), (1, 1, 1, 11., 20.), (2, 1, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
record_arrays_close(
r[['a', 'b']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.), (5, 1, 2, 13., 18.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = r.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped['g1'].wrapper.ndim == 2
assert r_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert r_grouped['g2'].wrapper.ndim == 2
assert r_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped[['g1']].wrapper.ndim == 2
assert r_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert r_grouped[['g1', 'g2']].wrapper.ndim == 2
assert r_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_filtering(self):
filtered_records = vbt.Records(wrapper, records_arr[[0, -1]])
record_arrays_close(
filtered_records.values,
np.array([(0, 0, 0, 10., 21.), (8, 2, 2, 10., 21.)], dtype=example_dt)
)
# a
record_arrays_close(
filtered_records['a'].values,
np.array([(0, 0, 0, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['a'].map_field('some_field1').id_arr,
np.array([0])
)
assert filtered_records['a'].map_field('some_field1').min() == 10.
assert filtered_records['a'].count() == 1.
# b
record_arrays_close(
filtered_records['b'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['b'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['b'].map_field('some_field1').min())
assert filtered_records['b'].count() == 0.
# c
record_arrays_close(
filtered_records['c'].values,
np.array([(8, 0, 2, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['c'].map_field('some_field1').id_arr,
np.array([8])
)
assert filtered_records['c'].map_field('some_field1').min() == 10.
assert filtered_records['c'].count() == 1.
# d
record_arrays_close(
filtered_records['d'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['d'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['d'].map_field('some_field1').min())
assert filtered_records['d'].count() == 0.
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count'
], dtype='object')
pd.testing.assert_series_equal(
records.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 2.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
records.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 3
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
records.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 6
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c')
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records.stats(column='g2', group_by=group_by)
)
stats_df = records.stats(agg_func=None)
assert stats_df.shape == (4, 4)
pd.testing.assert_index_equal(stats_df.index, records.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# ranges.py ############# #
ts = pd.DataFrame({
'a': [1, -1, 3, -1, 5, -1],
'b': [-1, -1, -1, 4, 5, 6],
'c': [1, 2, 3, -1, -1, -1],
'd': [-1, -1, -1, -1, -1, -1]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
ranges = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days'))
ranges_grouped = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
class TestRanges:
def test_mapped_fields(self):
for name in range_dt.names:
np.testing.assert_array_equal(
getattr(ranges, name).values,
ranges.values[name]
)
def test_from_ts(self):
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 2, 3, 1), (2, 0, 4, 5, 1), (3, 1, 3, 5, 0), (4, 2, 0, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper.freq == day_dt
pd.testing.assert_index_equal(
ranges_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = ranges.records_readable
np.testing.assert_array_equal(
records_readable['Range Id'].values,
np.array([
0, 1, 2, 3, 4
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-01T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Closed', 'Closed', 'Closed', 'Open', 'Closed'
])
)
def test_to_mask(self):
pd.testing.assert_series_equal(
ranges['a'].to_mask(),
ts['a'] != -1
)
pd.testing.assert_frame_equal(
ranges.to_mask(),
ts != -1
)
pd.testing.assert_frame_equal(
ranges_grouped.to_mask(),
pd.DataFrame(
[
[True, True],
[False, True],
[True, True],
[True, False],
[True, False],
[True, False]
],
index=ts.index,
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_duration(self):
np.testing.assert_array_equal(
ranges['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_equal(
ranges.duration.values,
np.array([1, 1, 1, 3, 3])
)
def test_avg_duration(self):
assert ranges['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.avg_duration(),
pd.Series(
np.array([129600000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert ranges['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.max_duration(),
pd.Series(
np.array([259200000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert ranges['a'].coverage() == 0.5
pd.testing.assert_series_equal(
ranges.coverage(),
pd.Series(
np.array([0.5, 0.5, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(),
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage()
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True),
pd.Series(
np.array([1.0, 1.0, 1.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True, normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
ranges_grouped.replace(records_arr=np.repeat(ranges_grouped.values, 2)).coverage()
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage', 'Overlap Coverage',
'Total Records', 'Duration: Min', 'Duration: Median', 'Duration: Max',
'Duration: Mean', 'Duration: Std'
], dtype='object')
pd.testing.assert_series_equal(
ranges.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 1.25, pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 3, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('5 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), 4, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('1 days 12:00:00'), pd.Timedelta('1 days 00:00:00')
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c')
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges.stats(column='g2', group_by=group_by)
)
stats_df = ranges.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, ranges.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# drawdowns.py ############# #
ts2 = pd.DataFrame({
'a': [2, 1, 3, 1, 4, 1],
'b': [1, 2, 1, 3, 1, 4],
'c': [1, 2, 3, 2, 1, 2],
'd': [1, 2, 3, 4, 5, 6]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
drawdowns = vbt.Drawdowns.from_ts(ts2, wrapper_kwargs=dict(freq='1 days'))
drawdowns_grouped = vbt.Drawdowns.from_ts(ts2, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
class TestDrawdowns:
def test_mapped_fields(self):
for name in drawdown_dt.names:
np.testing.assert_array_equal(
getattr(drawdowns, name).values,
drawdowns.values[name]
)
def test_ts(self):
pd.testing.assert_frame_equal(
drawdowns.ts,
ts2
)
pd.testing.assert_series_equal(
drawdowns['a'].ts,
ts2['a']
)
pd.testing.assert_frame_equal(
drawdowns_grouped['g1'].ts,
ts2[['a', 'b']]
)
assert drawdowns.replace(ts=None)['a'].ts is None
def test_from_ts(self):
record_arrays_close(
drawdowns.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1),
(2, 0, 4, 5, 5, 5, 4.0, 1.0, 1.0, 0), (3, 1, 1, 2, 2, 3, 2.0, 1.0, 3.0, 1),
(4, 1, 3, 4, 4, 5, 3.0, 1.0, 4.0, 1), (5, 2, 2, 3, 4, 5, 3.0, 1.0, 2.0, 0)
], dtype=drawdown_dt)
)
assert drawdowns.wrapper.freq == day_dt
pd.testing.assert_index_equal(
drawdowns_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = drawdowns.records_readable
np.testing.assert_array_equal(
records_readable['Drawdown Id'].values,
np.array([
0, 1, 2, 3, 4, 5
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Peak Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-04T00:00:00.000000000', '2020-01-03T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Valley Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-05T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-03T00:00:00.000000000', '2020-01-05T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Peak Value'].values,
np.array([
2., 3., 4., 2., 3., 3.
])
)
np.testing.assert_array_equal(
records_readable['Valley Value'].values,
np.array([
1., 1., 1., 1., 1., 1.
])
)
np.testing.assert_array_equal(
records_readable['End Value'].values,
np.array([
3., 4., 1., 3., 4., 2.
])
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Recovered', 'Recovered', 'Active', 'Recovered', 'Recovered', 'Active'
])
)
def test_drawdown(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].drawdown.values,
np.array([-0.5, -0.66666667, -0.75])
)
np.testing.assert_array_almost_equal(
drawdowns.drawdown.values,
np.array([-0.5, -0.66666667, -0.75, -0.5, -0.66666667, -0.66666667])
)
pd.testing.assert_frame_equal(
drawdowns.drawdown.to_pd(),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[-0.5, np.nan, np.nan, np.nan],
[np.nan, -0.5, np.nan, np.nan],
[-0.66666669, np.nan, np.nan, np.nan],
[-0.75, -0.66666669, -0.66666669, np.nan]
]),
index=ts2.index,
columns=ts2.columns
)
)
def test_avg_drawdown(self):
assert drawdowns['a'].avg_drawdown() == -0.6388888888888888
pd.testing.assert_series_equal(
drawdowns.avg_drawdown(),
pd.Series(
np.array([-0.63888889, -0.58333333, -0.66666667, np.nan]),
index=wrapper.columns
).rename('avg_drawdown')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_drawdown(),
pd.Series(
np.array([-0.6166666666666666, -0.6666666666666666]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_drawdown')
)
def test_max_drawdown(self):
assert drawdowns['a'].max_drawdown() == -0.75
pd.testing.assert_series_equal(
drawdowns.max_drawdown(),
pd.Series(
np.array([-0.75, -0.66666667, -0.66666667, np.nan]),
index=wrapper.columns
).rename('max_drawdown')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_drawdown(),
pd.Series(
np.array([-0.75, -0.6666666666666666]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_drawdown')
)
def test_recovery_return(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_return.values,
np.array([2., 3., 0.])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_return.values,
np.array([2., 3., 0., 2., 3., 1.])
)
pd.testing.assert_frame_equal(
drawdowns.recovery_return.to_pd(),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[2.0, np.nan, np.nan, np.nan],
[np.nan, 2.0, np.nan, np.nan],
[3.0, np.nan, np.nan, np.nan],
[0.0, 3.0, 1.0, np.nan]
]),
index=ts2.index,
columns=ts2.columns
)
)
def test_avg_recovery_return(self):
assert drawdowns['a'].avg_recovery_return() == 1.6666666666666667
pd.testing.assert_series_equal(
drawdowns.avg_recovery_return(),
pd.Series(
np.array([1.6666666666666667, 2.5, 1.0, np.nan]),
index=wrapper.columns
).rename('avg_recovery_return')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_recovery_return(),
pd.Series(
np.array([2.0, 1.0]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_recovery_return')
)
def test_max_recovery_return(self):
assert drawdowns['a'].max_recovery_return() == 3.0
pd.testing.assert_series_equal(
drawdowns.max_recovery_return(),
pd.Series(
np.array([3.0, 3.0, 1.0, np.nan]),
index=wrapper.columns
).rename('max_recovery_return')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_recovery_return(),
pd.Series(
np.array([3.0, 1.0]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_recovery_return')
)
def test_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_almost_equal(
drawdowns.duration.values,
np.array([1, 1, 1, 1, 1, 3])
)
def test_avg_duration(self):
assert drawdowns['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.avg_duration(),
pd.Series(
np.array([86400000000000, 86400000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert drawdowns['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.max_duration(),
pd.Series(
np.array([86400000000000, 86400000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert drawdowns['a'].coverage() == 0.5
pd.testing.assert_series_equal(
drawdowns.coverage(),
pd.Series(
np.array([0.5, 0.3333333333333333, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
drawdowns_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
def test_decline_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].decline_duration.values,
np.array([1., 1., 1.])
)
np.testing.assert_array_almost_equal(
drawdowns.decline_duration.values,
np.array([1., 1., 1., 1., 1., 2.])
)
def test_recovery_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_duration.values,
np.array([1, 1, 0])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_duration.values,
np.array([1, 1, 0, 1, 1, 1])
)
def test_recovery_duration_ratio(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_duration_ratio.values,
np.array([1., 1., 0.])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_duration_ratio.values,
np.array([1., 1., 0., 1., 1., 0.5])
)
def test_active_records(self):
assert isinstance(drawdowns.active, vbt.Drawdowns)
assert drawdowns.active.wrapper == drawdowns.wrapper
record_arrays_close(
drawdowns['a'].active.values,
np.array([
(2, 0, 4, 5, 5, 5, 4., 1., 1., 0)
], dtype=drawdown_dt)
)
record_arrays_close(
drawdowns['a'].active.values,
drawdowns.active['a'].values
)
record_arrays_close(
drawdowns.active.values,
np.array([
(2, 0, 4, 5, 5, 5, 4.0, 1.0, 1.0, 0), (5, 2, 2, 3, 4, 5, 3.0, 1.0, 2.0, 0)
], dtype=drawdown_dt)
)
def test_recovered_records(self):
assert isinstance(drawdowns.recovered, vbt.Drawdowns)
assert drawdowns.recovered.wrapper == drawdowns.wrapper
record_arrays_close(
drawdowns['a'].recovered.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1)
], dtype=drawdown_dt)
)
record_arrays_close(
drawdowns['a'].recovered.values,
drawdowns.recovered['a'].values
)
record_arrays_close(
drawdowns.recovered.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1),
(3, 1, 1, 2, 2, 3, 2.0, 1.0, 3.0, 1), (4, 1, 3, 4, 4, 5, 3.0, 1.0, 4.0, 1)
], dtype=drawdown_dt)
)
def test_active_drawdown(self):
assert drawdowns['a'].active_drawdown() == -0.75
pd.testing.assert_series_equal(
drawdowns.active_drawdown(),
pd.Series(
np.array([-0.75, np.nan, -0.3333333333333333, np.nan]),
index=wrapper.columns
).rename('active_drawdown')
)
with pytest.raises(Exception):
drawdowns_grouped.active_drawdown()
def test_active_duration(self):
assert drawdowns['a'].active_duration() == np.timedelta64(86400000000000)
pd.testing.assert_series_equal(
drawdowns.active_duration(),
pd.Series(
np.array([86400000000000, 'NaT', 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('active_duration')
)
with pytest.raises(Exception):
drawdowns_grouped.active_duration()
def test_active_recovery(self):
assert drawdowns['a'].active_recovery() == 0.
pd.testing.assert_series_equal(
drawdowns.active_recovery(),
pd.Series(
np.array([0., np.nan, 0.5, np.nan]),
index=wrapper.columns
).rename('active_recovery')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery()
def test_active_recovery_return(self):
assert drawdowns['a'].active_recovery_return() == 0.
pd.testing.assert_series_equal(
drawdowns.active_recovery_return(),
pd.Series(
np.array([0., np.nan, 1., np.nan]),
index=wrapper.columns
).rename('active_recovery_return')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery_return()
def test_active_recovery_duration(self):
assert drawdowns['a'].active_recovery_duration() == pd.Timedelta('0 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.active_recovery_duration(),
pd.Series(
np.array([0, 'NaT', 86400000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('active_recovery_duration')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery_duration()
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage [%]', 'Total Records',
'Total Recovered Drawdowns', 'Total Active Drawdowns',
'Active Drawdown [%]', 'Active Duration', 'Active Recovery [%]',
'Active Recovery Return [%]', 'Active Recovery Duration',
'Max Drawdown [%]', 'Avg Drawdown [%]', 'Max Drawdown Duration',
'Avg Drawdown Duration', 'Max Recovery Return [%]',
'Avg Recovery Return [%]', 'Max Recovery Duration',
'Avg Recovery Duration', 'Avg Recovery Duration Ratio'
], dtype='object')
pd.testing.assert_series_equal(
drawdowns.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 44.444444444444436, 1.5, 1.0, 0.5,
54.166666666666664, pd.Timedelta('2 days 00:00:00'), 25.0, 50.0,
pd.Timedelta('0 days 12:00:00'), 66.66666666666666, 58.33333333333333,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(settings=dict(incl_active=True)),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 44.444444444444436, 1.5, 1.0, 0.5,
54.166666666666664, pd.Timedelta('2 days 00:00:00'), 25.0, 50.0,
pd.Timedelta('0 days 12:00:00'), 69.44444444444444, 62.962962962962955,
pd.Timedelta('1 days 16:00:00'), pd.Timedelta('1 days 16:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 50.0, 3, 2, 1, 75.0, pd.Timedelta('1 days 00:00:00'),
                0.0, 0.0, pd.Timedelta('0 days 00:00:00'),
# -*- coding: utf-8 -*-
"""
@author: Elie
"""
# Libraries
import datetime
import numpy as np
import pandas as pd
#plotting
import matplotlib as mpl
from matplotlib import pyplot as plt
import seaborn as sns
import os
#sklearn
from sklearn.metrics import auc, roc_curve
from sklearn.model_selection import (GridSearchCV, KFold, StratifiedKFold,
cross_val_score, train_test_split)
from sklearn.preprocessing import label_binarize
#xgboost etc
import xgboost
from xgboost import XGBClassifier
import shap
# =============================================================================
# define these feature/headers here in case the headers
# are out of order in input files (often the case)
# =============================================================================
snv_categories = ["sample",
"A[C>A]A", "A[C>A]C", "A[C>A]G", "A[C>A]T",
"C[C>A]A", "C[C>A]C", "C[C>A]G", "C[C>A]T",
"G[C>A]A", "G[C>A]C", "G[C>A]G", "G[C>A]T",
"T[C>A]A", "T[C>A]C", "T[C>A]G", "T[C>A]T",
"A[C>G]A", "A[C>G]C", "A[C>G]G", "A[C>G]T",
"C[C>G]A", "C[C>G]C", "C[C>G]G", "C[C>G]T",
"G[C>G]A", "G[C>G]C", "G[C>G]G", "G[C>G]T",
"T[C>G]A", "T[C>G]C", "T[C>G]G", "T[C>G]T",
"A[C>T]A", "A[C>T]C", "A[C>T]G", "A[C>T]T",
"C[C>T]A", "C[C>T]C", "C[C>T]G", "C[C>T]T",
"G[C>T]A", "G[C>T]C", "G[C>T]G", "G[C>T]T",
"T[C>T]A", "T[C>T]C", "T[C>T]G", "T[C>T]T",
"A[T>A]A", "A[T>A]C", "A[T>A]G", "A[T>A]T",
"C[T>A]A", "C[T>A]C", "C[T>A]G", "C[T>A]T",
"G[T>A]A", "G[T>A]C", "G[T>A]G", "G[T>A]T",
"T[T>A]A", "T[T>A]C", "T[T>A]G", "T[T>A]T",
"A[T>C]A", "A[T>C]C", "A[T>C]G", "A[T>C]T",
"C[T>C]A", "C[T>C]C", "C[T>C]G", "C[T>C]T",
"G[T>C]A", "G[T>C]C", "G[T>C]G", "G[T>C]T",
"T[T>C]A", "T[T>C]C", "T[T>C]G", "T[T>C]T",
"A[T>G]A", "A[T>G]C", "A[T>G]G", "A[T>G]T",
"C[T>G]A", "C[T>G]C", "C[T>G]G", "C[T>G]T",
"G[T>G]A", "G[T>G]C", "G[T>G]G", "G[T>G]T",
"T[T>G]A", "T[T>G]C", "T[T>G]G", "T[T>G]T"]
indel_categories = ["sample",
"1:Del:C:0", "1:Del:C:1", "1:Del:C:2", "1:Del:C:3", "1:Del:C:4", "1:Del:C:5",
"1:Del:T:0", "1:Del:T:1", "1:Del:T:2", "1:Del:T:3", "1:Del:T:4", "1:Del:T:5",
"1:Ins:C:0", "1:Ins:C:1", "1:Ins:C:2", "1:Ins:C:3", "1:Ins:C:4", "1:Ins:C:5",
"1:Ins:T:0", "1:Ins:T:1", "1:Ins:T:2", "1:Ins:T:3", "1:Ins:T:4", "1:Ins:T:5",
"2:Del:R:0", "2:Del:R:1", "2:Del:R:2", "2:Del:R:3", "2:Del:R:4", "2:Del:R:5",
"3:Del:R:0", "3:Del:R:1", "3:Del:R:2", "3:Del:R:3", "3:Del:R:4", "3:Del:R:5",
"4:Del:R:0", "4:Del:R:1", "4:Del:R:2", "4:Del:R:3", "4:Del:R:4", "4:Del:R:5",
"5:Del:R:0", "5:Del:R:1", "5:Del:R:2", "5:Del:R:3", "5:Del:R:4", "5:Del:R:5",
"2:Ins:R:0", "2:Ins:R:1", "2:Ins:R:2", "2:Ins:R:3", "2:Ins:R:4", "2:Ins:R:5",
"3:Ins:R:0", "3:Ins:R:1", "3:Ins:R:2", "3:Ins:R:3", "3:Ins:R:4", "3:Ins:R:5",
"4:Ins:R:0", "4:Ins:R:1", "4:Ins:R:2", "4:Ins:R:3", "4:Ins:R:4", "4:Ins:R:5",
"5:Ins:R:0", "5:Ins:R:1", "5:Ins:R:2", "5:Ins:R:3", "5:Ins:R:4", "5:Ins:R:5",
"2:Del:M:1", "3:Del:M:1", "3:Del:M:2", "4:Del:M:1", "4:Del:M:2", "4:Del:M:3",
"5:Del:M:1", "5:Del:M:2", "5:Del:M:3", "5:Del:M:4", "5:Del:M:5"]
cnv_categories = ["sample",
"BCper10mb_0", "BCper10mb_1", "BCper10mb_2", "BCper10mb_3",
"CN_0", "CN_1", "CN_2", "CN_3", "CN_4", "CN_5", "CN_6", "CN_7", "CN_8",
"CNCP_0", "CNCP_1", "CNCP_2", "CNCP_3", "CNCP_4", "CNCP_5", "CNCP_6", "CNCP_7",
"BCperCA_0", "BCperCA_1", "BCperCA_2", "BCperCA_3", "BCperCA_4", "BCperCA_5",
"SegSize_0", "SegSize_1", "SegSize_2", "SegSize_3", "SegSize_4", "SegSize_5",
"SegSize_6", "SegSize_7", "SegSize_8", "SegSize_9", "SegSize_10",
"CopyFraction_0", "CopyFraction_1", "CopyFraction_2", "CopyFraction_3", "CopyFraction_4",
"CopyFraction_5", "CopyFraction_6"]
### ==========================================================
# make concat sig dataframe
# ============================================================
"""load the 3 data frames and merge to one df"""
def load_data(snv_counts_path, indel_counts_path, cnv_counts_path):
df_snv = pd.read_csv(snv_counts_path, sep='\t', low_memory=False)
df_snv = df_snv[snv_categories]
df_snv["sample"] = df_snv["sample"].astype(str)
df_indel = pd.read_csv(indel_counts_path, sep='\t', low_memory=False)
df_indel = df_indel[indel_categories]
df_indel["sample"] = df_indel["sample"].astype(str)
df_cnv = pd.read_csv(cnv_counts_path, sep='\t', low_memory=False)
df_cnv = df_cnv[cnv_categories]
df_cnv["sample"] = df_cnv["sample"].astype(str)
df_sigs = pd.merge(df_snv, df_indel, on="sample", how='left').fillna(0)
df_sigs = pd.merge(df_sigs, df_cnv, on="sample", how='left').reset_index(drop=True)
return df_sigs
def get_data_and_labels_from_df(df, gene_name):
    #first encode the gene label as binary
combined_matrix_for_gene = df.copy(deep=True)
gene_name = str(gene_name)
combined_matrix_for_gene.loc[(combined_matrix_for_gene["primary_label"] == gene_name), 'primary_label'] = 1
combined_matrix_for_gene.loc[(combined_matrix_for_gene["primary_label"] != 1), 'primary_label'] = 0
    #note: without an explicit astype(int) the 1/0 labels remain dtype object and GridSearchCV rejects them
combined_matrix_for_gene["primary_label"] = combined_matrix_for_gene["primary_label"].astype('int')
#now extract 2d matrix of feature values and 1d matrix of labels
features_list = snv_categories[1:] + indel_categories[1:] + cnv_categories[1:]
X_data = combined_matrix_for_gene[features_list]
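    # XGBoost rejects feature names containing "[", "]" or "<", so mask those characters here and restore them later for plotting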
X_data.columns = X_data.columns.str.replace("[", "mm").str.replace("]", "nn").str.replace(">", "rr")
Y_labels = combined_matrix_for_gene["primary_label"]
return X_data, Y_labels
"""Can use this function on the server with many cores, takes long time without many cores"""
def do_grid_search_for_best_params(xtrain, ytrain, xtest, ytest, paramgrid):
estimator = XGBClassifier(objective='binary:logistic', nthread=1, seed=42)
grid_search = GridSearchCV(estimator=estimator, param_grid=paramgrid, scoring = 'roc_auc', n_jobs = 60, cv = 6, verbose=True)
fit_params={"eval_metric" : ['auc', 'error', 'logloss'], "eval_set" : [[xtest, ytest]]}
fitted_model = grid_search.fit(xtrain, ytrain, **fit_params)
cv_results = pd.DataFrame(fitted_model.cv_results_)
return fitted_model.best_score_, fitted_model.best_params_, fitted_model.best_estimator_, cv_results
def model_with_params(trainX, trainY, testX, testY, params, max_rounds):
estimator = XGBClassifier(n_estimators=max_rounds, nthread=40, **params)
fitted_model = estimator.fit(trainX, trainY, verbose=True)
prediction_binary_test = fitted_model.predict(testX, ntree_limit=max_rounds)
prediction_probability_test = fitted_model.predict_proba(testX, ntree_limit=max_rounds)
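    # predict_proba returns one column per class; column 1 is the probability of the positive class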
prediction_prob_of_true_test = prediction_probability_test[:,1]
prediction_binary_train = fitted_model.predict(trainX, ntree_limit=max_rounds)
prediction_probability_train = fitted_model.predict_proba(trainX, ntree_limit=max_rounds)
prediction_prob_of_true_train = prediction_probability_train[:,1]
return fitted_model, prediction_binary_test, prediction_prob_of_true_test, prediction_binary_train, prediction_prob_of_true_train
def draw_roc_curve_for_test(testY, prediction_prob_of_true_test):
fpr, tpr, _ = roc_curve(testY, prediction_prob_of_true_test)
roc_auc = auc(fpr, tpr)
fig, ax = plt.subplots()
lw = 2
ax.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
ax.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
ax.set_xlim([-0.02, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.set_title('ROC curve')
ax.legend(loc="lower right")
return fig, ax
def draw_roc_curve_for_all_data(testY, trainY, prediction_prob_of_true_test, prediction_prob_of_true_train):
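    # pool the test and train labels/probabilities to draw a single ROC curve over all samples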
all_data = pd.concat([testY, trainY])
all_prob_of_true = np.concatenate([prediction_prob_of_true_test, prediction_prob_of_true_train])
fpr, tpr, _ = roc_curve(all_data, all_prob_of_true)
roc_auc = auc(fpr, tpr)
fig, ax = plt.subplots()
lw = 2
ax.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
ax.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
ax.set_xlim([-0.02, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.set_title('ROC curve')
ax.legend(loc="lower right")
return fig, ax
def kfold_cv(Knumber, Xdata, Ylabels, model):
kfold = KFold(n_splits=Knumber)
results = cross_val_score(model, Xdata, Ylabels, cv=kfold)
return results
def shapely_values(model, Xdata, Nvalues):
import inspect
print(os.path.abspath(inspect.getfile(shap.summary_plot)))
X = Xdata.copy(deep=True)
shap_values = shap.TreeExplainer(model, feature_perturbation='tree_path_dependent').shap_values(X, check_additivity=False)
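    # undo the earlier feature-name masking so the SHAP summary plot shows the original category labels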
X.columns = X.columns.str.replace("mm", "[").str.replace("nn", "]").str.replace("rr", ">")
fig, ax = plt.subplots(figsize=(7,4))
shap.summary_plot(shap_values, X, plot_type="dot", max_display=Nvalues, show=False, plot_size=(6,3), alpha=0.7)
plt.subplots_adjust(left=0.3, right=0.94, top=0.9, bottom=0.1)
ax = plt.gca()
fig = plt.gcf()
mpl.rcParams['savefig.transparent'] = "False"
mpl.rcParams['axes.facecolor'] = "white"
mpl.rcParams['figure.facecolor'] = "white"
mpl.rcParams['font.size'] = "5"
plt.rcParams['savefig.transparent'] = "False"
plt.rcParams['axes.facecolor'] = "white"
plt.rcParams['figure.facecolor'] = "white"
return fig, ax
### ==========================================================
# get paths, load data and make df with each file merged
# ============================================================
#resolve the data file paths relative to this script's location
rootdir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
datadir = os.path.join(rootdir, "data")
cohort_data = os.path.join(datadir, "cohort.tsv")
snv_features = os.path.join(datadir, "tns_features.tsv")
ndl_features = os.path.join(datadir, "ndl_features.tsv")
cnv_features = os.path.join(datadir, "cnv_features.tsv")
outputdir = os.path.dirname(__file__)
print('Loading data at '+str(datetime.datetime.now()))
sigs = load_data(snv_features, ndl_features, cnv_features)
sample_labels = pd.read_csv(cohort_data, sep='\t', low_memory=False)
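# attach the cohort labels to the signature matrix and keep only samples of the "PC" cancer type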
df = pd.merge(sample_labels, sigs, how='left', on='sample').query('(cancer == "PC")').reset_index(drop=True)
print('Finished loading data at '+str(datetime.datetime.now()))
# =============================================================================
# model gridsearch
# =============================================================================
def gridsearch_model(gene, outputdir):
goi = str(gene)
df_good = df.copy(deep=True)
print('Start '+ goi + ' at '+str(datetime.datetime.now()))
X_data, Y_labels = get_data_and_labels_from_df(df_good, goi)
X_train, X_test, Y_train, Y_test = train_test_split(X_data, Y_labels, test_size=0.4, random_state=42, stratify=Y_labels)
print('Grid search for '+goi+' parameters '+str(datetime.datetime.now()))
max_rounds = 150000
# params = { "eta": 0.001, 'max_depth': 3, 'subsample': 0.9, 'colsample_bytree': 0.7, 'colsample_bylevel': 0.7, 'objective': 'binary:logistic', 'seed': 99, 'eval_metric':['auc', 'error', 'logloss'], 'nthread':12}
parameter_grid = {'max_depth': [i for i in range(3,4)],
'eta': [0.001],
'subsample': [i.round(1) for i in np.arange(0.5,1.01,0.1)],
'colsample_bytree': [i.round(1) for i in np.arange(0.5,1.01,0.1)],
'colsample_bylevel': [i.round(1) for i in np.arange(0.5,1.01,0.1)],
'colsample_bynode': [i.round(1) for i in np.arange(0.5,1.01,0.1)],
'seed': [i for i in range(0,50)]}
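    # this grid spans 6**4 * 50 = 64,800 parameter combinations, i.e. 388,800 fits with 6-fold CV, hence the n_jobs=60 in do_grid_search_for_best_params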
best_score_, best_params_, best_estimator_, cv_results = do_grid_search_for_best_params(X_train, Y_train, X_test, Y_test, parameter_grid)
print(goi + f" best score = {best_score_}")
print(goi + f" best parameters = {best_params_}")
cv_results.to_csv(outputdir+"/"+goi+"_cv_results.tsv",sep='\t', index=False)
fitted_model, prediction_binary_test, prediction_prob_of_true_test, prediction_binary_train, prediction_prob_of_true_train = model_with_params(X_train, Y_train, X_test, Y_test, best_params_, max_rounds)
test_df = pd.DataFrame(data={"labels":Y_test.values, "prob_of_true": prediction_prob_of_true_test, "pred_binary":prediction_binary_test})
test_df.index = Y_test.index
train_df = pd.DataFrame(data={"labels":Y_train.values, "prob_of_true": prediction_prob_of_true_train, "pred_binary":prediction_binary_train})
train_df.index = Y_train.index
all_preds_df = pd.concat([test_df, train_df])
    all_data_with_preds = pd.merge(df_good, all_preds_df, left_index=True, right_index=True)
'''
THIS IS THE BEATAML ONLY TEST FILE
'''
import sys
sys.path.append(r'C:\Users\natha\Documents\DEEP_DRUG_SH\python\UTILS')
import pickle
from matplotlib import pyplot as plt
import numpy as np
from config import * # params stored here
import utils
import pandas as pd
from torch.utils import data
if __name__ == '__main__':
with open(f"{params['MODEL_OUT_DIR']}/{params['NAME']}/model.pkl", 'rb') as f:
net = pickle.load(f)
with open(params['SPLIT_LABEL_PATH'], 'rb') as f:
label_dict = pickle.load(f)
plt.gcf()
f, axes = plt.subplots(4,2,figsize=(15,12))
    for i, dataset in enumerate(params['RESP_TYPES']):
test = label_dict[dataset]['test']
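        # stream the held-out split in large, unshuffled batches so predictions stay aligned with the label order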
gen = data.DataLoader(utils.DrugExpressionDataset(test, root_dir=params['DATA_DIR'], return_response_type=True), **{'batch_size':10000, 'shuffle':False,'num_workers':0})
yhats = []
ys = []
ii = 0
for X,y,resp_type,resp_selector in gen:
ii+=X.size(0)
print(f'predicting {dataset} ...[{ii}/{len(gen.dataset)}]', end='\r')
yhats += net.predict(X, resp_type, resp_selector).tolist()
ys += y.tolist()
#if ii > 1000:
# break
print()
mse = np.mean((np.array(ys) - np.array(yhats))**2)
        df = pd.DataFrame({'y': ys, 'yhat': yhats})
# -----------------------------------------------------------------------------
# WSDM Cup 2017 Classification and Evaluation
#
# Copyright (c) 2017 <NAME>, <NAME>, <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
from collections import OrderedDict
import logging
import numpy as np
import pandas as pd
import config
from src import constants
from src.dataset import DataSet
_logger = logging.getLogger()
# Use thousand separator and no decimal points
_FLOAT_FORMAT = '{:,.0f}'.format
# columns
REVISION_ID = 'revisionId'
SESSION_ID = 'revisionSessionId'
CONTENT_TYPE = 'contentType'
ROLLBACK_REVERTED = 'rollbackReverted'
ITEM_ID = 'itemId'
USER_NAME = 'userName'
REVISION_ACTION = 'revisionAction'
TIMESTAMP = 'timestamp'
REVISIONT_TAGS = 'revisionTags'
LANGUAGE_WORD_RATIO = 'languageWordRatio'
REVISION_LANGUAGE = 'revisionLanguage'
USER_COUNTRY = 'userCountry'
def compute_statistics(data):
_logger.info("Computing statistics...")
_logger.debug(
data['revisionAction']
.str
.cat(data['revisionSubaction'], sep='_', na_rep='na')
.value_counts())
_compute_feature_statistics(data)
_compute_corpus_statistics(data)
_compute_corpus_statistics_over_time(data)
_compute_dataset_statistics(data)
_compute_session_statistics(data)
_compute_backpressure_statistics(data)
# computes some statistics about selected features
# _compute_special_feature_statistics(data)
_logger.info("Computing statistics... done.")
def _compute_feature_statistics(data):
_logger.debug("Computing descriptive statistics...")
data.describe(include='all').to_csv(
config.OUTPUT_PREFIX + "_feature_statistics.csv")
_logger.debug("Computing descriptive statistics... done.")
def _compute_corpus_statistics(data):
"""Compute statistics for the whole corpus.
Evaluate corpus in terms of total unique users, items, sessions,
and revisions with a breakdown by content type and by vandalism
status (vandalism/non-vandalism).
"""
def compute_data_frame(data):
head_mask = data[CONTENT_TYPE] == 'TEXT'
stmt_mask = (data[CONTENT_TYPE] == 'STATEMENT')
sitelink_mask = (data[CONTENT_TYPE] == 'SITELINK')
body_mask = (stmt_mask | sitelink_mask)
result = OrderedDict()
result['Entire corpus'] = compute_column_group(data)
result['Item head'] = compute_column_group(data[head_mask])
result['Item body'] = compute_column_group(data[body_mask])
# result['STATEMENT'] = compute_column_group(data[stmt_mask])
# result['SITELINK'] = compute_column_group(data[sitelink_mask])
result = pd.concat(result, axis=1, keys=result.keys())
return result
def compute_column_group(data):
vandalism_mask = data[ROLLBACK_REVERTED].astype(np.bool)
regular_mask = ~vandalism_mask
result = OrderedDict()
result['Total'] = compute_column(data)
result['Vandalism'] = compute_column(data[vandalism_mask])
result['Regular'] = compute_column(data[regular_mask])
result = pd.concat(result, axis=1, keys=result.keys())
return result
def compute_column(data):
result = | pd.Series() | pandas.Series |
import pandas as pd
import urllib.request
from bs4 import BeautifulSoup
# list of words to look up on dictionary.com
word = ['handy', 'whisper', 'lovely', 'scrape']
List = []
# loop over the words and pull the definition for each one
for i in range(len(word)):
url = "https://www.dictionary.com/browse/" + word[i]
# urllib fetches the raw HTML page for the current word
htmlfile = urllib.request.urlopen(url)
soup = BeautifulSoup(htmlfile, 'lxml')
# BeautifulSoup extracts only the definition element from the HTML
soup1 = soup.find(class_="one-click-content css-1p89gle e1q3nk1v4")
soup1 = soup1.get_text()
# append each word and its definition to the list, preserving order
List.append([word[i], soup1])
# the word/definition pairs are then loaded into a DataFrame
df = | pd.DataFrame(List, columns=["Word", "Definition"]) | pandas.DataFrame |
import pandas as pd
import os
import portalocker
import contextlib
import yaml
import subprocess
from gnn_acopf.experimental.opf_dataset import OPFDataset
from gnn_acopf.training.training_run import QualityMetric
from gnn_acopf.utils.timer import Timer
from pathlib import Path
import copy
from gnn_acopf.utils.observers import DefaultObserver, Scaler
from gnn_acopf.utils.power_net import PowerNetwork
from gnn_acopf.models.summarize_encoding_model import SummarizedEncodingModel
from gnn_acopf.julia_interface import JuliaInterface
import torch
import numpy as np
class EvaluateOPF:
def __init__(self, model, results_path):
self.model = model
self.results_path = results_path
self.results_fp = results_path / "results.csv"
self.run_fp = results_path / "runs.yaml"
self.scenarios = None
with self.synced_results():
pass
@contextlib.contextmanager
def synced_runs(self):
with portalocker.Lock(self.results_path / ".runs.lock", timeout=120) as lockfile:
lockfile.flush()
os.fsync(lockfile.fileno())
try:
with self.run_fp.open("r") as run_file:
exp_states = yaml.load(run_file, Loader=yaml.FullLoader)
except FileNotFoundError:
exp_states = {}
yield exp_states
with self.run_fp.open("w") as run_file:
yaml.dump(exp_states, run_file)
lockfile.flush()
os.fsync(lockfile.fileno())
def get_slurm_id(self):
# returns either the slurm ID or "running" if no slurm ID can be found.
try:
slurm_id = os.environ["SLURM_JOB_ID"]
except KeyError:
# no slurm available
slurm_id = "running"
return slurm_id
def is_running(self, exp_state):
try:
slurm_id = os.environ["SLURM_JOB_ID"]
except KeyError:
slurm_id = None
if exp_state in [None, "stopped", slurm_id]:
return False
try:
result = subprocess.check_output("squeue -hO jobid:15", shell=True,
stderr=subprocess.DEVNULL).decode("utf-8").strip()
result = result.split("\n")
result = [int(line.strip()) for line in result]
return exp_state in result
except subprocess.CalledProcessError:
return True
@contextlib.contextmanager
def synced_results(self):
with portalocker.Lock(self.results_path / ".results.lock", timeout=120) as lockfile:
lockfile.flush()
os.fsync(lockfile.fileno())
try:
self.results = pd.read_csv(self.results_fp)
except (pd.errors.EmptyDataError, FileNotFoundError):
self.results = pd.DataFrame(columns=[
"scenario_id",
"opf_method",
"time_taken",
"solved",
"power_generated",
"power_loss"
])
yield
self.results.to_csv(self.results_fp, index=False)
lockfile.flush()
os.fsync(lockfile.fileno())
def eval_method(self, eval_func, case_dict, jl, data, observer):
with Timer() as optimization_timer:
solution = eval_func(case_dict, jl, data=data, observer=observer)
ac_pf_result, _ = jl.run_pf(case_dict, method="ac",
previous_result=solution, print_level=0,
max_iter=1)
solved = "SOLVED" in ac_pf_result["termination_status"]
power_demand = sum([g["pd"] for g in case_dict["load"].values() if g["pd"] is not None])
power_generated = sum([g["pg"] for g in ac_pf_result["solution"]["gen"].values() if g["pg"] is not None])
power_loss = power_generated / power_demand
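# despite its name, power_loss is stored as the ratio of total generation to
# total demand (generation in excess of demand reflects network losses)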
return {
"time_taken": optimization_timer.interval,
"solved": solved,
"power_generated": power_generated,
"power_loss": power_loss
}
def set_run_state(self, exp_name, state):
with self.synced_runs() as exp_states:
exp_states[exp_name] = state
def eval_ac_opf(self, case_dict, jl, print_level=0, **kwargs):
ac_opf_result, _ = jl.run_opf(case_dict, method="ac", print_level=print_level)
return ac_opf_result
def eval_dc_opf(self, case_dict, jl, print_level=0, **kwargs):
ac_opf_result, _ = jl.run_opf(case_dict, method="dc", print_level=print_level)
return ac_opf_result
def eval_dcac_opf(self, case_dict, jl, print_level=0, **kwargs):
ac_opf_result, _ = jl.run_opf(case_dict, method="dcac", print_level=print_level)
return ac_opf_result
def eval_model_and_ac_opf(self, case_dict, jl, data, observer, print_level=0, **kwargs):
output = self.model(data)
model_output_dict = observer.translate_output_to_results_dict(
data, output, case_dict
)
ac_opf_result, _ = jl.run_opf(case_dict, method="ac",
previous_result=model_output_dict, print_level=print_level)
return ac_opf_result
def eval_model_and_ac_opf_nobranch(self, case_dict, jl, data, observer, print_level=0, **kwargs):
output = self.model(data)
model_output_dict = observer.translate_output_to_results_dict(
data, output, case_dict, keys_to_consider=["bus", "gen"]
)
ac_opf_result, _ = jl.run_opf(case_dict, method="ac",
previous_result=model_output_dict, print_level=print_level)
return ac_opf_result
def eval_model_pf_ac_opf(self, case_dict, jl, data, observer, print_level=0, **kwargs):
output = self.model(data)
model_output_dict = observer.translate_output_to_results_dict(
data, output, case_dict
)
pf_result, _ = jl.run_pf(case_dict, method="ac", previous_result=model_output_dict,
print_level=print_level)
# TODO: Maybe need to combine it.
ac_opf_result, _ = jl.run_opf(case_dict, method="ac",
previous_result=pf_result, print_level=print_level)
return ac_opf_result
def eval_model_pf(self, case_dict, jl, data, observer, print_level=0, **kwargs):
output = self.model(data)
model_output_dict = observer.translate_output_to_results_dict(
data, output, case_dict
)
pf_result, _ = jl.run_pf(case_dict, method="ac", previous_result=model_output_dict,
print_level=print_level)
return pf_result
def eval_model(self, case_dict, jl, data, observer, print_level=0, **kwargs):
output = self.model(data)
output_dict = observer.translate_output_to_results_dict(
data, output, case_dict
)
return output_dict
def eval_model_feasibility_check_opf(self, case_dict, jl, data, observer, print_level=0, **kwargs):
output = self.model(data)
output_dict = observer.translate_output_to_results_dict(
data, output, case_dict
)
ac_pf_result, _ = jl.run_pf(case_dict, method="ac",
previous_result=output_dict, print_level=0)
solved = "SOLVED" in ac_pf_result["termination_status"]
if not solved:
output_dict, _ = jl.run_opf(case_dict, method="ac",
previous_result=output_dict, print_level=print_level)
return output_dict
def statistical_summary(self, filter_by_solved):
group_by = ["opf_method"]
results = self.results
if filter_by_solved:
results = results[results["solved"] == 1]
grouped_results = results.groupby(group_by)
agg_dict = {c: ["mean", "std"] for c in list(self.results.columns.values)
if c not in group_by + ["scenario_id", "solved"]}
agg_dict["solved"] = ["mean", "sum"]
statistics_df = grouped_results.agg(agg_dict)
# statistics_df = statistics_df.unstack(level=[1]).reorder_levels([2, 0, 1], axis=1)
# sort the methods by mean time taken (fastest first)
statistics_df = statistics_df.sort_values(by=[("time_taken", "mean")], ascending=True)
return statistics_df
def pretty_statistics(self, filter_by_solved):
with pd.option_context('display.max_rows', None, 'display.max_columns', None,
"display.width", 200):
pretty_statistic_string = str(self.statistical_summary(filter_by_solved))
return pretty_statistic_string
def pprint_results(self, results_by_method, n_evaluated):
for method, results in results_by_method.items():
print(method)
n_solved = len([r for r in results["solved"] if r])
print(f"\tSolved: {n_solved}")
print(f"\tTime (solved): {np.mean([results['time_taken'][i] for i in range(n_evaluated) if results['solved'][i]])}")
print(f"\tTime (all): {np.mean(results['time_taken'])}")
print(f"\tCost (solved): {np.mean([results['cost'][i] for i in range(n_evaluated) if results['solved'][i]])}")
print(f"\tPower Gen (solved): {np.mean([results['power_generated'][i] for i in range(n_evaluated) if results['solved'][i]])}")
def claim_scenario_idx(self, scenario_id):
with self.synced_runs() as exp_states:
scen_state = exp_states.get(scenario_id, None)
if not self.is_running(scen_state):
exp_states[scenario_id] = self.get_slurm_id()
return True
return False
def eval_opf(self, dataloader, observer, device, print_level=0):
jl = JuliaInterface()
self.model.eval()
self.model.to(device)
methods = {
"ac_opf": self.eval_ac_opf,
"dcac_opf": self.eval_dcac_opf,
"dc_opf": self.eval_dc_opf,
"model_ac_opf": self.eval_model_and_ac_opf,
"model": self.eval_model,
# "model_feasibility_acopf": self.eval_model_feasibility_check_opf
# "model_pf_ac_opf": self.eval_model_pf_ac_opf,
# "model_ac_opf_nobranch": self.eval_model_and_ac_opf_nobranch,
# "model_pf": self.eval_model_pf
}
results_by_method = {m: {"solved": [], "time_taken": [], "cost": []} for m in methods}
n_evaluated = 0
with torch.no_grad():
for i, data in enumerate(dataloader):
scenario_idx = data["scenario_idx"].item()
if not self.claim_scenario_idx(scenario_idx):
continue
scenario_df = | pd.DataFrame() | pandas.DataFrame |
from pytools4p.transformer import reshaper
import pandas as pd
import pandas.util.testing as tm  # pandas.testing does not expose makeTimeDataFrame
from pandas.testing import assert_frame_equal
import numpy as np
def test_pivot_reshaper():
"""Test for normal arguments
"""
def unpivot(frame):
N, K = frame.shape
data = {
"value": frame.to_numpy().ravel("F"),
"variable": np.asarray(frame.columns).repeat(N),
"date": np.tile(np.asarray(frame.index), K)
}
return pd.DataFrame(data, columns=["date", "variable", "value"])
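# unpivot flattens the (N rows x K columns) wide frame into long format:
# N * K rows with the columns ['date', 'variable', 'value']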
df = unpivot(tm.makeTimeDataFrame(3))
actual = reshaper(df, index_col="date", var_name="variable", value_col="value", type="pivot")
expected = pd.pivot(df, index="date", columns="variable", values="value").reset_index()
assert_frame_equal(actual, expected)
def test_melt_reshaper():
"""Test for normal arguments
"""
def unpivot(frame):
N, K = frame.shape
data = {
"value": frame.to_numpy().ravel("F"),
"variable": np.asarray(frame.columns).repeat(N),
"date": np.tile(np.asarray(frame.index), K)
}
return | pd.DataFrame(data, columns=["date", "variable", "value"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next one are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unordered = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both direction, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparisons take the categories order into
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect to Categorical, as it doesn't care
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories([4, 3, 2, 1])  # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3]))  # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1]))  # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1]))  # output is the same
self.assertEqual(c.min(), 4)
self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical returns np.array like pd.Series, but different from
# np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
exp = np.array([3, 5])  # eggs after donuts, after switching milk and donuts
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
| pd.date_range('2014-01-01', periods=3) | pandas.date_range |
import numpy as np
import pandas as pd
from open_quant.labeling.multi_processing import mp_pandas
import sys
def test(a, b):
return a + b
def triple_barrier_method(close, events, pt_sl, molecule):
"""
Advances in Financial Machine Learning, Snippet 3.2, page 45.
Triple Barrier Labeling Method
Applies triple-barrier labeling method on time-series (molecule).
Returns DataFrame of timestamps of barrier touches.
:param close: (pd.Series) Close prices
:param events: (pd.Series) Event values calculated (CUSUM filter)
    :param pt_sl: (np.array) Profit taking value at index 0; stop loss value at index 1
:param molecule: (an array) Datetime index values
:return: (pd.DataFrame) Timestamps of when first barrier was touched
"""
# apply stop loss/profit taking, if it takes place before t1 (end of event)
events_ = events.loc[molecule]
out = events_[['t1']].copy(deep=True)
if pt_sl[0] > 0:
pt = pt_sl[0] * events_['target']
else:
pt = pd.Series(index=events.index) # NaNs
if pt_sl[1] > 0:
sl = -pt_sl[1] * events_['target']
else:
sl = pd.Series(index=events.index) # NaNs
for loc, t1 in events_['t1'].fillna(close.index[-1]).iteritems():
df0 = close[loc:t1] # path prices
df0 = (df0 / close[loc] - 1) * events_.at[loc, 'side'] # path returns
out.loc[loc, 'sl'] = df0[df0 < sl[loc]].index.min() # earliest stop loss
out.loc[loc, 'pt'] = df0[df0 > pt[loc]].index.min() # earliest profit taking
return out
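
def _demo_triple_barrier_method():
    """Illustrative sketch only, not part of the original module: shows the
    expected shape of the inputs on a tiny synthetic price path. All prices,
    targets and timestamps below are made up for demonstration."""
    idx = pd.date_range('2020-01-01', periods=10, freq='D')
    close = pd.Series(100.0 * (1.0 + np.linspace(0.0, 0.05, 10)), index=idx)
    events = pd.DataFrame(
        {'t1': [idx[5], idx[9]],   # vertical barriers (end of each event)
         'target': [0.01, 0.01],   # unit width used to scale pt_sl
         'side': [1.0, 1.0]},      # long positions
        index=[idx[0], idx[4]])    # event start times
    touches = triple_barrier_method(close, events, pt_sl=[1, 1], molecule=events.index)
    # touches['pt'] / touches['sl'] hold the first timestamp at which each
    # horizontal barrier was hit (NaT where a barrier was never reached).
    return touches
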
def get_events(close, t_events, pt_sl, target, min_ret=0,
num_threads=1, t1=False, side=None):
"""
Advances in Financial Machine Learning, Snippet 3.6 page 50.
Computes the time of first touch using Meta Labels.
:param close: (pd.Series) Close prices
    :param t_events: (pd.Series) Event timestamps. These are calculated using the CUSUM filter
        and will be used as the start timestamps for the Triple Barrier Method.
    :param pt_sl: (2 element array) Profit taking value at index 0; stop loss value at index 1.
    :param target: (pd.Series) Values that are used (in conjunction with pt_sl) as
        scalar values to calculate the size of the profit taking and stop loss barriers.
:param min_ret: (float) The minimum target return required for running a triple barrier search.
:param num_threads: (int) The number of threads concurrently used by the function.
:param t1: (pd.Series) A pandas series with the timestamps of the vertical barriers.
Pass False to disable vertical barriers.
:param side: (pd.Series) Long or Short side prediction.
:return: (pd.DataFrame) Events
-events.index is event's starttime
-events['t1'] is event's endtime
-events['target'] is event's target
-events['side'] (optional) implies the algo's position side
-events['pt'] is profit taking multiple
-events['sl'] is stop loss multiple
"""
# Get sampled target values
target = target.loc[t_events]
target = target[target > min_ret]
# Get time boundary t1
if t1 is False:
t1 = pd.Series(pd.NaT, index=t_events)
# Define the side
if side is None:
_side = pd.Series(1., index=target.index)
_pt_sl = [pt_sl, pt_sl]
else:
_side = side.loc[target.index]
_pt_sl = pt_sl[:2]
events = pd.concat({'t1': t1, 'target': target, 'side': _side}, axis = 1)
events = events.dropna(subset = ['target'])
df0 = mp_pandas(func=triple_barrier_method, pd_obj=('molecule', events.index),
num_threads=num_threads, close=close, events=events, pt_sl=pt_sl)
events['t1'] = df0.dropna(how='all').min(axis=1) # ignores NaN
if side is None:
events = events.drop('side', axis=1)
return events
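
def _demo_get_events():
    """Illustrative sketch only, not part of the original module: wires
    get_events() together with synthetic inputs. In practice t_events would
    come from a CUSUM filter and target from a volatility estimate; the
    values below are placeholders for demonstration."""
    idx = pd.date_range('2020-01-01', periods=30, freq='D')
    close = pd.Series(100.0 + np.arange(30, dtype=float), index=idx)
    t_events = idx[5:25:5]                    # pretend these passed the CUSUM filter
    target = pd.Series(0.01, index=t_events)  # constant 1% unit target
    t1 = pd.Series(t_events + pd.Timedelta(days=5), index=t_events)  # simple vertical barrier
    events = get_events(close, t_events, pt_sl=[1, 1], target=target,
                        min_ret=0.0, num_threads=1, t1=t1)
    return events  # events['t1'] now holds the first-touch timestamp of each event
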
def add_vertical_barrier(t_events, close, num_days=1):
"""
Advances in Financial Machine Learning, Snippet 3.4 page 49.
Adding a Vertical Barrier
For each index in t_events, it finds the timestamp of the next price bar at or immediately after
a number of days num_days. This vertical barrier can be passed as an optional argument t1 in get_events.
This function creates a series that has all the timestamps of when the vertical barrier would be reached.
:param t_events: (pd.Series) Series of events (symmetric CUSUM filter)
:param close: (pd.Series) Close prices
:param num_days: (int) Number of days to add for vertical barrier
:return: (pd.Series) Timestamps of vertical barriers
"""
    t1 = close.index.searchsorted(t_events + pd.Timedelta(days=num_days))
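
def _demo_vertical_barrier_mapping():
    """Illustrative sketch only: one common way (mirroring the standard AFML
    snippet, not necessarily this module's exact implementation) to turn the
    positions returned by close.index.searchsorted(...) into a Series of
    barrier timestamps indexed by the event start times."""
    idx = pd.date_range('2020-01-01', periods=10, freq='D')
    close = pd.Series(100.0, index=idx)
    t_events = idx[:5]
    positions = close.index.searchsorted(t_events + pd.Timedelta(days=3))
    positions = positions[positions < close.shape[0]]  # drop barriers that fall beyond the data
    return pd.Series(close.index[positions], index=t_events[:positions.shape[0]])
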
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pyarrow as pa
import pytest
from pyarrow.parquet import ParquetFile
from kartothek.serialization import (
CsvSerializer,
DataFrameSerializer,
ParquetSerializer,
default_serializer,
)
from kartothek.serialization._util import ensure_unicode_string_type
TYPE_STABLE_SERIALISERS = [ParquetSerializer()]
SERLIALISERS = TYPE_STABLE_SERIALISERS + [
CsvSerializer(),
CsvSerializer(compress=False),
default_serializer(),
]
type_stable_serialisers = pytest.mark.parametrize("serialiser", TYPE_STABLE_SERIALISERS)
predicate_serialisers = pytest.mark.parametrize(
"serialiser",
[
ParquetSerializer(chunk_size=1),
ParquetSerializer(chunk_size=2),
ParquetSerializer(chunk_size=4),
]
+ SERLIALISERS,
)
def test_load_df_from_store_unsupported_format(store):
with pytest.raises(ValueError):
DataFrameSerializer.restore_dataframe(store, "test.unknown")
def test_store_df_to_store(store):
df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["โ", "โฌ"]})
dataframe_format = default_serializer()
assert isinstance(dataframe_format, ParquetSerializer)
key = dataframe_format.store(store, "prefix", df)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_store_table_to_store(serialiser, store):
df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["โ", "โฌ"]})
table = pa.Table.from_pandas(df)
key = serialiser.store(store, "prefix", table)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_dataframe_roundtrip(serialiser, store):
if serialiser in TYPE_STABLE_SERIALISERS:
df = pd.DataFrame(
{"a": [1, 2], "b": [3.0, 4.0], "c": ["โ", "โฌ"], b"d": ["#", ";"]}
)
key = serialiser.store(store, "prefix", df)
df.columns = [ensure_unicode_string_type(col) for col in df.columns]
else:
df = pd.DataFrame(
{"a": [1, 2], "b": [3.0, 4.0], "c": ["โ", "โฌ"], "d": ["#", ";"]}
)
key = serialiser.store(store, "prefix", df)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
# Test partial restore
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(store, key, columns=["a", "c"]),
df[["a", "c"]],
)
# Test that all serialisers can ingest predicate_pushdown_to_io
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(
store, key, columns=["a", "c"], predicate_pushdown_to_io=False
),
df[["a", "c"]],
)
# Test that all serialisers can deal with categories
expected = df[["c", "d"]].copy()
expected["c"] = expected["c"].astype("category")
# Check that the dtypes match but don't care about the order of the categoricals.
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(
store, key, columns=["c", "d"], categories=["c"]
),
expected,
check_categorical=False,
)
# Test restore w/ empty col list
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(store, key, columns=[]), df[[]]
)
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_missing_column(serialiser, store):
df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["โ", "โฌ"], "d": ["#", ";"]})
key = serialiser.store(store, "prefix", df)
with pytest.raises(ValueError):
DataFrameSerializer.restore_dataframe(store, key, columns=["a", "x"])
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_dataframe_roundtrip_empty(serialiser, store):
df = pd.DataFrame({})
key = serialiser.store(store, "prefix", df)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
# Test partial restore
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_dataframe_roundtrip_no_rows(serialiser, store):
df = pd.DataFrame({"a": [], "b": [], "c": []}).astype(object)
key = serialiser.store(store, "prefix", df)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
# Test partial restore
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(store, key, columns=["a", "c"]),
df[["a", "c"]],
)
def test_filter_query_predicate_exclusion(store):
with pytest.raises(ValueError):
DataFrameSerializer.restore_dataframe(
store, "test.parquet", predicates=[[("a", "==", 1)]], filter_query="True"
)
def assert_frame_almost_equal(df_left, df_right):
"""
Be more friendly to some dtypes that are not preserved during the roundtrips.
"""
    # FIXME: This needs better documentation
for col in df_left.columns:
if pd.api.types.is_datetime64_dtype(
df_left[col].dtype
) and pd.api.types.is_object_dtype(df_right[col].dtype):
df_right[col] = pd.to_datetime(df_right[col])
elif pd.api.types.is_object_dtype(
df_left[col].dtype
) and pd.api.types.is_datetime64_dtype(df_right[col].dtype):
df_left[col] = pd.to_datetime(df_left[col])
elif (
len(df_left) > 0
and pd.api.types.is_object_dtype(df_left[col].dtype)
and pd.api.types.is_object_dtype(df_right[col].dtype)
):
if isinstance(df_left[col].iloc[0], datetime.date) or isinstance(
df_right[col].iloc[0], datetime.date
):
df_left[col] = pd.to_datetime(df_left[col])
df_right[col] = pd.to_datetime(df_right[col])
elif pd.api.types.is_object_dtype(
df_left[col].dtype
) and pd.api.types.is_categorical_dtype(df_right[col].dtype):
df_left[col] = df_left[col].astype(df_right[col].dtype)
pdt.assert_frame_equal(
df_left.reset_index(drop=True), df_right.reset_index(drop=True)
)
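
def _example_dtype_drift():
    # Illustrative only, not one of the original tests: after a text-based
    # round trip, datetime data may come back as plain strings; the helper
    # above coerces both sides with pd.to_datetime before comparing, so the
    # frames below compare equal despite the dtype mismatch.
    left = pd.DataFrame({"t": pd.to_datetime(["2011-01-31", "2011-02-03"])})
    right = pd.DataFrame({"t": ["2011-01-31", "2011-02-03"]})  # object dtype
    assert_frame_almost_equal(left, right)
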
@pytest.mark.parametrize(
"df, read_kwargs",
[
(pd.DataFrame({"string_รผ": ["abc", "affe", "banane", "buchstabe_รผ"]}), {}),
(pd.DataFrame({"integer_รผ": np.arange(4)}), {}),
(pd.DataFrame({"float_รผ": [-3.141591, 0.0, 3.141593, 3.141595]}), {}),
(
pd.DataFrame(
{
"date_รผ": [
datetime.date(2011, 1, 31),
datetime.date(2011, 2, 3),
datetime.date(2011, 2, 4),
datetime.date(2011, 3, 10),
]
}
),
{"date_as_object": False},
),
(
pd.DataFrame(
{
"date_รผ": [
datetime.date(2011, 1, 31),
datetime.date(2011, 2, 3),
datetime.date(2011, 2, 4),
datetime.date(2011, 3, 10),
]
}
),
{"date_as_object": True},
),
(
pd.DataFrame(
{"categorical_รผ": list("abcd")},
dtype=pd.api.types.CategoricalDtype(list("abcd"), ordered=True),
),
{},
),
],
)
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_pushdown(
store, df, read_kwargs, predicate_pushdown_to_io, serialiser
):
"""
Test predicate pushdown for several types and operations.
    The DataFrame parameters all need to be of the same length for this test to
    work universally. Also, the values in the DataFrames need to be sorted in
    ascending order.
"""
# All test dataframes need to have the same length
assert len(df) == 4
assert df[df.columns[0]].is_monotonic and df.iloc[0, 0] < df.iloc[-1, 0]
# This is due to the limitation that dates cannot be expressed in
# Pandas' query() method.
if isinstance(serialiser, CsvSerializer) and isinstance(
df.iloc[0, 0], datetime.date
):
pytest.skip("CsvSerialiser cannot filter on dates")
key = serialiser.store(store, "prefix", df)
# Test `<` and `>` operators
expected = df.iloc[[1, 2], :].copy()
predicates = [
[(df.columns[0], "<", df.iloc[3, 0]), (df.columns[0], ">", df.iloc[0, 0])]
]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test `=<` and `>=` operators
expected = df.iloc[[1, 2, 3], :].copy()
predicates = [
[(df.columns[0], "<=", df.iloc[3, 0]), (df.columns[0], ">=", df.iloc[1, 0])]
]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test `==` operator
expected = df.iloc[[1], :].copy()
predicates = [[(df.columns[0], "==", df.iloc[1, 0])]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test `in` operator
expected = df.iloc[[1], :].copy()
predicates = [[(df.columns[0], "in", [df.iloc[1, 0]])]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test `!=` operator
expected = df.iloc[[0, 2, 3], :].copy()
predicates = [[(df.columns[0], "!=", df.iloc[1, 0])]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test empty DataFrame
expected = df.head(0)
predicates = [[(df.columns[0], "<", df.iloc[0, 0])]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test in empty list
expected = df.head(0)
predicates = [[(df.columns[0], "in", [])]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test in numpy array
expected = df.iloc[[1], :].copy()
predicates = [[(df.columns[0], "in", np.asarray([df.iloc[1, 0], df.iloc[1, 0]]))]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test malformed predicates 1
predicates = []
with pytest.raises(ValueError) as exc:
serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert str(exc.value) == "Empty predicates"
# Test malformed predicates 2
predicates = [[]]
with pytest.raises(ValueError) as exc:
serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert str(exc.value) == "Invalid predicates: Conjunction 0 is empty"
# Test malformed predicates 3
predicates = [[(df.columns[0], "<", df.iloc[0, 0])], []]
with pytest.raises(ValueError) as exc:
serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert str(exc.value) == "Invalid predicates: Conjunction 1 is empty"
# Test malformed predicates 4
predicates = [[(df.columns[0], "<", df.iloc[0, 0])], ["foo"]]
with pytest.raises(ValueError) as exc:
serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert (
str(exc.value)
== "Invalid predicates: Clause 0 in conjunction 1 should be a 3-tuple, got object of type <class 'str'> instead"
)
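
# Note on the predicates format exercised above: each predicates argument is a
# list of conjunctions, and each conjunction is a list of
# (column, operator, value) 3-tuples (compare the "Conjunction ... is empty"
# errors asserted above). The outer list is OR-ed, the inner lists AND-ed.
# The constant below is illustrative only and not used by any test here.
EXAMPLE_PREDICATES = [
    [("a", ">", 1), ("b", "==", "x")],  # a > 1 AND b == "x"
    [("a", "<", 0)],                    # OR: a < 0
]
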
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_float_equal_big(predicate_pushdown_to_io, store, serialiser):
df = pd.DataFrame({"float": [3141590.0, 3141592.0, 3141594.0]})
key = serialiser.store(store, "prefix", df)
predicates = [[("float", "==", 3141592.0)]]
result_df = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
expected_df = df.iloc[[1], :].copy()
pdt.assert_frame_equal(
result_df.reset_index(drop=True), expected_df.reset_index(drop=True)
)
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_float_equal_small(predicate_pushdown_to_io, store, serialiser):
df = pd.DataFrame({"float": [0.3141590, 0.3141592, 0.3141594]})
key = serialiser.store(store, "prefix", df)
predicates = [[("float", "==", 0.3141592)]]
result_df = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
expected_df = df.iloc[[1], :].copy()
pdt.assert_frame_equal(
result_df.reset_index(drop=True), expected_df.reset_index(drop=True)
)
@type_stable_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_eval_string_types(serialiser, store, predicate_pushdown_to_io):
df = pd.DataFrame({b"a": [1, 2], "b": [3.0, 4.0]})
key = serialiser.store(store, "prefix", df)
df.columns = [ensure_unicode_string_type(col) for col in df.columns]
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
for col in ["a", b"a", "a"]:
predicates = [[(col, "==", 1)]]
result_df = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
expected_df = df.iloc[[0], :].copy()
pdt.assert_frame_equal(
result_df.reset_index(drop=True), expected_df.reset_index(drop=True)
)
for col in ["b", b"b", "b"]:
predicates = [[(col, "==", 3.0)]]
result_df = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
expected_df = df.iloc[[0], :].copy()
pdt.assert_frame_equal(
result_df.reset_index(drop=True), expected_df.reset_index(drop=True)
)
for preds in (
[[("a", "==", 1), ("b", "==", 3.0)]],
[[("a", "==", 1), (b"b", "==", 3.0)]],
[[(b"a", "==", 1), ("b", "==", 3.0)]],
):
result_df = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=preds,
)
expected_df = df.iloc[[0], :].copy()
pdt.assert_frame_equal(
result_df.reset_index(drop=True), expected_df.reset_index(drop=True)
)
@pytest.mark.parametrize(
"df,value",
[
(pd.DataFrame({"u": pd.Series([None], dtype=object)}), "foo"),
(pd.DataFrame({"b": pd.Series([None], dtype=object)}), b"foo"),
(pd.DataFrame({"f": pd.Series([np.nan], dtype=float)}), 1.2),
(
pd.DataFrame({"t": pd.Series([pd.NaT], dtype="datetime64[ns]")}),
pd.Timestamp("2017"),
),
],
)
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_pushdown_null_col(
store, df, value, predicate_pushdown_to_io, serialiser
):
key = serialiser.store(store, "prefix", df)
expected = df.iloc[[]].copy()
predicates = [[(df.columns[0], "==", value)]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
check_datetimelike_compat = (
isinstance(value, pd.Timestamp) and not serialiser.type_stable
)
pdt.assert_frame_equal(
result.reset_index(drop=True),
expected.reset_index(drop=True),
check_dtype=serialiser.type_stable,
check_datetimelike_compat=check_datetimelike_compat,
)
@pytest.mark.parametrize(
"df, op, value, expected_index",
[
(
pd.DataFrame({"u": pd.Series([None, "x", np.nan], dtype=object)}),
"==",
None,
[0, 2],
),
(
pd.DataFrame({"u": pd.Series([None, "x", np.nan], dtype=object)}),
"in",
[None],
[0, 2],
),
(
pd.DataFrame({"u": pd.Series([None, "x", np.nan], dtype=object)}),
"!=",
None,
[1],
),
(
pd.DataFrame({"u": pd.Series([None, "x", np.nan], dtype=object)}),
"in",
[None, "x"],
[0, 1, 2],
),
(
pd.DataFrame({"f": pd.Series([np.nan, 1.0, np.nan], dtype=float)}),
"==",
np.nan,
[0, 2],
),
(
pd.DataFrame({"f": pd.Series([np.nan, 1.0, np.nan], dtype=float)}),
"in",
[np.nan],
[0, 2],
),
(
pd.DataFrame({"f": | pd.Series([np.nan, 1.0, np.nan], dtype=float) | pandas.Series |
from unittest import TestCase
import pandas as pd
from moonstone.utils.taxonomy import TaxonomyCountsBase
class TestTaxonomyCountsBase(TestCase):
def setUp(self):
self.taxonomy_instance = TaxonomyCountsBase()
def test_fill_none(self):
taxa_df = pd.DataFrame(
[
['Bacteria', 'Bacteroidetes'],
['Bacteria', None]
],
columns=['kingdom', 'phylum']
)
expected_df = pd.DataFrame(
[
['Bacteria', 'Bacteroidetes'],
['Bacteria', 'Bacteria (kingdom)']
],
columns=['kingdom', 'phylum']
)
tested_df = self.taxonomy_instance._fill_none(taxa_df)
        pd.testing.assert_frame_equal(tested_df, expected_df)
from config import *
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
from getpass import getpass
from os import remove
import zipfile
import pandas as pd
import numpy as np
from lxml import etree as et
def _parseBgeXml(f):
timestamp = []
consumed = []
cost = []
timezone = config.get('global','timezone')
for e,elem in et.iterparse(f, tag='{http://naesb.org/espi}IntervalReading'):
timestamp.append(elem.findall('.//{http://naesb.org/espi}start')[0].text)
consumed.append( elem.findall('{http://naesb.org/espi}value')[0].text)
cost.append( elem.findall('{http://naesb.org/espi}cost')[0].text)
nt = np.array(timestamp,dtype=int).astype('datetime64[s]')
nc = np.array(consumed,dtype=float)
no = np.array(cost,dtype=float)
nc /= 1e5
no /= 1e5
    consumed = pd.Series(nc, index=nt)
from textwrap import dedent
import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
option_context,
)
pytest.importorskip("jinja2")
from pandas.io.formats.style import Styler
from pandas.io.formats.style_render import (
_parse_latex_cell_styles,
_parse_latex_css_conversion,
_parse_latex_header_span,
_parse_latex_table_styles,
_parse_latex_table_wrapping,
)
@pytest.fixture
def df():
return DataFrame({"A": [0, 1], "B": [-0.61, -1.22], "C": ["ab", "cd"]})
@pytest.fixture
def df_ext():
return DataFrame(
{"A": [0, 1, 2], "B": [-0.61, -1.22, -2.22], "C": ["ab", "cd", "de"]}
)
@pytest.fixture
def styler(df):
return Styler(df, uuid_len=0, precision=2)
def test_minimal_latex_tabular(styler):
expected = dedent(
"""\
\\begin{tabular}{lrrl}
& A & B & C \\\\
0 & 0 & -0.61 & ab \\\\
1 & 1 & -1.22 & cd \\\\
\\end{tabular}
"""
)
assert styler.to_latex() == expected
def test_tabular_hrules(styler):
expected = dedent(
"""\
\\begin{tabular}{lrrl}
\\toprule
& A & B & C \\\\
\\midrule
0 & 0 & -0.61 & ab \\\\
1 & 1 & -1.22 & cd \\\\
\\bottomrule
\\end{tabular}
"""
)
assert styler.to_latex(hrules=True) == expected
def test_tabular_custom_hrules(styler):
styler.set_table_styles(
[
{"selector": "toprule", "props": ":hline"},
{"selector": "bottomrule", "props": ":otherline"},
]
) # no midrule
expected = dedent(
"""\
\\begin{tabular}{lrrl}
\\hline
& A & B & C \\\\
0 & 0 & -0.61 & ab \\\\
1 & 1 & -1.22 & cd \\\\
\\otherline
\\end{tabular}
"""
)
assert styler.to_latex() == expected
def test_column_format(styler):
    # default setting is already tested in `test_minimal_latex_tabular`
styler.set_table_styles([{"selector": "column_format", "props": ":cccc"}])
assert "\\begin{tabular}{rrrr}" in styler.to_latex(column_format="rrrr")
styler.set_table_styles([{"selector": "column_format", "props": ":r|r|cc"}])
assert "\\begin{tabular}{r|r|cc}" in styler.to_latex()
def test_siunitx_cols(styler):
expected = dedent(
"""\
\\begin{tabular}{lSSl}
{} & {A} & {B} & {C} \\\\
0 & 0 & -0.61 & ab \\\\
1 & 1 & -1.22 & cd \\\\
\\end{tabular}
"""
)
assert styler.to_latex(siunitx=True) == expected
def test_position(styler):
assert "\\begin{table}[h!]" in styler.to_latex(position="h!")
assert "\\end{table}" in styler.to_latex(position="h!")
styler.set_table_styles([{"selector": "position", "props": ":b!"}])
assert "\\begin{table}[b!]" in styler.to_latex()
assert "\\end{table}" in styler.to_latex()
@pytest.mark.parametrize("env", [None, "longtable"])
def test_label(styler, env):
assert "\n\\label{text}" in styler.to_latex(label="text", environment=env)
styler.set_table_styles([{"selector": "label", "props": ":{more ยงtext}"}])
assert "\n\\label{more :text}" in styler.to_latex(environment=env)
def test_position_float_raises(styler):
msg = "`position_float` should be one of 'raggedright', 'raggedleft', 'centering',"
with pytest.raises(ValueError, match=msg):
styler.to_latex(position_float="bad_string")
msg = "`position_float` cannot be used in 'longtable' `environment`"
with pytest.raises(ValueError, match=msg):
styler.to_latex(position_float="centering", environment="longtable")
@pytest.mark.parametrize("label", [(None, ""), ("text", "\\label{text}")])
@pytest.mark.parametrize("position", [(None, ""), ("h!", "{table}[h!]")])
@pytest.mark.parametrize("caption", [(None, ""), ("text", "\\caption{text}")])
@pytest.mark.parametrize("column_format", [(None, ""), ("rcrl", "{tabular}{rcrl}")])
@pytest.mark.parametrize("position_float", [(None, ""), ("centering", "\\centering")])
def test_kwargs_combinations(
styler, label, position, caption, column_format, position_float
):
result = styler.to_latex(
label=label[0],
position=position[0],
caption=caption[0],
column_format=column_format[0],
position_float=position_float[0],
)
assert label[1] in result
assert position[1] in result
assert caption[1] in result
assert column_format[1] in result
assert position_float[1] in result
def test_custom_table_styles(styler):
styler.set_table_styles(
[
{"selector": "mycommand", "props": ":{myoptions}"},
{"selector": "mycommand2", "props": ":{myoptions2}"},
]
)
expected = dedent(
"""\
\\begin{table}
\\mycommand{myoptions}
\\mycommand2{myoptions2}
"""
)
assert expected in styler.to_latex()
def test_cell_styling(styler):
styler.highlight_max(props="itshape:;Huge:--wrap;")
expected = dedent(
"""\
\\begin{tabular}{lrrl}
& A & B & C \\\\
0 & 0 & \\itshape {\\Huge -0.61} & ab \\\\
1 & \\itshape {\\Huge 1} & -1.22 & \\itshape {\\Huge cd} \\\\
\\end{tabular}
"""
)
assert expected == styler.to_latex()
def test_multiindex_columns(df):
cidx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
df.columns = cidx
expected = dedent(
"""\
\\begin{tabular}{lrrl}
& \\multicolumn{2}{r}{A} & B \\\\
& a & b & c \\\\
0 & 0 & -0.61 & ab \\\\
1 & 1 & -1.22 & cd \\\\
\\end{tabular}
"""
)
s = df.style.format(precision=2)
assert expected == s.to_latex()
# non-sparse
expected = dedent(
"""\
\\begin{tabular}{lrrl}
& A & A & B \\\\
& a & b & c \\\\
0 & 0 & -0.61 & ab \\\\
1 & 1 & -1.22 & cd \\\\
\\end{tabular}
"""
)
s = df.style.format(precision=2)
assert expected == s.to_latex(sparse_columns=False)
def test_multiindex_row(df_ext):
ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
df_ext.index = ridx
expected = dedent(
"""\
\\begin{tabular}{llrrl}
& & A & B & C \\\\
\\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
& b & 1 & -1.22 & cd \\\\
B & c & 2 & -2.22 & de \\\\
\\end{tabular}
"""
)
styler = df_ext.style.format(precision=2)
result = styler.to_latex()
assert expected == result
# non-sparse
expected = dedent(
"""\
\\begin{tabular}{llrrl}
& & A & B & C \\\\
A & a & 0 & -0.61 & ab \\\\
A & b & 1 & -1.22 & cd \\\\
B & c & 2 & -2.22 & de \\\\
\\end{tabular}
"""
)
result = styler.to_latex(sparse_index=False)
assert expected == result
def test_multirow_naive(df_ext):
ridx = MultiIndex.from_tuples([("X", "x"), ("X", "y"), ("Y", "z")])
df_ext.index = ridx
expected = dedent(
"""\
\\begin{tabular}{llrrl}
& & A & B & C \\\\
X & x & 0 & -0.61 & ab \\\\
& y & 1 & -1.22 & cd \\\\
Y & z & 2 & -2.22 & de \\\\
\\end{tabular}
"""
)
styler = df_ext.style.format(precision=2)
result = styler.to_latex(multirow_align="naive")
assert expected == result
def test_multiindex_row_and_col(df_ext):
cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
df_ext.index, df_ext.columns = ridx, cidx
expected = dedent(
"""\
\\begin{tabular}{llrrl}
& & \\multicolumn{2}{l}{Z} & Y \\\\
& & a & b & c \\\\
\\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
& b & 1 & -1.22 & cd \\\\
B & c & 2 & -2.22 & de \\\\
\\end{tabular}
"""
)
styler = df_ext.style.format(precision=2)
result = styler.to_latex(multirow_align="b", multicol_align="l")
assert result == expected
# non-sparse
expected = dedent(
"""\
\\begin{tabular}{llrrl}
& & Z & Z & Y \\\\
& & a & b & c \\\\
A & a & 0 & -0.61 & ab \\\\
A & b & 1 & -1.22 & cd \\\\
B & c & 2 & -2.22 & de \\\\
\\end{tabular}
"""
)
result = styler.to_latex(sparse_index=False, sparse_columns=False)
assert result == expected
@pytest.mark.parametrize(
"multicol_align, siunitx, header",
[
("naive-l", False, " & A & &"),
("naive-r", False, " & & & A"),
("naive-l", True, "{} & {A} & {} & {}"),
("naive-r", True, "{} & {} & {} & {A}"),
],
)
def test_multicol_naive(df, multicol_align, siunitx, header):
ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("A", "c")])
df.columns = ridx
level1 = " & a & b & c" if not siunitx else "{} & {a} & {b} & {c}"
col_format = "lrrl" if not siunitx else "lSSl"
expected = dedent(
f"""\
\\begin{{tabular}}{{{col_format}}}
{header} \\\\
{level1} \\\\
0 & 0 & -0.61 & ab \\\\
1 & 1 & -1.22 & cd \\\\
\\end{{tabular}}
"""
)
styler = df.style.format(precision=2)
result = styler.to_latex(multicol_align=multicol_align, siunitx=siunitx)
assert expected == result
def test_multi_options(df_ext):
cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
df_ext.index, df_ext.columns = ridx, cidx
styler = df_ext.style.format(precision=2)
expected = dedent(
"""\
& & \\multicolumn{2}{r}{Z} & Y \\\\
& & a & b & c \\\\
\\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
"""
)
result = styler.to_latex()
assert expected in result
with option_context("styler.latex.multicol_align", "l"):
assert " & & \\multicolumn{2}{l}{Z} & Y \\\\" in styler.to_latex()
with option_context("styler.latex.multirow_align", "b"):
assert "\\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\" in styler.to_latex()
def test_multiindex_columns_hidden():
df = DataFrame([[1, 2, 3, 4]])
df.columns = MultiIndex.from_tuples([("A", 1), ("A", 2), ("A", 3), ("B", 1)])
s = df.style
assert "{tabular}{lrrrr}" in s.to_latex()
s.set_table_styles([]) # reset the position command
s.hide([("A", 2)], axis="columns")
assert "{tabular}{lrrr}" in s.to_latex()
@pytest.mark.parametrize(
"option, value",
[
("styler.sparse.index", True),
("styler.sparse.index", False),
("styler.sparse.columns", True),
("styler.sparse.columns", False),
],
)
def test_sparse_options(df_ext, option, value):
cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
df_ext.index, df_ext.columns = ridx, cidx
styler = df_ext.style
latex1 = styler.to_latex()
with option_context(option, value):
latex2 = styler.to_latex()
assert (latex1 == latex2) is value
def test_hidden_index(styler):
styler.hide(axis="index")
expected = dedent(
"""\
\\begin{tabular}{rrl}
A & B & C \\\\
0 & -0.61 & ab \\\\
1 & -1.22 & cd \\\\
\\end{tabular}
"""
)
assert styler.to_latex() == expected
@pytest.mark.parametrize("environment", ["table", "figure*", None])
def test_comprehensive(df_ext, environment):
# test as many low level features simultaneously as possible
cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
df_ext.index, df_ext.columns = ridx, cidx
stlr = df_ext.style
stlr.set_caption("mycap")
stlr.set_table_styles(
[
{"selector": "label", "props": ":{figยงitem}"},
{"selector": "position", "props": ":h!"},
{"selector": "position_float", "props": ":centering"},
{"selector": "column_format", "props": ":rlrlr"},
{"selector": "toprule", "props": ":toprule"},
{"selector": "midrule", "props": ":midrule"},
{"selector": "bottomrule", "props": ":bottomrule"},
{"selector": "rowcolors", "props": ":{3}{pink}{}"}, # custom command
]
)
stlr.highlight_max(axis=0, props="textbf:--rwrap;cellcolor:[rgb]{1,1,0.6}--rwrap")
stlr.highlight_max(axis=None, props="Huge:--wrap;", subset=[("Z", "a"), ("Z", "b")])
expected = (
"""\
\\begin{table}[h!]
\\centering
\\caption{mycap}
\\label{fig:item}
\\rowcolors{3}{pink}{}
\\begin{tabular}{rlrlr}
\\toprule
& & \\multicolumn{2}{r}{Z} & Y \\\\
& & a & b & c \\\\
\\midrule
\\multirow[c]{2}{*}{A} & a & 0 & \\textbf{\\cellcolor[rgb]{1,1,0.6}{-0.61}} & ab \\\\
& b & 1 & -1.22 & cd \\\\
B & c & \\textbf{\\cellcolor[rgb]{1,1,0.6}{{\\Huge 2}}} & -2.22 & """
"""\
\\textbf{\\cellcolor[rgb]{1,1,0.6}{de}} \\\\
\\bottomrule
\\end{tabular}
\\end{table}
"""
).replace("table", environment if environment else "table")
result = stlr.format(precision=2).to_latex(environment=environment)
assert result == expected
def test_environment_option(styler):
with option_context("styler.latex.environment", "bar-env"):
assert "\\begin{bar-env}" in styler.to_latex()
assert "\\begin{foo-env}" in styler.to_latex(environment="foo-env")
def test_parse_latex_table_styles(styler):
styler.set_table_styles(
[
{"selector": "foo", "props": [("attr", "value")]},
{"selector": "bar", "props": [("attr", "overwritten")]},
{"selector": "bar", "props": [("attr", "baz"), ("attr2", "ignored")]},
{"selector": "label", "props": [("", "{figยงitem}")]},
]
)
assert _parse_latex_table_styles(styler.table_styles, "bar") == "baz"
    # test '§' replaced by ':' [for CSS compatibility]
assert _parse_latex_table_styles(styler.table_styles, "label") == "{fig:item}"
def test_parse_latex_cell_styles_basic(): # test nesting
cell_style = [("itshape", "--rwrap"), ("cellcolor", "[rgb]{0,1,1}--rwrap")]
expected = "\\itshape{\\cellcolor[rgb]{0,1,1}{text}}"
assert _parse_latex_cell_styles(cell_style, "text") == expected
@pytest.mark.parametrize(
"wrap_arg, expected",
[ # test wrapping
("", "\\<command><options> <display_value>"),
("--wrap", "{\\<command><options> <display_value>}"),
("--nowrap", "\\<command><options> <display_value>"),
("--lwrap", "{\\<command><options>} <display_value>"),
("--dwrap", "{\\<command><options>}{<display_value>}"),
("--rwrap", "\\<command><options>{<display_value>}"),
],
)
def test_parse_latex_cell_styles_braces(wrap_arg, expected):
cell_style = [("<command>", f"<options>{wrap_arg}")]
assert _parse_latex_cell_styles(cell_style, "<display_value>") == expected
def test_parse_latex_header_span():
cell = {"attributes": 'colspan="3"', "display_value": "text", "cellstyle": []}
expected = "\\multicolumn{3}{Y}{text}"
assert _parse_latex_header_span(cell, "X", "Y") == expected
cell = {"attributes": 'rowspan="5"', "display_value": "text", "cellstyle": []}
expected = "\\multirow[X]{5}{*}{text}"
assert _parse_latex_header_span(cell, "X", "Y") == expected
cell = {"display_value": "text", "cellstyle": []}
    assert _parse_latex_header_span(cell, "X", "Y")
import sys, os, re
import numpy as np
import json
import csv
import matplotlib.pyplot as plt
import pandas as pd
class PARAMETERS_EXTRACTOR:
"""
This class is used to extract and analyze data from log files, which are generated by running management.py
"""
def __init__(self, dir, problem_set):
'''
:param dir: location of log
:param problem_set: tested problem
'''
self.target_dir = dir
self.problem_set = problem_set
# self.para_dir = para_dir
def getpara(self, system_name):
'''
        This function extracts the names of the hyper-parameters tuned in the test by reading the JSON file
        that defines the hyper-parameter space for the given system.
:param system_name:
:return:
'''
llist = []
# other parameter before the main group
if system_name == 'ponyge2' or system_name == 'PonyGE2':
para_file = '../util/hyper_para_list_PonyGE2.json'
llist.append('PROBLEM')
elif system_name == 'SGE':
para_file = '../util/hyper_para_list_SGE.json'
llist.append('PROBLEM')
elif system_name == 'GGES':
para_file = '../util/hyper_para_list_GGES.json'
llist.append('PROBLEM')
with open(para_file, 'r') as load_f:
data = json.load(load_f)
for parameter in data:
if parameter['name']:
llist.append(parameter['name'])
        # other parameters appended after the main group
if system_name == 'SGE':
llist.append('NUMBER_OF_ITERATIONS')
llist.append('EVAL_BUDGET')
llist.remove('MAX_REC_LEVEL')
if system_name == 'SGE':
llist.append('EVAL_BUDGET')
return llist
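
    # Illustrative only: getpara() relies on each entry of the hyper-parameter
    # JSON exposing a 'name' key. The document is assumed to look roughly like
    # the constant below; every field other than 'name' is hypothetical.
    _EXAMPLE_HYPER_PARA_JSON = [
        {"name": "POPULATION_SIZE", "type": "int", "range": [10, 1000]},
        {"name": "CROSSOVER_PROBABILITY", "type": "float", "range": [0.0, 1.0]},
    ]
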
def output_analyzer(self, f):
"""
        This function extracts data from the standard output of a test run.
        Because tests are launched with a command like 'nohup python3 management.py >log/output_node112_3.txt 2>&1 &',
        the standard output is stored in files named 'output_MACHINE_NAME_INDEX.txt'.
        :param f: file handle of an output log
        :return: A list with the following structure: [level1, level1, [...]]
Level1:(SYSTEM_NAME,[level2],[level2],[...])
Level2:[PROBLEM_NAME,[level3],[level3],[...]]
Level3:[[Iteration_number,Fitness_value],[Iteration_number,Fitness_value],[...]]
"""
recorder = [] # lv1
tuning_data_for_one_system = () # lv2
data_for_one_problem = [] # lv3
problem_index = 0
buffer_flat_space = ''
for line in f.readlines():
# find the system name to categorized all log.
if re.search(r'now testing ([A-Za-z0-9]*) system', line):
system_name = re.search(r'now testing ([A-Za-z0-9]*) system', line).group(1)
if len(tuning_data_for_one_system) != 0:
# in the case of it already have data, which mean we encounter another system.
# finish the data from previous one and rebuild new data list.
tuning_data_for_one_system[1].append(data_for_one_problem)
data_for_one_problem = [] # need to clear all used data
problem_index = 0
recorder.append(tuning_data_for_one_system)
tuning_data_for_one_system = (system_name, [])
else:
tuning_data_for_one_system = (system_name, [])
# catch stat of each iteration of hyper-parameter tuning
if re.search(r'iteration ([0-9]*), objective value:\s(\d+(\.\d+)?)', line):
cur_iteration_num = int(re.search(r'iteration ([0-9]*), objective value:\s(\d+(\.\d+)?)',
line).group(1))
cur_fitness_value = float(re.search(r'iteration ([0-9]*), objective value:\s(\d+(\.\d+)?)',
line).group(2))
if len(data_for_one_problem) == 0:
                    # if the lv3 list is still empty, record the problem name and start the first group of data
data_for_one_problem = [self.problem_set[problem_index], []]
problem_index += 1
data_for_one_problem[1].append([cur_iteration_num, cur_fitness_value])
elif data_for_one_problem[1][-1][0] < cur_iteration_num:
data_for_one_problem[1].append([cur_iteration_num, cur_fitness_value])
elif data_for_one_problem[1][-1][0] > cur_iteration_num:
                    # the iteration counter restarted: a new run (next problem) has begun
tuning_data_for_one_system[1].append(data_for_one_problem) # end the previous problem
data_for_one_problem = [self.problem_set[problem_index], []]
problem_index += 1
if problem_index >= len(self.problem_set):
problem_index = 0
data_for_one_problem[1].append([cur_iteration_num, cur_fitness_value])
if re.search(r'flat objective value after taking 2 samples', line):
                # handle the case where the optimizer reports a flat objective value (no usable data)
print('flat initial space for problem ', 'in', f.name, 'for system', system_name,
'This part of data will be ignored')
if buffer_flat_space != line:
problem_index += 1
buffer_flat_space = line
tuning_data_for_one_system[1].append(data_for_one_problem)
recorder.append(tuning_data_for_one_system)
# print(f.name,recorder)
return recorder
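
    # Illustrative only: with problem_set = ['problem_a', 'problem_b'] (made-up
    # names), output_analyzer() returns data shaped like the constant below.
    # Level 1 pairs a system with its problems, level 2 pairs a problem name
    # with its [iteration_number, fitness_value] pairs.
    _EXAMPLE_RECORDER = [
        ('PonyGE2', [['problem_a', [[1, 0.42], [2, 0.40]]],
                     ['problem_b', [[1, 0.97], [2, 0.95]]]]),
        ('SGE', [['problem_a', [[1, 0.55], [2, 0.51]]]]),
    ]
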
def out_analyzer(self, system_name, f,type):
'''
        In the benchmark test, for every problem the system stores its best-found hyper-parameter settings in a file
        named out_SYSTEMNAME_PROBLEM_TIME_MACHINENAME.txt.
        This function extracts the best-found hyper-parameter setting stored in one such file f.
        Depending on `type`, either a list of values or a dict mapping parameter names to their values is returned.
:param system_name:
:param f: file handle
:param type: returned type
:return:
'''
while 1:
line = f.readline()
if not line:
break
if line.startswith("xopt"):
tmp = line.strip("xopt: [").replace(']\n', '\n').replace("'", "").replace(" ", "")
parameter_dict = dict(zip(self.getpara(system_name), tmp.split(',')))
if type=='list':
return tmp.split(',')
elif type=='dict':
return parameter_dict
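
    # Illustrative only: out_analyzer() looks for a line of roughly this form
    # in the tuner's output (the values below are made up); it is split on ','
    # and zipped against getpara(system_name) to build the parameter dict.
    _EXAMPLE_XOPT_LINE = "xopt: ['500', '0.9', '0.01', 'tournament']\n"
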
def csv_writer(self, data):
'''
        This function writes the extracted fitness data into CSV files for further use.
:param data:
:return:
'''
if not os.path.exists('tmp'):
os.mkdir('tmp/')
for system in data:
# print(system[0]) #system[0] is system name
for problem in system[1]:
data_to_write = []
for element in problem[1]:
data_to_write.append(element[1])
with open('tmp/' + system[0] + '_' + problem[0] + '.csv', 'a') as csv_file:
writer = csv.writer(csv_file, dialect='excel')
writer.writerow(data_to_write)
def run(self):
'''
        Main function of class PARAMETERS_EXTRACTOR;
        all other extraction functions are called from here.
:return:
'''
files = os.listdir(self.target_dir)
for file in files:
with open(os.path.join(os.getcwd(), self.target_dir, file), encoding='utf-8') as f:
if file.startswith('output'):
                    # This file is the standard output of a test (nohup command); extract all data and write them to CSV.
# pass
data_of_one_output = self.output_analyzer(f)
self.csv_writer(data_of_one_output)
elif file.startswith('out_'):
# print('current file is ', file)
system_name = re.search(r'out_([A-Za-z0-9]*)_([A-Za-z0-9]*)_', file).group(1)
problem_name = re.search(r'out_([A-Za-z0-9]*)_([A-Za-z0-9_]*)_', file).group(2)
data_of_out_log=self.out_analyzer(system_name,f,'list')
if not os.path.exists('tmp_para'):
os.mkdir('tmp_para/')
with open('tmp_para/configurations_'+system_name+'_'+problem_name+'.csv','a') as record_file:
# print(data_of_out_log)
writer = csv.writer(record_file)
writer.writerow(data_of_out_log[0:-1])
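
def _demo_parameters_extractor():
    """Illustrative usage sketch only; 'log/' and the problem names are
    placeholders, not taken from the original experiments."""
    extractor = PARAMETERS_EXTRACTOR('log/', ['problem_a', 'problem_b'])
    extractor.run()                     # writes tmp/*.csv and tmp_para/*.csv
    system_analyzer(target_dir='tmp/')  # compare the tuned systems per problem
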
def system_analyzer(target_dir='tmp/', show=False):
'''
    Draw a comparison graph of the different systems for each problem.
    :param target_dir: The directory that stores the formatted fitness data collected during configuration tuning.
:return:
'''
problem_set = []
system_set = []
files = os.listdir(target_dir)
# first analyze the system and problem
for file in files:
if file.startswith('.'):
continue
tmp_sys = re.search(r'([A-Za-z0-9]*)_([A-Za-z0-9_]*).csv', file).group(1)
tmp_pro = re.search(r'([A-Za-z0-9]*)_([A-Za-z0-9_]*).csv', file).group(2)
if not tmp_sys in system_set:
system_set.append(tmp_sys)
if not tmp_pro in problem_set:
problem_set.append(tmp_pro)
for problem_tested in problem_set:
plt.title(problem_tested)
for system_tested in system_set:
filename = system_tested + '_' + problem_tested + '.csv'
cur_data = pd.read_csv(target_dir + filename, header=None, delimiter=',')
# cur_data is a DataFrame
# -----------------no deviation version-----------------------
# cur_data.mean(0).plot(label=system_tested)
# -----------------no deviation version-----------------------
# -----------------with std_dev version-----------------------
mean_value = cur_data.mean(0)
std_value = cur_data.std(0)
x = np.arange(0,100,1)
y = mean_value
y1 = mean_value+std_value
y2 = mean_value-std_value
if system_tested == 'PonyGE2':
plt.plot(x, y, color='orange', label='PonyGE2')
plt.plot(x, y1, color='orange', alpha=0.4, linestyle='dotted')
plt.plot(x, y2, color='orange', alpha=0.4, linestyle='dotted')
elif system_tested=='SGE':
plt.plot(x, y, color='blue', label='SGE')
plt.plot(x, y1, color='blue', alpha=0.4, linestyle='dotted')
plt.plot(x, y2, color='blue', alpha=0.4, linestyle='dotted')
else:
pass
# in the case new system added, please modify this part.
# -----------------with std_dev version-----------------------
# -----------------For thesis data-----------------------
from scipy.stats import ttest_ind
print('final result for **',problem_tested,'** in @@',system_tested, '@@ is ',pd.Series.as_matrix(y)[-1])
            print(problem_tested, ' in ', system_tested, 'first result=', pd.Series.as_matrix(y)[0],
                  'std_dev_0=', pd.Series.as_matrix(std_value)[0],
                  'final result=', pd.Series.as_matrix(y))
import random
import unittest
from collections import namedtuple
from copy import deepcopy
from itertools import chain, product
from unittest.mock import MagicMock
import modAL.acquisition
import modAL.batch
import modAL.density
import modAL.disagreement
import modAL.dropout
import modAL.expected_error
import modAL.models.base
import modAL.models.learners
import modAL.multilabel
import modAL.uncertainty
import modAL.utils.combination
import modAL.utils.selection
import modAL.utils.validation
import numpy as np
import pandas as pd
import torch
from scipy import sparse as sp
from scipy.special import ndtr
from scipy.stats import entropy, norm
from sklearn.ensemble import RandomForestClassifier
from sklearn.exceptions import NotFittedError
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.metrics import confusion_matrix
from sklearn.multiclass import OneVsRestClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
from sklearn.svm import SVC
from skorch import NeuralNetClassifier
from torch import nn
import mock
Test = namedtuple('Test', ['input', 'output'])
def random_array(shape, n_arrays):
for _ in range(n_arrays):
yield np.random.rand(*shape)
class TestUtils(unittest.TestCase):
def test_check_class_labels(self):
for n_labels in range(1, 10):
for n_learners in range(1, 10):
# 1. test fitted estimators
labels = np.random.randint(10, size=n_labels)
different_labels = np.random.randint(
10, 20, size=np.random.randint(1, 10))
learner_list_1 = [mock.MockEstimator(
classes_=labels) for _ in range(n_learners)]
learner_list_2 = [mock.MockEstimator(
classes_=different_labels) for _ in range(np.random.randint(1, 5))]
shuffled_learners = random.sample(
learner_list_1 + learner_list_2, len(learner_list_1 + learner_list_2))
self.assertTrue(
modAL.utils.validation.check_class_labels(*learner_list_1))
self.assertFalse(
modAL.utils.validation.check_class_labels(*shuffled_learners))
# 2. test unfitted estimators
unfitted_learner_list = [mock.MockEstimator(
classes_=labels) for _ in range(n_learners)]
idx = np.random.randint(0, n_learners)
unfitted_learner_list.insert(
idx, mock.MockEstimator(fitted=False))
self.assertRaises(
NotFittedError, modAL.utils.validation.check_class_labels, *unfitted_learner_list)
def test_check_class_proba(self):
for n_labels in range(2, 20):
# when all classes are known:
proba = np.random.rand(100, n_labels)
class_labels = list(range(n_labels))
np.testing.assert_almost_equal(
modAL.utils.check_class_proba(
proba, known_labels=class_labels, all_labels=class_labels),
proba
)
for unknown_idx in range(n_labels):
all_labels = list(range(n_labels))
known_labels = deepcopy(all_labels)
known_labels.remove(unknown_idx)
aug_proba = np.insert(
proba[:, known_labels], unknown_idx, np.zeros(len(proba)), axis=1)
np.testing.assert_almost_equal(
modAL.utils.check_class_proba(
proba[:, known_labels], known_labels=known_labels, all_labels=all_labels),
aug_proba
)
def test_linear_combination(self):
def dummy_function(X_in):
return np.ones(shape=(len(X_in), 1))
for n_samples in range(2, 10):
for n_features in range(1, 10):
for n_functions in range(2, 10):
functions = [dummy_function for _ in range(n_functions)]
linear_combination = modAL.utils.combination.make_linear_combination(
*functions)
X_in = np.random.rand(n_samples, n_features)
if n_samples == 1:
true_result = float(n_functions)
else:
true_result = n_functions*np.ones(shape=(n_samples, 1))
try:
np.testing.assert_almost_equal(
linear_combination(X_in), true_result)
except:
linear_combination(X_in)
def test_product(self):
for n_dim in range(1, 5):
shape = tuple([10] + [2 for _ in range(n_dim-1)])
X_in = 2*np.ones(shape=shape)
for n_functions in range(1, 10):
functions = [(lambda x: x) for _ in range(n_functions)]
# linear combination without weights
product = modAL.utils.combination.make_product(*functions)
np.testing.assert_almost_equal(
product(X_in),
X_in**n_functions
)
# linear combination with weights
exponents = np.random.rand(n_functions)
exp_product = modAL.utils.combination.make_product(
*functions, exponents=exponents)
np.testing.assert_almost_equal(
exp_product(X_in),
np.prod([X_in**exponent for exponent in exponents], axis=0)
)
def test_make_query_strategy(self):
query_strategy = modAL.utils.combination.make_query_strategy(
utility_measure=modAL.uncertainty.classifier_uncertainty,
selector=modAL.utils.selection.multi_argmax
)
for n_samples in range(1, 10):
for n_classes in range(1, 10):
proba = np.random.rand(n_samples, n_classes)
proba = proba/np.sum(proba, axis=1).reshape(n_samples, 1)
X = np.random.rand(n_samples, 3)
learner = modAL.models.learners.ActiveLearner(
estimator=mock.MockEstimator(predict_proba_return=proba)
)
query_1 = query_strategy(learner, X)
query_2 = modAL.uncertainty.uncertainty_sampling(learner, X)
np.testing.assert_equal(query_1, query_2)
def test_data_vstack(self):
for n_samples, n_features in product(range(1, 10), range(1, 10)):
# numpy arrays
a, b = np.random.rand(n_samples, n_features), np.random.rand(
n_samples, n_features)
np.testing.assert_almost_equal(
modAL.utils.data.data_vstack((a, b)),
np.concatenate((a, b))
)
# sparse matrices
for format in ['lil', 'csc', 'csr']:
a, b = sp.random(n_samples, n_features, format=format), sp.random(
n_samples, n_features, format=format)
self.assertEqual((modAL.utils.data.data_vstack(
(a, b)) != sp.vstack((a, b))).sum(), 0)
# lists
a, b = np.random.rand(n_samples, n_features).tolist(), np.random.rand(
n_samples, n_features).tolist()
np.testing.assert_almost_equal(
modAL.utils.data.data_vstack((a, b)),
np.concatenate((a, b))
)
# torch.Tensors
a, b = torch.ones(2, 2), torch.ones(2, 2)
torch.testing.assert_allclose(
modAL.utils.data.data_vstack((a, b)),
torch.cat((a, b))
)
# not supported formats
self.assertRaises(TypeError, modAL.utils.data.data_vstack, (1, 1))
# functions from modALu.tils.selection
def test_multi_argmax(self):
for n_pool in range(2, 100):
for n_instances in range(1, n_pool+1):
utility = np.zeros(n_pool)
max_idx = np.random.choice(
range(n_pool), size=n_instances, replace=False)
utility[max_idx] = 1e-10 + np.random.rand(n_instances, )
np.testing.assert_equal(
np.sort(modAL.utils.selection.multi_argmax(
utility, n_instances)),
(np.sort(max_idx), np.sort(utility)
[len(utility)-n_instances:])
)
def test_shuffled_argmax(self):
for n_pool in range(1, 100):
for n_instances in range(1, n_pool+1):
values = np.random.permutation(n_pool)
true_query_idx = np.argsort(values)[len(values)-n_instances:]
true_values = np.sort(values, axis=None)[
len(values)-n_instances:]
np.testing.assert_equal(
(true_query_idx, true_values),
modAL.utils.selection.shuffled_argmax(values, n_instances)
)
def test_weighted_random(self):
for n_pool in range(2, 100):
for n_instances in range(1, n_pool):
utility = np.ones(n_pool)
query_idx = modAL.utils.selection.weighted_random(
utility, n_instances)
# testing for correct number of returned indices
np.testing.assert_equal(len(query_idx), n_instances)
# testing for uniqueness of each query index
np.testing.assert_equal(
len(query_idx), len(np.unique(query_idx)))
class TestAcquisitionFunctions(unittest.TestCase):
def test_acquisition_functions(self):
for n_samples in range(1, 100):
mean, std = np.random.rand(100, 1), np.random.rand(100, 1)
modAL.acquisition.PI(mean, std, 0, 0)
modAL.acquisition.EI(mean, std, 0, 0)
modAL.acquisition.UCB(mean, std, 0)
mean, std = np.random.rand(100, ), np.random.rand(100, )
modAL.acquisition.PI(mean, std, 0, 0)
modAL.acquisition.EI(mean, std, 0, 0)
modAL.acquisition.UCB(mean, std, 0)
def test_optimizer_PI(self):
for n_samples in range(1, 100):
mean = np.random.rand(n_samples, )
std = np.random.rand(n_samples, )
tradeoff = np.random.rand()
max_val = np.random.rand()
# 1. fitted estimator
mock_estimator = mock.MockEstimator(predict_return=(mean, std))
optimizer = modAL.models.learners.BayesianOptimizer(
estimator=mock_estimator)
optimizer._set_max([0], [max_val])
true_PI = ndtr((mean - max_val - tradeoff)/std)
np.testing.assert_almost_equal(
true_PI,
modAL.acquisition.optimizer_PI(
optimizer, np.random.rand(n_samples, 2), tradeoff)
)
# 2. unfitted estimator
mock_estimator = mock.MockEstimator(fitted=False)
optimizer = modAL.models.learners.BayesianOptimizer(
estimator=mock_estimator)
optimizer._set_max([0], [max_val])
true_PI = ndtr((np.zeros(shape=(len(mean), 1)) -
max_val - tradeoff) / np.ones(shape=(len(mean), 1)))
np.testing.assert_almost_equal(
true_PI,
modAL.acquisition.optimizer_PI(
optimizer, np.random.rand(n_samples, 2), tradeoff)
)
def test_optimizer_EI(self):
for n_samples in range(1, 100):
mean = np.random.rand(n_samples, )
std = np.random.rand(n_samples, )
tradeoff = np.random.rand()
max_val = np.random.rand()
# 1. fitted estimator
mock_estimator = mock.MockEstimator(
predict_return=(mean, std)
)
optimizer = modAL.models.learners.BayesianOptimizer(
estimator=mock_estimator)
optimizer._set_max([0], [max_val])
true_EI = (mean - optimizer.y_max - tradeoff) * ndtr((mean - optimizer.y_max - tradeoff) / std) \
+ std * norm.pdf((mean - optimizer.y_max - tradeoff) / std)
np.testing.assert_almost_equal(
true_EI,
modAL.acquisition.optimizer_EI(
optimizer, np.random.rand(n_samples, 2), tradeoff)
)
# 2. unfitted estimator
mock_estimator = mock.MockEstimator(fitted=False)
optimizer = modAL.models.learners.BayesianOptimizer(
estimator=mock_estimator)
optimizer._set_max([0], [max_val])
true_EI = (np.zeros(shape=(len(mean), 1)) - optimizer.y_max - tradeoff) * ndtr((np.zeros(shape=(len(mean), 1)) - optimizer.y_max - tradeoff) / np.ones(shape=(len(mean), 1))) \
+ np.ones(shape=(len(mean), 1)) * norm.pdf((np.zeros(shape=(len(mean), 1)
) - optimizer.y_max - tradeoff) / np.ones(shape=(len(mean), 1)))
np.testing.assert_almost_equal(
true_EI,
modAL.acquisition.optimizer_EI(
optimizer, np.random.rand(n_samples, 2), tradeoff)
)
def test_optimizer_UCB(self):
for n_samples in range(1, 100):
mean = np.random.rand(n_samples, )
std = np.random.rand(n_samples, )
beta = np.random.rand()
# 1. fitted estimator
mock_estimator = mock.MockEstimator(
predict_return=(mean, std)
)
optimizer = modAL.models.learners.BayesianOptimizer(
estimator=mock_estimator)
true_UCB = mean + beta*std
np.testing.assert_almost_equal(
true_UCB,
modAL.acquisition.optimizer_UCB(
optimizer, np.random.rand(n_samples, 2), beta)
)
# 2. unfitted estimator
mock_estimator = mock.MockEstimator(fitted=False)
optimizer = modAL.models.learners.BayesianOptimizer(
estimator=mock_estimator)
true_UCB = np.zeros(shape=(len(mean), 1)) + \
beta * np.ones(shape=(len(mean), 1))
np.testing.assert_almost_equal(
true_UCB,
modAL.acquisition.optimizer_UCB(
optimizer, np.random.rand(n_samples, 2), beta)
)
def test_selection(self):
for n_samples in range(1, 100):
for n_instances in range(1, n_samples):
X = np.random.rand(n_samples, 3)
mean = np.random.rand(n_samples, )
std = np.random.rand(n_samples, )
max_val = np.random.rand()
mock_estimator = mock.MockEstimator(
predict_return=(mean, std)
)
optimizer = modAL.models.learners.BayesianOptimizer(
estimator=mock_estimator)
optimizer._set_max([0], [max_val])
modAL.acquisition.max_PI(
optimizer, X, tradeoff=np.random.rand(), n_instances=n_instances)
modAL.acquisition.max_EI(
optimizer, X, tradeoff=np.random.rand(), n_instances=n_instances)
modAL.acquisition.max_UCB(
optimizer, X, beta=np.random.rand(), n_instances=n_instances)
class TestDensity(unittest.TestCase):
def test_similarize_distance(self):
from scipy.spatial.distance import cosine
sim = modAL.density.similarize_distance(cosine)
for _ in range(100):
for n_dim in range(1, 10):
X_1, X_2 = np.random.rand(n_dim), np.random.rand(n_dim)
np.testing.assert_almost_equal(
sim(X_1, X_2),
1/(1 + cosine(X_1, X_2))
)
def test_information_density(self):
for n_samples in range(1, 10):
for n_dim in range(1, 10):
X_pool = np.random.rand(n_samples, n_dim)
similarities = modAL.density.information_density(X_pool)
np.testing.assert_equal(len(similarities), n_samples)
class TestDisagreements(unittest.TestCase):
def test_vote_entropy(self):
for n_samples in range(1, 10):
for n_classes in range(1, 10):
for true_query_idx in range(n_samples):
# 1. fitted committee
vote_return = np.zeros(
shape=(n_samples, n_classes), dtype=np.int16)
vote_return[true_query_idx] = np.asarray(
range(n_classes), dtype=np.int16)
committee = mock.MockCommittee(classes_=np.asarray(
range(n_classes)), vote_return=vote_return)
vote_entr = modAL.disagreement.vote_entropy(
committee, np.random.rand(n_samples, n_classes)
)
true_entropy = np.zeros(shape=(n_samples, ))
true_entropy[true_query_idx] = entropy(
np.ones(n_classes)/n_classes)
np.testing.assert_array_almost_equal(
vote_entr, true_entropy)
# 2. unfitted committee
committee = mock.MockCommittee(fitted=False)
true_entropy = np.zeros(shape=(n_samples,))
vote_entr = modAL.disagreement.vote_entropy(
committee, np.random.rand(n_samples, n_classes)
)
np.testing.assert_almost_equal(vote_entr, true_entropy)
def test_consensus_entropy(self):
for n_samples in range(1, 10):
for n_classes in range(2, 10):
for true_query_idx in range(n_samples):
# 1. fitted committee
proba = np.zeros(shape=(n_samples, n_classes))
proba[:, 0] = 1.0
proba[true_query_idx] = np.ones(n_classes)/n_classes
committee = mock.MockCommittee(predict_proba_return=proba)
consensus_entropy = modAL.disagreement.consensus_entropy(
committee, np.random.rand(n_samples, n_classes)
)
true_entropy = np.zeros(shape=(n_samples,))
true_entropy[true_query_idx] = entropy(
np.ones(n_classes) / n_classes)
np.testing.assert_array_almost_equal(
consensus_entropy, true_entropy)
# 2. unfitted committee
committee = mock.MockCommittee(fitted=False)
true_entropy = np.zeros(shape=(n_samples,))
consensus_entropy = modAL.disagreement.consensus_entropy(
committee, np.random.rand(n_samples, n_classes)
)
np.testing.assert_almost_equal(
consensus_entropy, true_entropy)
def test_KL_max_disagreement(self):
for n_samples in range(1, 10):
for n_classes in range(2, 10):
for n_learners in range(2, 10):
# 1. fitted committee
vote_proba = np.zeros(
shape=(n_samples, n_learners, n_classes))
vote_proba[:, :, 0] = 1.0
committee = mock.MockCommittee(
n_learners=n_learners, classes_=range(n_classes),
vote_proba_return=vote_proba
)
true_KL_disagreement = np.zeros(shape=(n_samples, ))
try:
np.testing.assert_array_almost_equal(
true_KL_disagreement,
modAL.disagreement.KL_max_disagreement(
committee, np.random.rand(n_samples, 1))
)
except:
modAL.disagreement.KL_max_disagreement(
committee, np.random.rand(n_samples, 1))
# 2. unfitted committee
committee = mock.MockCommittee(fitted=False)
true_KL_disagreement = np.zeros(shape=(n_samples,))
returned_KL_disagreement = modAL.disagreement.KL_max_disagreement(
committee, np.random.rand(n_samples, n_classes)
)
np.testing.assert_almost_equal(
returned_KL_disagreement, true_KL_disagreement)
def test_vote_entropy_sampling(self):
for n_samples, n_features, n_classes in product(range(1, 10), range(1, 10), range(1, 10)):
committee = mock.MockCommittee(classes_=np.asarray(range(n_classes)),
vote_return=np.zeros(shape=(n_samples, n_classes), dtype=np.int16))
modAL.disagreement.vote_entropy_sampling(
committee, np.random.rand(n_samples, n_features))
modAL.disagreement.vote_entropy_sampling(committee, np.random.rand(n_samples, n_features),
random_tie_break=True)
def test_consensus_entropy_sampling(self):
for n_samples, n_features, n_classes in product(range(1, 10), range(1, 10), range(1, 10)):
committee = mock.MockCommittee(
predict_proba_return=np.random.rand(n_samples, n_classes))
modAL.disagreement.consensus_entropy_sampling(
committee, np.random.rand(n_samples, n_features))
modAL.disagreement.consensus_entropy_sampling(committee, np.random.rand(n_samples, n_features),
random_tie_break=True)
def test_max_disagreement_sampling(self):
for n_samples, n_features, n_classes, n_learners in product(range(1, 10), range(1, 10), range(1, 10), range(2, 5)):
committee = mock.MockCommittee(
n_learners=n_learners, classes_=range(n_classes),
vote_proba_return=np.zeros(
shape=(n_samples, n_learners, n_classes))
)
modAL.disagreement.max_disagreement_sampling(
committee, np.random.rand(n_samples, n_features))
modAL.disagreement.max_disagreement_sampling(committee, np.random.rand(n_samples, n_features),
random_tie_break=True)
def test_max_std_sampling(self):
for n_samples, n_features in product(range(1, 10), range(1, 10)):
regressor = GaussianProcessRegressor()
regressor.fit(np.random.rand(n_samples, n_features),
np.random.rand(n_samples))
modAL.disagreement.max_std_sampling(
regressor, np.random.rand(n_samples, n_features))
modAL.disagreement.max_std_sampling(regressor, np.random.rand(n_samples, n_features),
random_tie_break=True)
class TestEER(unittest.TestCase):
def test_eer(self):
for n_pool, n_features, n_classes in product(range(5, 10), range(1, 5), range(2, 5)):
X_training_, y_training = np.random.rand(
10, n_features).tolist(), np.random.randint(0, n_classes, size=10)
X_pool_, y_pool = np.random.rand(n_pool, n_features).tolist(
), np.random.randint(0, n_classes+1, size=n_pool)
for data_type in (sp.csr_matrix, pd.DataFrame, np.array, list):
X_training, X_pool = data_type(X_training_), data_type(X_pool_)
learner = modAL.models.ActiveLearner(RandomForestClassifier(n_estimators=2),
X_training=X_training, y_training=y_training)
modAL.expected_error.expected_error_reduction(learner, X_pool)
modAL.expected_error.expected_error_reduction(
learner, X_pool, random_tie_break=True)
modAL.expected_error.expected_error_reduction(
learner, X_pool, p_subsample=0.1)
modAL.expected_error.expected_error_reduction(
learner, X_pool, loss='binary')
modAL.expected_error.expected_error_reduction(
learner, X_pool, p_subsample=0.1, loss='log')
self.assertRaises(AssertionError, modAL.expected_error.expected_error_reduction,
learner, X_pool, p_subsample=1.5)
self.assertRaises(AssertionError, modAL.expected_error.expected_error_reduction,
learner, X_pool, loss=42)
class TestUncertainties(unittest.TestCase):
def test_classifier_uncertainty(self):
test_cases = (Test(p * np.ones(shape=(k, l)), (1 - p) * np.ones(shape=(k, )))
for k in range(1, 100) for l in range(1, 10) for p in np.linspace(0, 1, 11))
for case in test_cases:
# testing _proba_uncertainty
np.testing.assert_almost_equal(
modAL.uncertainty._proba_uncertainty(case.input),
case.output
)
# fitted estimator
fitted_estimator = mock.MockEstimator(
predict_proba_return=case.input)
np.testing.assert_almost_equal(
modAL.uncertainty.classifier_uncertainty(
fitted_estimator, np.random.rand(10)),
case.output
)
# not fitted estimator
not_fitted_estimator = mock.MockEstimator(fitted=False)
np.testing.assert_almost_equal(
modAL.uncertainty.classifier_uncertainty(
not_fitted_estimator, case.input),
np.ones(shape=(len(case.output)))
)
def test_classifier_margin(self):
test_cases_1 = (Test(p * np.ones(shape=(k, l)), np.zeros(shape=(k,)))
for k in range(1, 100) for l in range(1, 10) for p in np.linspace(0, 1, 11))
test_cases_2 = (Test(p * np.tile(np.asarray(range(k))+1.0, l).reshape(l, k),
p * np.ones(shape=(l, ))*int(k != 1))
for k in range(1, 10) for l in range(1, 100) for p in np.linspace(0, 1, 11))
for case in chain(test_cases_1, test_cases_2):
# _proba_margin
np.testing.assert_almost_equal(
modAL.uncertainty._proba_margin(case.input),
case.output
)
# fitted estimator
fitted_estimator = mock.MockEstimator(
predict_proba_return=case.input)
np.testing.assert_almost_equal(
modAL.uncertainty.classifier_margin(
fitted_estimator, np.random.rand(10)),
case.output
)
# not fitted estimator
not_fitted_estimator = mock.MockEstimator(fitted=False)
np.testing.assert_almost_equal(
modAL.uncertainty.classifier_margin(
not_fitted_estimator, case.input),
np.zeros(shape=(len(case.output)))
)
def test_classifier_entropy(self):
for n_samples in range(1, 100):
for n_classes in range(1, 20):
proba = np.zeros(shape=(n_samples, n_classes))
for sample_idx in range(n_samples):
proba[sample_idx, np.random.choice(range(n_classes))] = 1.0
# _proba_entropy
np.testing.assert_almost_equal(
modAL.uncertainty._proba_entropy(proba),
np.zeros(shape=(n_samples,))
)
# fitted estimator
fitted_estimator = mock.MockEstimator(
predict_proba_return=proba)
np.testing.assert_equal(
modAL.uncertainty.classifier_entropy(
fitted_estimator, np.random.rand(n_samples, 1)),
np.zeros(shape=(n_samples, ))
)
# not fitted estimator
not_fitted_estimator = mock.MockEstimator(fitted=False)
np.testing.assert_almost_equal(
modAL.uncertainty.classifier_entropy(
not_fitted_estimator, np.random.rand(n_samples, 1)),
np.zeros(shape=(n_samples, ))
)
def test_uncertainty_sampling(self):
for n_samples in range(1, 10):
for n_classes in range(1, 10):
max_proba = np.zeros(n_classes)
for true_query_idx in range(n_samples):
predict_proba = np.random.rand(n_samples, n_classes)
predict_proba[true_query_idx] = max_proba
classifier = mock.MockEstimator(
predict_proba_return=predict_proba)
query_idx, query_metric = modAL.uncertainty.uncertainty_sampling(
classifier, np.random.rand(n_samples, n_classes)
)
shuffled_query_idx, shuffled_query_metric = modAL.uncertainty.uncertainty_sampling(
classifier, np.random.rand(n_samples, n_classes),
random_tie_break=True
)
np.testing.assert_array_equal(query_idx, true_query_idx)
np.testing.assert_array_equal(
shuffled_query_idx, true_query_idx)
def test_margin_sampling(self):
for n_samples in range(1, 10):
for n_classes in range(2, 10):
for true_query_idx in range(n_samples):
predict_proba = np.zeros(shape=(n_samples, n_classes))
predict_proba[:, 0] = 1.0
predict_proba[true_query_idx, 0] = 0.0
classifier = mock.MockEstimator(
predict_proba_return=predict_proba)
query_idx, query_metric = modAL.uncertainty.margin_sampling(
classifier, np.random.rand(n_samples, n_classes)
)
shuffled_query_idx, shuffled_query_metric = modAL.uncertainty.margin_sampling(
classifier, np.random.rand(n_samples, n_classes),
random_tie_break=True
)
np.testing.assert_array_equal(query_idx, true_query_idx)
np.testing.assert_array_equal(
shuffled_query_idx, true_query_idx)
def test_entropy_sampling(self):
for n_samples in range(1, 10):
for n_classes in range(2, 10):
max_proba = np.ones(n_classes)/n_classes
for true_query_idx in range(n_samples):
predict_proba = np.zeros(shape=(n_samples, n_classes))
predict_proba[:, 0] = 1.0
predict_proba[true_query_idx] = max_proba
classifier = mock.MockEstimator(
predict_proba_return=predict_proba)
query_idx, query_metric = modAL.uncertainty.entropy_sampling(
classifier, np.random.rand(n_samples, n_classes)
)
shuffled_query_idx, shuffled_query_metric = modAL.uncertainty.entropy_sampling(
classifier, np.random.rand(n_samples, n_classes),
random_tie_break=True
)
np.testing.assert_array_equal(query_idx, true_query_idx)
np.testing.assert_array_equal(
shuffled_query_idx, true_query_idx)
# PyTorch model for test cases --> Do not change the layers
class Torch_Model(nn.Module):
def __init__(self,):
super(Torch_Model, self).__init__()
self.convs = nn.Sequential(
nn.Conv2d(1, 32, 3),
nn.ReLU(),
nn.Conv2d(32, 64, 3),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Dropout(0.25)
)
self.fcs = nn.Sequential(
nn.Linear(12*12*64, 128),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(128, 10),
)
def forward(self, x):
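        # forward returns its input unchanged: the dropout tests below feed plain random
        # tensors that are used directly as 'logits', and the conv/fc stacks above only
        # supply the Dropout layers toggled by test_set_dropout_mode and the dropout helpers.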
return x
class TestDropout(unittest.TestCase):
def setUp(self):
self.skorch_classifier = NeuralNetClassifier(Torch_Model,
criterion=torch.nn.CrossEntropyLoss,
optimizer=torch.optim.Adam,
train_split=None,
verbose=1)
def test_mc_dropout_bald(self):
learner = modAL.models.learners.DeepActiveLearner(
estimator=self.skorch_classifier,
query_strategy=modAL.dropout.mc_dropout_bald,
)
for random_tie_break in [True, False]:
for num_cycles, sample_per_forward_pass in product(range(1, 5), range(1, 5)):
for n_samples, n_classes in product(range(1, 5), range(1, 5)):
for n_instances in range(1, n_samples):
X_pool = torch.randn(n_samples, n_classes)
modAL.dropout.mc_dropout_bald(learner, X_pool, n_instances, random_tie_break, [],
num_cycles, sample_per_forward_pass)
def test_mc_dropout_mean_st(self):
learner = modAL.models.learners.DeepActiveLearner(
estimator=self.skorch_classifier,
query_strategy=modAL.dropout.mc_dropout_mean_st,
)
for random_tie_break in [True, False]:
for num_cycles, sample_per_forward_pass in product(range(1, 5), range(1, 5)):
for n_samples, n_classes in product(range(1, 5), range(1, 5)):
for n_instances in range(1, n_samples):
X_pool = torch.randn(n_samples, n_classes)
modAL.dropout.mc_dropout_mean_st(learner, X_pool, n_instances, random_tie_break, [],
num_cycles, sample_per_forward_pass)
def test_mc_dropout_max_entropy(self):
learner = modAL.models.learners.DeepActiveLearner(
estimator=self.skorch_classifier,
query_strategy=modAL.dropout.mc_dropout_max_entropy,
)
for random_tie_break in [True, False]:
for num_cycles, sample_per_forward_pass in product(range(1, 5), range(1, 5)):
for n_samples, n_classes in product(range(1, 5), range(1, 5)):
for n_instances in range(1, n_samples):
X_pool = torch.randn(n_samples, n_classes)
modAL.dropout.mc_dropout_max_entropy(learner, X_pool, n_instances, random_tie_break, [],
num_cycles, sample_per_forward_pass)
def test_mc_dropout_max_variationRatios(self):
learner = modAL.models.learners.DeepActiveLearner(
estimator=self.skorch_classifier,
query_strategy=modAL.dropout.mc_dropout_max_variationRatios,
)
for random_tie_break in [True, False]:
for num_cycles, sample_per_forward_pass in product(range(1, 5), range(1, 5)):
for n_samples, n_classes in product(range(1, 5), range(1, 5)):
for n_instances in range(1, n_samples):
X_pool = torch.randn(n_samples, n_classes)
modAL.dropout.mc_dropout_max_variationRatios(learner, X_pool, n_instances, random_tie_break, [],
num_cycles, sample_per_forward_pass)
def test_get_predictions(self):
X = torch.randn(100, 1)
learner = modAL.models.learners.DeepActiveLearner(
estimator=self.skorch_classifier,
query_strategy=mock.MockFunction(return_val=None),
)
# num predictions tests
for num_predictions in range(1, 20):
for samples_per_forward_pass in range(1, 10):
predictions = modAL.dropout.get_predictions(
learner, X, dropout_layer_indexes=[],
num_predictions=num_predictions,
sample_per_forward_pass=samples_per_forward_pass)
self.assertEqual(len(predictions), num_predictions)
self.assertRaises(AssertionError, modAL.dropout.get_predictions,
learner, X, dropout_layer_indexes=[],
num_predictions=-1,
sample_per_forward_pass=0)
self.assertRaises(AssertionError, modAL.dropout.get_predictions,
learner, X, dropout_layer_indexes=[],
num_predictions=10,
sample_per_forward_pass=-5)
# logits adapter function test
for samples, classes, subclasses in product(range(1, 10), range(1, 10), range(1, 10)):
input_shape = (samples, classes, subclasses)
desired_shape = (input_shape[0], np.prod(input_shape[1:]))
X_adaption_needed = torch.randn(input_shape)
def logits_adaptor(input_tensor, data): return torch.flatten(
input_tensor, start_dim=1)
predictions = modAL.dropout.get_predictions(
learner, X_adaption_needed, dropout_layer_indexes=[],
num_predictions=num_predictions,
sample_per_forward_pass=samples_per_forward_pass,
logits_adaptor=logits_adaptor)
self.assertEqual(predictions[0].shape, desired_shape)
def test_set_dropout_mode(self):
# set dropmout mode for all dropout layers
for train_mode in [True, False]:
model = Torch_Model()
modules = list(model.modules())
for module in modules:
self.assertEqual(module.training, True)
modAL.dropout.set_dropout_mode(model, [], train_mode)
self.assertEqual(modules[7].training, train_mode)
self.assertEqual(modules[11].training, train_mode)
# set dropout mode only for special layers:
for train_mode in [True, False]:
model = Torch_Model()
modules = list(model.modules())
modAL.dropout.set_dropout_mode(model, [7], train_mode)
self.assertEqual(modules[7].training, train_mode)
self.assertEqual(modules[11].training, True)
modAL.dropout.set_dropout_mode(model, [], True)
modAL.dropout.set_dropout_mode(model, [11], train_mode)
self.assertEqual(modules[11].training, train_mode)
self.assertEqual(modules[7].training, True)
# No Dropout Layer
self.assertRaises(KeyError, modAL.dropout.set_dropout_mode,
model, [5], train_mode)
class TestDeepActiveLearner(unittest.TestCase):
"""
Tests for the base class methods of the BaseLearner (base.py) are provided in
the TestActiveLearner.
"""
def setUp(self):
self.mock_deep_estimator = mock.MockEstimator()
# Add methods that can not be autospecced (because of the wrapper)
self.mock_deep_estimator.initialize = MagicMock(name='initialize')
self.mock_deep_estimator.partial_fit = MagicMock(name='partial_fit')
def test_teach(self):
for bootstrap, warm_start in product([True, False], [True, False]):
for n_samples in range(1, 10):
X = torch.randn(n_samples, 1)
y = torch.randn(n_samples)
learner = modAL.models.learners.DeepActiveLearner(
estimator=self.mock_deep_estimator
)
learner.teach(X, y, bootstrap=bootstrap, warm_start=warm_start)
def test_batch_size(self):
learner = modAL.models.learners.DeepActiveLearner(
estimator=self.mock_deep_estimator
)
for batch_size in range(1, 50):
learner.batch_size = batch_size
self.assertEqual(batch_size, learner.batch_size)
def test_num_epochs(self):
learner = modAL.models.learners.DeepActiveLearner(
estimator=self.mock_deep_estimator
)
for num_epochs in range(1, 50):
learner.num_epochs = num_epochs
self.assertEqual(num_epochs, learner.num_epochs)
class TestActiveLearner(unittest.TestCase):
def test_add_training_data(self):
for n_samples in range(1, 10):
for n_features in range(1, 10):
for n_new_samples in range(1, 10):
# testing for valid cases
# 1. integer class labels
X_initial = np.random.rand(n_samples, n_features)
y_initial = np.random.randint(0, 2, size=(n_samples,))
X_new = np.random.rand(n_new_samples, n_features)
y_new = np.random.randint(0, 2, size=(n_new_samples,))
learner = modAL.models.learners.ActiveLearner(
estimator=mock.MockEstimator(),
X_training=X_initial, y_training=y_initial
)
learner._add_training_data(X_new, y_new)
np.testing.assert_almost_equal(
learner.X_training,
np.vstack((X_initial, X_new))
)
np.testing.assert_equal(
learner.y_training,
np.concatenate((y_initial, y_new))
)
# 2. vector class labels
y_initial = np.random.randint(
0, 2, size=(n_samples, n_features+1))
y_new = np.random.randint(
0, 2, size=(n_new_samples, n_features+1))
learner = modAL.models.learners.ActiveLearner(
estimator=mock.MockEstimator(),
X_training=X_initial, y_training=y_initial
)
learner._add_training_data(X_new, y_new)
np.testing.assert_equal(
learner.y_training,
np.concatenate((y_initial, y_new))
)
# 3. data with shape (n, )
X_initial = np.random.rand(n_samples, )
y_initial = np.random.randint(0, 2, size=(n_samples,))
learner = modAL.models.learners.ActiveLearner(
estimator=mock.MockEstimator(),
X_training=X_initial, y_training=y_initial
)
X_new = np.random.rand(n_new_samples,)
y_new = np.random.randint(0, 2, size=(n_new_samples,))
learner._add_training_data(X_new, y_new)
# testing for invalid cases
# 1. len(X_new) != len(y_new)
X_new = np.random.rand(n_new_samples, n_features)
y_new = np.random.randint(0, 2, size=(2*n_new_samples,))
self.assertRaises(
ValueError, learner._add_training_data, X_new, y_new)
# 2. X_new has wrong dimensions
X_new = np.random.rand(n_new_samples, 2*n_features)
y_new = np.random.randint(0, 2, size=(n_new_samples,))
self.assertRaises(
ValueError, learner._add_training_data, X_new, y_new)
def test_predict(self):
for n_samples in range(1, 100):
for n_features in range(1, 10):
X = np.random.rand(n_samples, n_features)
predict_return = np.random.randint(0, 2, size=(n_samples, ))
mock_classifier = mock.MockEstimator(
predict_return=predict_return)
learner = modAL.models.learners.ActiveLearner(
estimator=mock_classifier
)
np.testing.assert_equal(
learner.predict(X),
predict_return
)
def test_predict_proba(self):
for n_samples in range(1, 100):
for n_features in range(1, 10):
X = np.random.rand(n_samples, n_features)
predict_proba_return = np.random.randint(
0, 2, size=(n_samples,))
mock_classifier = mock.MockEstimator(
predict_proba_return=predict_proba_return)
learner = modAL.models.learners.ActiveLearner(
estimator=mock_classifier
)
np.testing.assert_equal(
learner.predict_proba(X),
predict_proba_return
)
def test_query(self):
for n_samples in range(1, 100):
for n_features in range(1, 10):
X = np.random.rand(n_samples, n_features)
query_idx = np.random.randint(0, n_samples)
query_metrics = np.random.randint(0, n_samples)
mock_query = mock.MockFunction(
return_val=(query_idx, query_metrics))
learner = modAL.models.learners.ActiveLearner(
estimator=None,
query_strategy=mock_query
)
np.testing.assert_equal(
learner.query(X),
(query_idx, X[query_idx])
)
np.testing.assert_equal(
learner.query(X, return_metrics=True),
(query_idx, X[query_idx], query_metrics)
)
def test_score(self):
test_cases = (np.random.rand() for _ in range(10))
for score_return in test_cases:
mock_classifier = mock.MockEstimator(score_return=score_return)
learner = modAL.models.learners.ActiveLearner(
mock_classifier, mock.MockFunction(None))
np.testing.assert_almost_equal(
learner.score(np.random.rand(5, 2), np.random.rand(5, )),
score_return
)
def test_teach(self):
X_training = np.random.rand(10, 2)
y_training = np.random.randint(0, 2, size=10)
for bootstrap, only_new in product([True, False], [True, False]):
for n_samples in range(1, 10):
X = np.random.rand(n_samples, 2)
y = np.random.randint(0, 2, size=n_samples)
learner = modAL.models.learners.ActiveLearner(
X_training=X_training, y_training=y_training,
estimator=mock.MockEstimator()
)
learner.teach(X, y, bootstrap=bootstrap, only_new=only_new)
def test_nan(self):
X_training_nan = np.ones(shape=(10, 2)) * np.nan
X_training_inf = np.ones(shape=(10, 2)) * np.inf
y_training = np.random.randint(0, 2, size=10)
learner = modAL.models.learners.ActiveLearner(
X_training=X_training_nan, y_training=y_training,
estimator=mock.MockEstimator(),
force_all_finite=False
)
learner.teach(X_training_nan, y_training)
learner = modAL.models.learners.ActiveLearner(
X_training=X_training_inf, y_training=y_training,
estimator=mock.MockEstimator(),
force_all_finite=False
)
learner.teach(X_training_inf, y_training)
def test_keras(self):
pass
def test_sklearn(self):
learner = modAL.models.learners.ActiveLearner(
estimator=RandomForestClassifier(n_estimators=10),
X_training=np.random.rand(10, 10),
y_training=np.random.randint(0, 2, size=(10,))
)
learner.fit(np.random.rand(10, 10),
np.random.randint(0, 2, size=(10,)))
pred = learner.predict(np.random.rand(10, 10))
learner.predict_proba(np.random.rand(10, 10))
confusion_matrix(pred, np.random.randint(0, 2, size=(10,)))
def test_sparse_matrices(self):
query_strategies = [
modAL.uncertainty.uncertainty_sampling,
modAL.uncertainty.entropy_sampling,
modAL.uncertainty.margin_sampling
]
formats = ['lil', 'csc', 'csr']
sample_count = range(10, 20)
feature_count = range(1, 5)
for query_strategy, format, n_samples, n_features in product(query_strategies, formats, sample_count, feature_count):
X_pool = sp.random(n_samples, n_features, format=format)
y_pool = np.random.randint(0, 2, size=(n_samples, ))
initial_idx = np.random.choice(
range(n_samples), size=5, replace=False)
learner = modAL.models.learners.ActiveLearner(
estimator=RandomForestClassifier(n_estimators=10), query_strategy=query_strategy,
X_training=X_pool[initial_idx], y_training=y_pool[initial_idx]
)
query_idx, query_inst = learner.query(X_pool)
learner.teach(X_pool[query_idx], y_pool[query_idx])
def test_on_transformed(self):
n_samples = 10
n_features = 5
query_strategies = [
modAL.batch.uncertainty_batch_sampling
# add further strategies which work with instance representations
# no further ones as of 25.09.2020
]
X_pool = np.random.rand(n_samples, n_features)
# use pandas data frame as X_pool, which will be transformed back to numpy with sklearn pipeline
        X_pool = pd.DataFrame(X_pool)
"""
"""
"""
>>> # ---
>>> # SETUP
>>> # ---
>>> import os
>>> import logging
>>> logger = logging.getLogger('PT3S.Rm')
>>> # ---
>>> # path
>>> # ---
>>> if __name__ == "__main__":
... try:
... dummy=__file__
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ','path = os.path.dirname(__file__)'," ."))
... path = os.path.dirname(__file__)
... except NameError:
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ',"path = '.' because __file__ not defined and: "," from Rm import Rm"))
... path = '.'
... from Rm import Rm
... else:
... path = '.'
... logger.debug("{0:s}{1:s}".format('Not __main__ Context: ',"path = '.' ."))
>>> try:
... from PT3S import Mx
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Mx: ImportError: ","trying import Mx instead ... maybe pip install -e . is active ..."))
... import Mx
>>> try:
... from PT3S import Xm
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Xm: ImportError: ","trying import Xm instead ... maybe pip install -e . is active ..."))
... import Xm
>>> # ---
>>> # testDir
>>> # ---
>>> # globs={'testDir':'testdata'}
>>> try:
... dummy= testDir
... except NameError:
... testDir='testdata'
>>> # ---
>>> # dotResolution
>>> # ---
>>> # globs={'dotResolution':''}
>>> try:
... dummy= dotResolution
... except NameError:
... dotResolution=''
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> pd.set_option('display.max_columns',None)
>>> pd.set_option('display.width',666666666)
>>> # ---
>>> # LocalHeatingNetwork SETUP
>>> # ---
>>> xmlFile=os.path.join(os.path.join(path,testDir),'LocalHeatingNetwork.XML')
>>> xm=Xm.Xm(xmlFile=xmlFile)
>>> mx1File=os.path.join(path,os.path.join(testDir,'WDLocalHeatingNetwork\B1\V0\BZ1\M-1-0-1'+dotResolution+'.MX1'))
>>> mx=Mx.Mx(mx1File=mx1File,NoH5Read=True,NoMxsRead=True)
>>> mx.setResultsToMxsFile(NewH5Vec=True)
5
>>> xm.MxSync(mx=mx)
>>> rm=Rm(xm=xm,mx=mx)
>>> # ---
>>> # Plot 3Classes False
>>> # ---
>>> plt.close('all')
>>> ppi=72 # matplotlib default
>>> dpi_screen=2*ppi
>>> fig=plt.figure(dpi=dpi_screen,linewidth=1.)
>>> timeDeltaToT=mx.df.index[2]-mx.df.index[0]
>>> # 3Classes and FixedLimits default to False; RefPerc defaults to True
>>> # the assignment of MCategory according to FixedLimitsHigh/Low always takes place ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66,pFWVBGCategory=['BLNZ1u5u7'],pVICsDf=pd.DataFrame({'Kundenname': ['VIC1'],'Knotenname': ['V-K007']}))
>>> # ---
>>> # Check pFWVB Return
>>> # ---
>>> f=lambda x: "{0:8.5f}".format(x)
>>> print(pFWVB[['Measure','MCategory','GCategory','VIC']].round(2).to_string(formatters={'Measure':f}))
Measure MCategory GCategory VIC
0 0.81000 Top BLNZ1u5u7 NaN
1 0.67000 Middle NaN
2 0.66000 Middle BLNZ1u5u7 NaN
3 0.66000 Bottom BLNZ1u5u7 VIC1
4 0.69000 Middle NaN
>>> # ---
>>> # Print
>>> # ---
>>> (wD,fileName)=os.path.split(xm.xmlFile)
>>> (base,ext)=os.path.splitext(fileName)
>>> plotFileName=wD+os.path.sep+base+'.'+'pdf'
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
>>> plt.savefig(plotFileName,dpi=2*dpi_screen)
>>> os.path.exists(plotFileName)
True
>>> # ---
>>> # Plot 3Classes True
>>> # ---
>>> plt.close('all')
>>> # FixedLimits is automatically set to True when 3Classes is True ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasure3Classes=True,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66)
>>> # ---
>>> # LocalHeatingNetwork Clean Up
>>> # ---
>>> if os.path.exists(mx.h5File):
... os.remove(mx.h5File)
>>> if os.path.exists(mx.mxsZipFile):
... os.remove(mx.mxsZipFile)
>>> if os.path.exists(mx.h5FileVecs):
... os.remove(mx.h5FileVecs)
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
"""
__version__='172.16.58.3.dev1'
import warnings # 3.6
#...\Anaconda3\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
# from ._conv import register_converters as _register_converters
warnings.simplefilter(action='ignore', category=FutureWarning)
#C:\Users\Wolters\Anaconda3\lib\site-packages\matplotlib\cbook\deprecation.py:107: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
# warnings.warn(message, mplDeprecation, stacklevel=1)
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
import os
import sys
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import timeit
import xml.etree.ElementTree as ET
import re
import struct
import collections
import zipfile
import pandas as pd
import h5py
from collections import namedtuple
from operator import attrgetter
import subprocess
import warnings
import tables
import math
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.colorbar import make_axes
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
from matplotlib import markers
from matplotlib.path import Path
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import scipy
import networkx as nx
from itertools import chain
import math
import sys
from copy import deepcopy
from itertools import chain
import scipy
from scipy.signal import savgol_filter
import logging
# ---
# --- PT3S Imports
# ---
logger = logging.getLogger('PT3S')
if __name__ == "__main__":
logger.debug("{0:s}{1:s}".format('in MODULEFILE: __main__ Context','.'))
else:
logger.debug("{0:s}{1:s}{2:s}{3:s}".format('in MODULEFILE: Not __main__ Context: ','__name__: ',__name__," ."))
try:
from PT3S import Mx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Mx - trying import Mx instead ... maybe pip install -e . is active ...'))
import Mx
try:
from PT3S import Xm
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Xm - trying import Xm instead ... maybe pip install -e . is active ...'))
import Xm
try:
from PT3S import Am
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Am - trying import Am instead ... maybe pip install -e . is active ...'))
import Am
try:
from PT3S import Lx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Lx - trying import Lx instead ... maybe pip install -e . is active ...'))
import Lx
# ---
# --- main Imports
# ---
import argparse
import unittest
import doctest
import math
from itertools import tee
# --- Parameter Allgemein
# -----------------------
DINA6 = (4.13 , 5.83)
DINA5 = (5.83 , 8.27)
DINA4 = (8.27 , 11.69)
DINA3 = (11.69 , 16.54)
DINA2 = (16.54 , 23.39)
DINA1 = (23.39 , 33.11)
DINA0 = (33.11 , 46.81)
DINA6q = ( 5.83, 4.13)
DINA5q = ( 8.27, 5.83)
DINA4q = ( 11.69, 8.27)
DINA3q = ( 16.54,11.69)
DINA2q = ( 23.39,16.54)
DINA1q = ( 33.11,23.39)
DINA0q = ( 46.81,33.11)
dpiSize=72
DINA4_x=8.2677165354
DINA4_y=11.6929133858
DINA3_x=DINA4_x*math.sqrt(2)
DINA3_y=DINA4_y*math.sqrt(2)
linestyle_tuple = [
('loosely dotted', (0, (1, 10))),
('dotted', (0, (1, 1))),
('densely dotted', (0, (1, 1))),
('loosely dashed', (0, (5, 10))),
('dashed', (0, (5, 5))),
('densely dashed', (0, (5, 1))),
('loosely dashdotted', (0, (3, 10, 1, 10))),
('dashdotted', (0, (3, 5, 1, 5))),
('densely dashdotted', (0, (3, 1, 1, 1))),
('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))]
ylimpD=(-5,70)
ylimpDmlc=(600,1350) #(300,1050)
ylimQD=(-75,300)
ylim3rdD=(0,3)
yticks3rdD=[0,1,2,3]
yGridStepsD=30
yticksALD=[0,3,4,10,20,30,40]
ylimALD=(yticksALD[0],yticksALD[-1])
yticksRD=[0,2,4,10,15,30,45]
ylimRD=(-yticksRD[-1],yticksRD[-1])
ylimACD=(-5,5)
yticksACD=[-5,0,5]
yticksTVD=[0,100,135,180,200,300]
ylimTVD=(yticksTVD[0],yticksTVD[-1])
plotTVAmLabelD='TIMER u. AM [Sek. u. (N)m3*100]'
def getDerivative(df,col,shiftSize=1,windowSize=60,fct=None,savgol_polyorder=None):
"""
returns a df
df: the df
col: the col of df to be derived
shiftsize: the Difference between 2 indices for dValue and dt
windowSize: size for rolling mean or window_length of savgol_filter; choosen filtertechnique is applied after fct
windowsSize must be an even number
for savgol_filter windowsSize-1 is used
fct: function to be applied on dValue/dt
savgol_polyorder: if not None savgol_filter is applied; pandas' rolling.mean() is applied otherwise
new cols:
dt (with shiftSize)
dValue (from col)
dValueDt (from col); fct applied
dValueDtFiltered; choosen filtertechnique is applied
"""
mDf=df.dropna().copy(deep=True)
try:
dt=mDf.index.to_series().diff(periods=shiftSize)
mDf['dt']=dt
mDf['dValue']=mDf[col].diff(periods=shiftSize)
mDf=mDf.iloc[shiftSize:]
mDf['dValueDt']=mDf.apply(lambda row: row['dValue']/row['dt'].total_seconds(),axis=1)
if fct != None:
mDf['dValueDt']=mDf['dValueDt'].apply(fct)
if savgol_polyorder == None:
mDf['dValueDtFiltered']=mDf['dValueDt'].rolling(window=windowSize).mean()
mDf=mDf.iloc[windowSize-1:]
else:
mDf['dValueDtFiltered']=savgol_filter(mDf['dValueDt'].values,windowSize-1, savgol_polyorder)
            mDf=mDf.iloc[windowSize//2+1+savgol_polyorder-1:]
#mDf=mDf.iloc[windowSize-1:]
except Exception as e:
raise e
finally:
return mDf
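# Illustrative usage sketch (not part of the original module; the column name
# and sizes are assumptions): derive a time-indexed signal with the default
# rolling-mean filtering of getDerivative.
def _demoGetDerivative():
    idx = pd.date_range('2021-01-01', periods=300, freq='S')
    df = pd.DataFrame({'value': np.sin(np.linspace(0., 10., 300))}, index=idx)
    # dValue/dt per second, smoothed with a 60-sample rolling mean
    return getDerivative(df, 'value', shiftSize=1, windowSize=60)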
def fCVDNodesFromName(x):
    Nodes=x.replace('°','~')
Nodes=Nodes.split('~')
Nodes =[Node.lstrip().rstrip() for Node in Nodes if len(Node)>0]
return Nodes
def fgetMaxpMinFromName(CVDName,dfSegsNodesNDataDpkt):
"""
returns max. pMin for alle NODEs in CVDName
"""
nodeLst=fCVDNodesFromName(CVDName)
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['NODEsName'].isin(nodeLst)][['pMin','pMinMlc']]
s=df.max()
return s.pMin
# --- Funktionen Allgemein
# -----------------------
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
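# Minimal sketch (demo only, not part of the original module): pairwise() yields
# overlapping neighbour tuples of an iterable.
def _demoPairwise():
    return list(pairwise([1, 2, 3, 4])) # [(1, 2), (2, 3), (3, 4)]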
def genTimespans(timeStart
,timeEnd
,timeSpan=pd.Timedelta('12 Minutes')
,timeOverlap=pd.Timedelta('0 Seconds')
,timeStartPraefix=pd.Timedelta('0 Seconds')
,timeEndPostfix=pd.Timedelta('0 Seconds')
):
# generates timeSpan-Sections
# if timeStart is
# an int, it is considered as the number of desired Sections before timeEnd; timeEnd must be a time
# a time, it is considered as timeStart
# if timeEnd is
# an int, it is considered as the number of desired Sections after timeStart; timeStart must be a time
# a time, it is considered as timeEnd
# if timeSpan is
# an int, it is considered as the number of desired Sections
# a time, it is considered as timeSpan
# returns an array of tuples
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
xlims=[]
try:
if type(timeStart) == int:
numOfDesiredSections=timeStart
timeStartEff=timeEnd+timeEndPostfix-numOfDesiredSections*timeSpan+(numOfDesiredSections-1)*timeOverlap-timeStartPraefix
else:
timeStartEff=timeStart-timeStartPraefix
logger.debug("{0:s}timeStartEff: {1:s}".format(logStr,str(timeStartEff)))
if type(timeEnd) == int:
numOfDesiredSections=timeEnd
timeEndEff=timeStart-timeStartPraefix+numOfDesiredSections*timeSpan-(numOfDesiredSections-1)*timeOverlap+timeEndPostfix
else:
timeEndEff=timeEnd+timeEndPostfix
logger.debug("{0:s}timeEndEff: {1:s}".format(logStr,str(timeEndEff)))
if type(timeSpan) == int:
numOfDesiredSections=timeSpan
dt=timeEndEff-timeStartEff
timeSpanEff=dt/numOfDesiredSections+(numOfDesiredSections-1)*timeOverlap
else:
timeSpanEff=timeSpan
logger.debug("{0:s}timeSpanEff: {1:s}".format(logStr,str(timeSpanEff)))
logger.debug("{0:s}timeOverlap: {1:s}".format(logStr,str(timeOverlap)))
timeStartAct = timeStartEff
while timeStartAct < timeEndEff:
logger.debug("{0:s}timeStartAct: {1:s}".format(logStr,str(timeStartAct)))
timeEndAct=timeStartAct+timeSpanEff
xlim=(timeStartAct,timeEndAct)
xlims.append(xlim)
timeStartAct = timeEndAct - timeOverlap
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
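# Illustrative sketch (demo only; the start time is an assumption): three
# consecutive 12-minute sections following a given start time.
def _demoGenTimespans():
    timeStart = pd.Timestamp('2021-03-19 01:00:00')
    return genTimespans(timeStart, timeEnd=3, timeSpan=pd.Timedelta('12 Minutes'))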
def gen2Timespans(
        timeStart # start of a "process"
        ,timeEnd # end of a "process"
,timeSpan=pd.Timedelta('12 Minutes')
,timeStartPraefix=pd.Timedelta('0 Seconds')
,timeEndPostfix=pd.Timedelta('0 Seconds')
,roundStr=None # i.e. '5min': timeStart.round(roundStr) und timeEnd dito
):
"""
erzeugt 2 gleich lange Zeitbereiche
1 um timeStart herum
1 um timeEnd herum
"""
#print("timeStartPraefix: {:s}".format(str(timeStartPraefix)))
#print("timeEndPostfix: {:s}".format(str(timeEndPostfix)))
xlims=[]
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if roundStr != None:
timeStart=timeStart.round(roundStr)
timeEnd=timeEnd.round(roundStr)
xlims.append((timeStart-timeStartPraefix,timeStart-timeStartPraefix+timeSpan))
xlims.append((timeEnd+timeEndPostfix-timeSpan,timeEnd+timeEndPostfix))
return xlims
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
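# Illustrative sketch (demo only; the timestamps are assumptions): one
# 10-minute window at the start and one at the end of a process.
def _demoGen2Timespans():
    tA = pd.Timestamp('2021-03-19 01:00:00')
    tE = pd.Timestamp('2021-03-19 02:00:00')
    return gen2Timespans(tA, tE, timeSpan=pd.Timedelta('10 Minutes'), roundStr='5min')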
def fTotalTimeFromPairs(
x
,denominator=None # i.e. pd.Timedelta('1 minute') for totalTime in Minutes
,roundToInt=True # round to and return as int if denominator is specified; else td is rounded by 2
):
tdTotal=pd.Timedelta('0 seconds')
for idx,tPairs in enumerate(x):
t1,t2=tPairs
if idx==0:
tLast=t2
else:
if t1 <= tLast:
print("Zeitpaar รผberlappt?!")
td=t2-t1
if td < pd.Timedelta('1 seconds'):
pass
#print("Zeitpaar < als 1 Sekunde?!")
tdTotal=tdTotal+td
if denominator==None:
return tdTotal
else:
td=tdTotal / denominator
if roundToInt:
td=int(round(td,0))
else:
td=round(td,2)
return td
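# Illustrative sketch (demo only): total duration covered by two time pairs,
# expressed in minutes (5 min + 2 min -> 7).
def _demoFTotalTimeFromPairs():
    t = pd.Timestamp('2021-03-19 01:00:00')
    tPairs = [(t, t + pd.Timedelta('5 Minutes'))
             ,(t + pd.Timedelta('10 Minutes'), t + pd.Timedelta('12 Minutes'))]
    return fTotalTimeFromPairs(tPairs, denominator=pd.Timedelta('1 minute'))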
def findAllTimeIntervalls(
df
,fct=lambda row: True if row['col'] == 46 else False
,tdAllowed=None
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
tPairs=[]
try:
rows,cols=df.shape
if df.empty:
logger.debug("{:s}df ist leer".format(logStr))
elif rows == 1:
logger.debug("{:s}df hat nur 1 Zeile: {:s}".format(logStr,df.to_string()))
rowValue=fct(df.iloc[0])
if rowValue:
tPair=(df.index[0],df.index[0])
tPairs.append(tPair)
else:
pass
else:
tEin=None
            # pairwise over all rows
for (i1, row1), (i2, row2) in pairwise(df.iterrows()):
row1Value=fct(row1)
row2Value=fct(row2)
                # if 1 not x and 2 x: tEin=t2 "switches on"
if not row1Value and row2Value:
tEin=i2
                # if 1 x and 2 not x: tAus=t2 "switches off"
elif row1Value and not row2Value:
if tEin != None:
                        # store the pair
tPair=(tEin,i1)
tPairs.append(tPair)
else:
                        pass # otherwise: the condition is now off and was not on before
                # the condition can only switch on in the first case
                # if 1 x and 2 x
elif row1Value and row2Value:
if tEin != None:
pass
else:
                        # in the first value pair the range is already on
tEin=i1
            # last pair
if row1Value and row2Value:
if tEin != None:
tPair=(tEin,i2)
tPairs.append(tPair)
if tdAllowed != None:
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
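# Illustrative sketch (demo only; the column name and values are assumptions):
# find the intervals in which a DataFrame column equals 46.
def _demoFindAllTimeIntervalls():
    t = pd.Timestamp('2021-03-19 01:02:00')
    idx = [t + pd.Timedelta(seconds=i) for i in range(4)]
    df = pd.DataFrame({'col': [0, 46, 46, 0]}, index=idx)
    return findAllTimeIntervalls(df, fct=lambda row: row['col'] == 46)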
def findAllTimeIntervallsSeries(
s=pd.Series()
,fct=lambda x: True if x == 46 else False
,tdAllowed=None # if not None all subsequent TimePairs with TimeDifference <= tdAllowed are combined to one TimePair
,debugOutput=True
):
"""
    # if fct is given:
    #   find all [time ranges] for which fct is True; these time ranges are returned; only pairs are returned; True singletons do not get lost but are returned as a pair (t,t)
    #   True singletons are ONLY included if s contains just 1 value and that value is True; the single returned pair then holds the singleton timestamp for both times
    #   tdAllowed can be specified
    #       afterwards merge into time ranges that are no more than tdAllowed apart; those time ranges are then returned
    # if fct is None:
    #   tdAllowed must be specified
    #   split into time ranges that are no more than the threshold tdAllowed apart; those time ranges are returned
    #   in general every returned time range has a start and an end (i.e. 2 times), even if the threshold has to be ignored once or several times for that
    #   because no time range contained in s shall get lost
    #   if s contains just 1 value, 1 time pair with the same timestamp for both times is returned, if the value is not None
    # returns array of Time-Pair-Tuples
>>> import pandas as pd
>>> t=pd.Timestamp('2021-03-19 01:02:00')
>>> t1=t +pd.Timedelta('1 second')
>>> t2=t1+pd.Timedelta('1 second')
>>> t3=t2+pd.Timedelta('1 second')
>>> t4=t3+pd.Timedelta('1 second')
>>> t5=t4+pd.Timedelta('1 second')
>>> t6=t5+pd.Timedelta('1 second')
>>> t7=t6+pd.Timedelta('1 second')
>>> d = {t1: 46, t2: 0} # geht aus - kein Paar
>>> s1PaarGehtAus=pd.Series(data=d, index=[t1, t2])
>>> d = {t1: 0, t2: 46} # geht ein - kein Paar
>>> s1PaarGehtEin=pd.Series(data=d, index=[t1, t2])
>>> d = {t5: 46, t6: 0} # geht ausE - kein Paar
>>> s1PaarGehtAusE=pd.Series(data=d, index=[t5, t6])
>>> d = {t5: 0, t6: 46} # geht einE - kein Paar
>>> s1PaarGehtEinE=pd.Series(data=d, index=[t5, t6])
>>> d = {t1: 46, t2: 46} # geht aus - ein Paar
>>> s1PaarEin=pd.Series(data=d, index=[t1, t2])
>>> d = {t1: 0, t2: 0} # geht aus - kein Paar
>>> s1PaarAus=pd.Series(data=d, index=[t1, t2])
>>> s2PaarAus=pd.concat([s1PaarGehtAus,s1PaarGehtAusE])
>>> s2PaarEin=pd.concat([s1PaarGehtEin,s1PaarGehtEinE])
>>> s2PaarAusEin=pd.concat([s1PaarGehtAus,s1PaarGehtEinE])
>>> s2PaarEinAus=pd.concat([s1PaarGehtEin,s1PaarGehtAusE])
>>> # 1 Wert
>>> d = {t1: 46} # 1 Wert - Wahr
>>> s1WertWahr=pd.Series(data=d, index=[t1])
>>> d = {t1: 44} # 1 Wert - Falsch
>>> s1WertFalsch=pd.Series(data=d, index=[t1])
>>> d = {t1: None} # 1 Wert - None
>>> s1WertNone=pd.Series(data=d, index=[t1])
>>> ###
>>> # 46 0
>>> # 0 46
>>> # 0 0
>>> # 46 46 !1 Paar
>>> # 46 0 46 0
>>> # 46 0 0 46
>>> # 0 46 0 46
>>> # 0 46 46 0 !1 Paar
>>> ###
>>> findAllTimeIntervallsSeries(s1PaarGehtAus)
[]
>>> findAllTimeIntervallsSeries(s1PaarGehtEin)
[]
>>> findAllTimeIntervallsSeries(s1PaarEin)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarAus)
[]
>>> findAllTimeIntervallsSeries(s2PaarAus)
[]
>>> findAllTimeIntervallsSeries(s2PaarEin)
[]
>>> findAllTimeIntervallsSeries(s2PaarAusEin)
[]
>>> findAllTimeIntervallsSeries(s2PaarEinAus)
[(Timestamp('2021-03-19 01:02:02'), Timestamp('2021-03-19 01:02:05'))]
>>> # 1 Wert
>>> findAllTimeIntervallsSeries(s1WertWahr)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:01'))]
>>> findAllTimeIntervallsSeries(s1WertFalsch)
[]
>>> ###
>>> # 46 0 !1 Paar
>>> # 0 46 !1 Paar
>>> # 0 0 !1 Paar
>>> # 46 46 !1 Paar
>>> # 46 0 46 0 !2 Paare
>>> # 46 0 0 46 !2 Paare
>>> # 0 46 0 46 !2 Paare
>>> # 0 46 46 0 !2 Paare
>>> ###
>>> findAllTimeIntervallsSeries(s1PaarGehtAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarGehtEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s2PaarAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarAusEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarEinAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> # 1 Wert
>>> findAllTimeIntervallsSeries(s1WertWahr,fct=None)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:01'))]
>>> findAllTimeIntervallsSeries(s1WertNone,fct=None)
[]
>>> ###
>>> d = {t1: 0, t3: 0}
>>> s1PaarmZ=pd.Series(data=d, index=[t1, t3])
>>> findAllTimeIntervallsSeries(s1PaarmZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:03'))]
>>> d = {t4: 0, t5: 0}
>>> s1PaaroZ=pd.Series(data=d, index=[t4, t5])
>>> s2PaarmZoZ=pd.concat([s1PaarmZ,s1PaaroZ])
>>> findAllTimeIntervallsSeries(s2PaarmZoZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]
>>> ###
>>> d = {t1: 0, t2: 0}
>>> s1PaaroZ=pd.Series(data=d, index=[t1, t2])
>>> d = {t3: 0, t5: 0}
>>> s1PaarmZ=pd.Series(data=d, index=[t3, t5])
>>> s2PaaroZmZ=pd.concat([s1PaaroZ,s1PaarmZ])
>>> findAllTimeIntervallsSeries(s2PaaroZmZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]
>>> ###
>>> d = {t6: 0, t7: 0}
>>> s1PaaroZ2=pd.Series(data=d, index=[t6, t7])
>>> d = {t4: 0}
>>> solitaer=pd.Series(data=d, index=[t4])
>>> s5er=pd.concat([s1PaaroZ,solitaer,s1PaaroZ2])
>>> findAllTimeIntervallsSeries(s5er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:04'), Timestamp('2021-03-19 01:02:07'))]
>>> s3er=pd.concat([s1PaaroZ,solitaer])
>>> findAllTimeIntervallsSeries(s3er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:04'))]
>>> s3er=pd.concat([solitaer,s1PaaroZ2])
>>> findAllTimeIntervallsSeries(s3er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:04'), Timestamp('2021-03-19 01:02:07'))]
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
tPairs=[]
try:
if s.empty:
logger.debug("{:s}Series {!s:s} ist leer".format(logStr,s.name))
elif s.size == 1:
logger.debug("{:s}Series {!s:s} hat nur 1 Element: {:s}".format(logStr,s.name,s.to_string()))
if fct != None:
                # 1 pair with identical times if the single element is True
sValue=fct(s.iloc[0])
if sValue:
tPair=(s.index[0],s.index[0])
tPairs.append(tPair)
else:
pass
else:
                # 1 pair with identical times if the single element is not None
sValue=s.iloc[0]
if sValue != None:
tPair=(s.index[0],s.index[0])
tPairs.append(tPair)
else:
pass
else:
tEin=None
if fct != None:
                # pairwise over all times
for idx,((i1, s1), (i2, s2)) in enumerate(pairwise(s.iteritems())):
s1Value=fct(s1)
s2Value=fct(s2)
                    # if 1 not x and 2 x: tEin=t2 "switches on"
if not s1Value and s2Value:
tEin=i2
if idx > 0: # Info
pass
else:
                            # at the first pair it "switches on"
pass
                    # if 1 x and 2 not x: tAus=t2 "switches off"
elif s1Value and not s2Value:
if tEin != None:
if tEin<i1:
                                # store the pair
tPair=(tEin,i1)
tPairs.append(tPair)
else:
                                # singular event
                                # pair with identical times
tPair=(tEin,i1)
tPairs.append(tPair)
pass
                        else: # switches off without having been on
if idx > 0: # Info
pass
else:
                                # in the first pair
pass
                    # if 1 x and 2 x
elif s1Value and s2Value:
if tEin != None:
pass
else:
                            # in the first value pair the range is already on
tEin=i1
                # handling of the last pair
                # if still on at the end of the series: store the pair
if s1Value and s2Value:
if tEin != None:
tPair=(tEin,i2)
tPairs.append(tPair)
                # handling of tdAllowed
if tdAllowed != None:
if debugOutput:
logger.debug("{:s}Series {!s:s}: Intervalle werden mit {!s:s} zusammengefasst ...".format(logStr,s.name,tdAllowed))
tPairsOld=tPairs.copy()
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed,debugOutput=debugOutput)
if debugOutput:
tPairsZusammengefasst=sorted(list(set(tPairsOld) - set(tPairs)))
if len(tPairsZusammengefasst)>0:
logger.debug("{:s}Series {!s:s}: Intervalle wurden wg. {!s:s} zusammengefasst. Nachfolgend die zusgefassten Intervalle: {!s:s}. Sowie die entsprechenden neuen: {!s:s}".format(
logStr
,s.name
,tdAllowed
,tPairsZusammengefasst
,sorted(list(set(tPairs) - set(tPairsOld)))
))
else:
                # pairwise over all times
                # start a new pair
                anzInPair=1 # number of times in the current time span
for (i1, s1), (i2, s2) in pairwise(s.iteritems()):
td=i2-i1
                    if td > tdAllowed: # gap between 2 times > threshold: the time span is complete
                        if tEin==None:
                            # already the first pair lies more than the threshold apart
                            # closing the span is ignored, since otherwise the span would contain only 1 value
                            # the current time span starts at the 1st value and runs across the threshold
tEin=i1
anzInPair=2
else:
if anzInPair>=2:
                                # close the time span
tPair=(tEin,i1)
tPairs.append(tPair)
                                # start a new time span
tEin=i2
anzInPair=1
else:
                                # closing the span is ignored, since otherwise the span would contain only 1 value
anzInPair=2
                    else: # gap within the allowed span, continue ...
if tEin==None:
tEin=i1
anzInPair=anzInPair+1
                # handle the last time pair
if anzInPair>=2:
tPair=(tEin,i2)
tPairs.append(tPair)
else:
                    # a single last value would be left over, so extend the last time span ...
tPair=tPairs[-1]
tPair=(tPair[0],i2)
tPairs[-1]=tPair
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
def fCombineSubsequenttPairs(
tPairs
,tdAllowed=pd.Timedelta('1 second') # all subsequent TimePairs with TimeDifference <= tdAllowed are combined to one TimePair
,debugOutput=False
):
# returns tPairs
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
for idx,(tp1,tp2) in enumerate(pairwise(tPairs)):
t1Ende=tp1[1]
t2Start=tp2[0]
if t2Start-t1Ende <= tdAllowed:
if debugOutput:
logger.debug("{:s} t1Ende: {!s:s} t2Start: {!s:s} Gap: {!s:s}".format(logStr,t1Ende,t2Start,t2Start-t1Ende))
                tPairs[idx]=(tp1[0],tp2[1]) # merge the following pair into the previous pair
                tPairs.remove(tp2) # drop the following pair
                tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed) # recursion
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
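# Illustrative sketch (demo only): two intervals separated by 1 second are
# merged into a single interval.
def _demoFCombineSubsequenttPairs():
    t = pd.Timestamp('2021-03-19 01:02:00')
    tPairs = [(t, t + pd.Timedelta('2 seconds'))
             ,(t + pd.Timedelta('3 seconds'), t + pd.Timedelta('5 seconds'))]
    return fCombineSubsequenttPairs(tPairs, tdAllowed=pd.Timedelta('1 second'))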
class RmError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
AlarmEvent = namedtuple('alarmEvent','tA,tE,ZHKNR,LDSResBaseType')
# --- Parameter und Funktionen LDS Reports
# ----------------------------------------
def pltMakeCategoricalColors(color,nOfSubColorsReq=3,reversedOrder=False):
"""
Returns an array of rgb colors derived from color.
Parameter:
color: a rgb color
nOfSubColorsReq: number of SubColors requested
Raises:
RmError
>>> import matplotlib
>>> color='red'
>>> c=list(matplotlib.colors.to_rgb(color))
>>> import Rm
>>> Rm.pltMakeCategoricalColors(c)
array([[1. , 0. , 0. ],
[1. , 0.375, 0.375],
[1. , 0.75 , 0.75 ]])
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
rgb=None
try:
chsv = matplotlib.colors.rgb_to_hsv(color[:3])
arhsv = np.tile(chsv,nOfSubColorsReq).reshape(nOfSubColorsReq,3)
arhsv[:,1] = np.linspace(chsv[1],0.25,nOfSubColorsReq)
arhsv[:,2] = np.linspace(chsv[2],1,nOfSubColorsReq)
rgb = matplotlib.colors.hsv_to_rgb(arhsv)
if reversedOrder:
rgb=list(reversed(rgb))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return rgb
# colors for pressures
SrcColorp='green'
SrcColorsp=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SrcColorp)),nOfSubColorsReq=4,reversedOrder=False)
# the first color is the original color
SnkColorp='blue'
SnkColorsp=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SnkColorp)),nOfSubColorsReq=4,reversedOrder=True)
# the last color is the original color
# colors for flows
SrcColorQ='red'
SrcColorsQ=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SrcColorQ)),nOfSubColorsReq=4,reversedOrder=False)
# the first color is the original color
SnkColorQ='orange'
SnkColorsQ=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SnkColorQ)),nOfSubColorsReq=4,reversedOrder=True)
# the last color is the original color
lwBig=4.5
lwSmall=2.5
attrsDct={ 'p Src':{'color':SrcColorp,'lw':lwBig,'where':'post'}
,'p Snk':{'color':SnkColorp,'lw':lwSmall+1.,'where':'post'}
,'p Snk 2':{'color':'mediumorchid','where':'post'}
,'p Snk 3':{'color':'darkviolet','where':'post'}
,'p Snk 4':{'color':'plum','where':'post'}
,'Q Src':{'color':SrcColorQ,'lw':lwBig,'where':'post'}
,'Q Snk':{'color':SnkColorQ,'lw':lwSmall+1.,'where':'post'}
,'Q Snk 2':{'color':'indianred','where':'post'}
,'Q Snk 3':{'color':'coral','where':'post'}
,'Q Snk 4':{'color':'salmon','where':'post'}
,'Q Src RTTM':{'color':SrcColorQ,'lw':matplotlib.rcParams['lines.linewidth']+1.,'ls':'dotted','where':'post'}
,'Q Snk RTTM':{'color':SnkColorQ,'lw':matplotlib.rcParams['lines.linewidth'] ,'ls':'dotted','where':'post'}
,'Q Snk 2 RTTM':{'color':'indianred','ls':'dotted','where':'post'}
,'Q Snk 3 RTTM':{'color':'coral','ls':'dotted','where':'post'}
,'Q Snk 4 RTTM':{'color':'salmon','ls':'dotted','where':'post'}
,'p ISrc 1':{'color':SrcColorsp[-1],'ls':'dashdot','where':'post'}
,'p ISrc 2':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
          ,'p ISrc 3':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'} # same color from here on
,'p ISrc 4':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 5':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 6':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISnk 1':{'color':SnkColorsp[0],'ls':'dashdot','where':'post'}
,'p ISnk 2':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
          ,'p ISnk 3':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'} # same color from here on
,'p ISnk 4':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 5':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 6':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'Q xSrc 1':{'color':SrcColorsQ[-1],'ls':'dashdot','where':'post'}
,'Q xSrc 2':{'color':SrcColorsQ[-2],'ls':'dashdot','where':'post'}
,'Q xSrc 3':{'color':SrcColorsQ[-3],'ls':'dashdot','where':'post'}
,'Q xSnk 1':{'color':SnkColorsQ[0],'ls':'dashdot','where':'post'}
,'Q xSnk 2':{'color':SnkColorsQ[1],'ls':'dashdot','where':'post'}
,'Q xSnk 3':{'color':SnkColorsQ[2],'ls':'dashdot','where':'post'}
,'Q (DE) Me':{'color': 'indigo','ls': 'dashdot','where': 'post','lw':1.5}
,'Q (DE) Re':{'color': 'cyan','ls': 'dashdot','where': 'post','lw':3.5}
,'p (DE) SS Me':{'color': 'magenta','ls': 'dashdot','where': 'post'}
,'p (DE) DS Me':{'color': 'darkviolet','ls': 'dashdot','where': 'post'}
,'p (DE) SS Re':{'color': 'magenta','ls': 'dotted','where': 'post'}
,'p (DE) DS Re':{'color': 'darkviolet','ls': 'dotted','where': 'post'}
,'p OPC LDSErgV':{'color':'olive'
,'lw':lwSmall-.5
,'ms':matplotlib.rcParams['lines.markersize']
,'marker':'x'
,'mec':'olive'
,'mfc':'olive'
,'where':'post'}
,'p OPC Src':{'color':SrcColorp
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SrcColorp
,'mfc':SrcColorQ
,'where':'post'}
,'p OPC Snk':{'color':SnkColorp
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SnkColorp
,'mfc':SnkColorQ
,'where':'post'}
,'Q OPC Src':{'color':SrcColorQ
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SrcColorQ
,'mfc':SrcColorp
,'where':'post'}
,'Q OPC Snk':{'color':SnkColorQ
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SnkColorQ
,'mfc':SnkColorp
,'where':'post'}
}
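# Illustrative sketch (demo only): the attribute dictionaries above are meant to
# be unpacked into matplotlib step plots; ax (an existing Axes) and s (a
# time-indexed pandas Series) are assumptions of this demo.
def _demoStepWithAttrs(ax, s, key='p Src'):
    return ax.step(s.index, s.values, **attrsDct[key])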
attrsDctLDS={
'Seg_AL_S_Attrs':{'color':'blue','lw':3.,'where':'post'}
,'Druck_AL_S_Attrs':{'color':'blue','lw':3.,'ls':'dashed','where':'post'}
,'Seg_MZ_AV_Attrs':{'color':'orange','zorder':3,'where':'post'}
,'Druck_MZ_AV_Attrs':{'color':'orange','zorder':3,'ls':'dashed','where':'post'}
,'Seg_LR_AV_Attrs':{'color':'green','zorder':1,'where':'post'}
,'Druck_LR_AV_Attrs':{'color':'green','zorder':1,'ls':'dashed','where':'post'}
,'Seg_LP_AV_Attrs':{'color':'turquoise','zorder':0,'lw':1.50,'where':'post'}
,'Druck_LP_AV_Attrs':{'color':'turquoise','zorder':0,'lw':1.50,'ls':'dashed','where':'post'}
,'Seg_NG_AV_Attrs':{'color':'red','zorder':2,'where':'post'}
,'Druck_NG_AV_Attrs':{'color':'red','zorder':2,'ls':'dashed','where':'post'}
,'Seg_SB_S_Attrs':{'color':'black','alpha':.5,'where':'post'}
,'Druck_SB_S_Attrs':{'color':'black','ls':'dashed','alpha':.75,'where':'post','lw':1.0}
,'Seg_AC_AV_Attrs':{'color':'indigo','where':'post'}
,'Druck_AC_AV_Attrs':{'color':'indigo','ls':'dashed','where':'post'}
,'Seg_ACF_AV_Attrs':{'color':'blueviolet','where':'post','lw':1.0}
,'Druck_ACF_AV_Attrs':{'color':'blueviolet','ls':'dashed','where':'post','lw':1.0}
,'Seg_ACC_Limits_Attrs':{'color':'indigo','ls':linestyle_tuple[2][1]} # 'densely dotted'
,'Druck_ACC_Limits_Attrs':{'color':'indigo','ls':linestyle_tuple[8][1]} # 'densely dashdotted'
,'Seg_TIMER_AV_Attrs':{'color':'chartreuse','where':'post'}
,'Druck_TIMER_AV_Attrs':{'color':'chartreuse','ls':'dashed','where':'post'}
,'Seg_AM_AV_Attrs':{'color':'chocolate','where':'post'}
,'Druck_AM_AV_Attrs':{'color':'chocolate','ls':'dashed','where':'post'}
#
,'Seg_DPDT_REF_Attrs':{'color':'violet','ls':linestyle_tuple[2][1]} # 'densely dotted'
,'Druck_DPDT_REF_Attrs':{'color':'violet','ls':linestyle_tuple[8][1]} # 'densely dashdotted'
,'Seg_DPDT_AV_Attrs':{'color':'fuchsia','where':'post','lw':2.0}
,'Druck_DPDT_AV_Attrs':{'color':'fuchsia','ls':'dashed','where':'post','lw':2.0}
,'Seg_QM16_AV_Attrs':{'color':'sandybrown','ls':linestyle_tuple[6][1],'where':'post','lw':1.0} # 'loosely dashdotted'
,'Druck_QM16_AV_Attrs':{'color':'sandybrown','ls':linestyle_tuple[10][1],'where':'post','lw':1.0} # 'loosely dashdotdotted'
}
pSIDEvents=re.compile('(?P<Prae>IMDI\.)?Objects\.(?P<colRegExMiddle>3S_FBG_ESCHIEBER|FBG_ESCHIEBER{1})\.(3S_)?(?P<colRegExSchieberID>[a-z,A-Z,0-9,_]+)\.(?P<colRegExEventID>(In\.ZUST|In\.LAEUFT|In\.LAEUFT_NICHT|In\.STOER|Out\.AUF|Out\.HALT|Out\.ZU)$)')
# evaluated groups: colRegExSchieberID (which valve the event refers to), colRegExMiddle (command or state) and colRegExEventID (which command or state)
# the commands and states (the possible values of colRegExEventID) must be defined below so that a marker can be assigned to each command or state
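# Hypothetical example ID (for illustration only): 'Objects.3S_FBG_ESCHIEBER.3S_AGSV_01.Out.AUF'
# would yield colRegExMiddle='3S_FBG_ESCHIEBER', colRegExSchieberID='AGSV_01', colRegExEventID='Out.AUF'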
eventCCmds={ 'Out.AUF':0
,'Out.ZU':1
,'Out.HALT':2}
eventCStats={'In.LAEUFT':3
,'In.LAEUFT_NICHT':4
,'In.ZUST':5
,'Out.AUF':6
,'Out.ZU':7
,'Out.HALT':8
,'In.STOER':9}
valRegExMiddleCmds='3S_FBG_ESCHIEBER' # colRegExMiddle value used for commands (==> eventCCmds)
LDSParameter=[
'ACC_SLOWTRANSIENT'
,'ACC_TRANSIENT'
,'DESIGNFLOW'
,'DT'
,'FILTERWINDOW'
#,'L_PERCENT'
,'L_PERCENT_STDY'
,'L_PERCENT_STRAN'
,'L_PERCENT_TRANS'
,'L_SHUTOFF'
,'L_SLOWTRANSIENT'
,'L_SLOWTRANSIENTQP'
,'L_STANDSTILL'
,'L_STANDSTILLQP'
,'L_TRANSIENT'
,'L_TRANSIENTQP'
,'L_TRANSIENTVBIGF'
,'L_TRANSIENTPDNTF'
,'MEAN'
,'NAME'
,'ORDER'
,'TIMER'
,'TTIMERTOALARM'
,'TIMERTOLISS'
,'TIMERTOLIST'
]
LDSParameterDataD={
'ACC_SLOWTRANSIENT':0.1
,'ACC_TRANSIENT':0.8
,'DESIGNFLOW':250.
,'DT':1
,'FILTERWINDOW':180
#,'L_PERCENT':1.6
,'L_PERCENT_STDY':1.6
,'L_PERCENT_STRAN':1.6
,'L_PERCENT_TRANS':1.6
,'L_SHUTOFF':2.
,'L_SLOWTRANSIENT':4.
,'L_SLOWTRANSIENTQP':4.
,'L_STANDSTILL':2.
,'L_STANDSTILLQP':2.
,'L_TRANSIENT':10.
,'L_TRANSIENTQP':10.
,'L_TRANSIENTVBIGF':3.
,'L_TRANSIENTPDNTF':1.5
,'MEAN':1
,'ORDER':1
,'TIMER':180
,'TTIMERTOALARM':45 # TIMER/4
,'TIMERTOLISS':180
,'TIMERTOLIST':180
,'NAME':''
}
def fSEGNameFromPV_2(Beschr):
# fSEGNameFromSWVTBeschr
# 2,3,4,5
if Beschr in ['',None]:
return None
m=re.search(Lx.pID,Beschr)
if m == None:
return Beschr
return m.group('C2')+'_'+m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')
def fSEGNameFromPV_3(PV):
# fSEGNameFromPV
# ...
m=re.search(Lx.pID,PV)
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
def fSEGNameFromPV_3m(PV):
# fSEGNameFromPV
# ...
m=re.search(Lx.pID,PV)
#print("C4: {:s} C6: {:s}".format(m.group('C4'),m.group('C6')))
if m.group('C4')=='AAD' and m.group('C6')=='_OHN':
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+'_OHV1'
elif m.group('C4')=='OHN' and m.group('C6')=='_NGD':
return m.group('C3')+'_'+'OHV2'+'_'+m.group('C5')+m.group('C6')
else:
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
# derive a DIVPipelineName from a PV
def fDIVNameFromPV(PV):
m=re.search(Lx.pID,PV)
return m.group('C2')+'-'+m.group('C4')
# derive a DIVPipelineName from a SEGName
def fDIVNameFromSEGName(SEGName):
if pd.isnull(SEGName):
return None
# dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(1)+'_'+re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(3) )
m=re.search('(\d+)_(\w+)_(\w+)_(\w+)',SEGName)
if m == None:
return SEGName
return m.group(1)+'_'+m.group(3)
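# e.g. fDIVNameFromSEGName('6_AAD_41_OHV1') -> '6_41' (group 1 and group 3 of the pattern);
# if SEGName does not match the pattern, SEGName is returned unchanged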
#def getNamesFromOPCITEM_ID(dfSegsNodesNDataDpkt
# ,OPCITEM_ID):
# """
# Returns tuple (DIVPipelineName,SEGName) from OPCITEM_ID PH
# """
# df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['OPCITEM_ID']==OPCITEM_ID]
# if not df.empty:
# return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0])
def fGetBaseIDFromResID(
ID='Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.MW.value'
):
"""
Returns 'Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.'
works in principle for SEG and Druck (pressure) results alike: every result PV of a vector yields the base ID valid for all result PVs of that vector,
i.e. the result PVs of a vector differ only in their suffix
see also fGetSEGBaseIDFromSEGName
"""
if pd.isnull(ID):
return None
m=re.search(Lx.pID,ID)
if m == None:
return None
try:
base=m.group('A')+'.'+m.group('B')\
+'.'+m.group('C1')\
+'_'+m.group('C2')\
+'_'+m.group('C3')\
+'_'+m.group('C4')\
+'_'+m.group('C5')\
+m.group('C6')
#print(m.groups())
#print(m.groupdict())
if 'C7' in m.groupdict().keys():
if m.group('C7') != None:
base=base+m.group('C7')
base=base+'.'+m.group('D')\
+'.'
#print(base)
except:
base=m.group(0)+' (Fehler in fGetBaseIDFromResID)'
return base
def fGetSEGBaseIDFromSEGName(
SEGName='6_AAD_41_OHV1'
):
"""
Returns 'Objects.3S_FBG_SEG_INFO.3S_L_'+SEGName+'.In.'
In some cases SEGName is manipulated ...
see also fGetBaseIDFromResID
"""
if SEGName == '6_AAD_41_OHV1':
x='6_AAD_41_OHN'
elif SEGName == '6_OHV2_41_NGD':
x='6_OHN_41_NGD'
else:
x=SEGName
return 'Objects.3S_FBG_SEG_INFO.3S_L_'+x+'.In.'
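# e.g. fGetSEGBaseIDFromSEGName('6_AAD_41_OHV1') -> 'Objects.3S_FBG_SEG_INFO.3S_L_6_AAD_41_OHN.In.'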
def getNamesFromSEGResIDBase(dfSegsNodesNDataDpkt
,SEGResIDBase):
"""
Returns tuple (DIVPipelineName,SEGName) from SEGResIDBase
"""
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGResIDBase']==SEGResIDBase]
if not df.empty:
return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0])
def getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt
,DruckResIDBase):
"""
Returns tuple (DIVPipelineName,SEGName,SEGResIDBase,SEGOnlyInLDSPara) from DruckResIDBase
"""
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['DruckResIDBase']==DruckResIDBase]
if not df.empty:
#return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0],df['SEGResIDBase'].iloc[0])
tupleLst=[]
for index,row in df.iterrows():
tupleItem=(row['DIVPipelineName'],row['SEGName'],row['SEGResIDBase'],row['SEGOnlyInLDSPara'])
tupleLst.append(tupleItem)
return tupleLst
else:
return []
def fGetErgIDsFromBaseID(
baseID='Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_BUA.In.'
,dfODI=pd.DataFrame() # df with ODI parameterization data
,strSep=' '
,patternPat='^IMDI.' #
,pattern=True # return only ergIDs for which patternPat matches
):
"""
returns a string of strSep-separated IDs from dfODI which contain baseID (and, if pattern is True, match patternPat)
baseID (and group(0) of patternPat, if pattern is True) are removed from the returned IDs
"""
if baseID in [None,'']:
return None
df=dfODI[dfODI.index.str.contains(baseID)]
if df.empty:
return None
if pattern:
ergIDs=''.join([e.replace(baseID,'').replace(re.search(patternPat,e).group(0),'')+' ' for e in df.index if re.search(patternPat,e) != None])
else:
ergIDs=''.join([e.replace(baseID,'')+' ' for e in df.index if re.search(patternPat,e) == None])
return ergIDs
def dfSegsNodesNDataDpkt(
VersionsDir=r"C:\3s\Projekte\Projekt\04 - Versionen\Version82.3"
,Model=r"MDBDOC\FBG.mdb" # a Access Model
,am=None # a Access Model already processed
,SEGsDefPattern='(?P<SEG_Ki>\S+)~(?P<SEG_Kk>\S+)$' # RSLW-Beschreibung: liefert die Knotennamen der Segmentdefinition ()
,RIDefPattern='(?P<Prae>\S+)\.(?P<Post>RICHT.S)$' # SWVT-Beschreibung (RICHT-DP): liefert u.a. SEGName
,fSEGNameFromPV_2=fSEGNameFromPV_2 # Funktion, die von SWVT-Beschreibung (RICHT-DP) u.a. SEGName liefert
,fGetBaseIDFromResID=fGetBaseIDFromResID # Funktion, die von OPCITEM-ID des PH-Kanals eines KNOTens den Wortstamm der Knotenergebnisse liefert
,fGetSEGBaseIDFromSEGName=fGetSEGBaseIDFromSEGName # Funktion, die aus SEGName den Wortstamm der Segmentergebnisse liefert
,LDSPara=r"App LDS\Modelle\WDFBG\B1\V0\BZ1\LDS_Para.xml"
,LDSParaPT=r"App LDS\SirOPC\AppLDS_DPDTParams.csv"
,ODI=r"App LDS\SirOPC\AppLDS_ODI.csv"
,LDSParameter=LDSParameter
,LDSParameterDataD=LDSParameterDataD
):
"""
all segments with path data (edge sequences), including edge and node data as well as parameterization data
returns df:
DIVPipelineName
SEGName
SEGNodes (Ki~Kk; Schluessel in LDSPara)
SEGOnlyInLDSPara
NODEsRef
NODEsRef_max
NODEsSEGLfdNr
NODEsSEGLfdNrType
NODEsName
OBJTYPE
ZKOR
Blockname
ATTRTYPE (PH)
CLIENT_ID
OPCITEM_ID
NAME (der DPKT-Gruppe)
DruckResIDBase
SEGResIDBase
SEGResIDs
SEGResIDsIMDI
DruckResIDs
DruckResIDsIMDI
NODEsSEGDruckErgLfdNr
# LDSPara
ACC_SLOWTRANSIENT
ACC_TRANSIENT
DESIGNFLOW
DT
FILTERWINDOW
L_PERCENT_STDY
L_PERCENT_STRAN
L_PERCENT_TRANS
L_SHUTOFF
L_SLOWTRANSIENT
L_SLOWTRANSIENTQP
L_STANDSTILL
L_STANDSTILLQP
L_TRANSIENT
L_TRANSIENTPDNTF
L_TRANSIENTQP
L_TRANSIENTVBIGF
MEAN
ORDER
TIMER
TIMERTOLISS
TIMERTOLIST
TTIMERTOALARM
# LDSParaPT
#ID
pMin
DT_Vorhaltemass
TTimer_PMin
Faktor_PMin
MaxL_PMin
pMinMlc
pMinMlcMinSEG
pMinMlcMaxSEG
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfSegsNodesNDataDpkt=pd.DataFrame()
try:
###### --- LDSPara
LDSParaFile=os.path.join(VersionsDir,LDSPara)
logger.info("{:s}###### {:10s}: {:s}: Lesen und prรผfen ...".format(logStr,'LDSPara',LDSPara))
with open(LDSParaFile) as f:
xml = f.read()
xmlWellFormed='<root>'+xml+'</root>'
root=ET.fromstring(xmlWellFormed)
LDSParameterData={}
for key in LDSParameterDataD.keys():
LDSParameterData[key]=[]
logger.debug("{:s}LDSParameter: {!s:s}.".format(logStr,LDSParameter))
for idx,element in enumerate(root.iter(tag='LDSI')):
attribKeysMute=[]
for key,value in element.attrib.items():
if key not in LDSParameter:
logger.warning("{:s}{:s}: Parameter: {:s} undefiniert.".format(logStr,element.attrib['NAME'],key))
attribKeysMute.append(key)
keysIst=element.attrib.keys()
keysSoll=set(LDSParameter)
keysExplizitFehlend=keysSoll-keysIst
LDSIParaDct=element.attrib
for key in keysExplizitFehlend:
if key=='ORDER':
LDSIParaDct[key]=LDSParameterDataD[key]
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
elif key=='TTIMERTOALARM':
LDSIParaDct[key]=int(LDSIParaDct['TIMER'])/4
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
else:
LDSIParaDct[key]=LDSParameterDataD[key]
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
keyListToProcess=[key for key in LDSIParaDct.keys() if key not in attribKeysMute]
for key in keyListToProcess:
LDSParameterData[key].append(LDSIParaDct[key])
df=pd.DataFrame.from_dict(LDSParameterData)
df=df.set_index('NAME').sort_index()
df.index.rename('SEGMENT', inplace=True)
df=df[sorted(df.columns.to_list())]
df = df.apply(pd.to_numeric)
#logger.debug("{:s}df: {:s}".format(logStr,df.to_string()))
logger.debug("{:s}Parameter, die nicht auf Standardwerten sind:".format(logStr))
for index, row in df.iterrows():
for colName, colValue in zip(df.columns.to_list(),row):
if colValue != LDSParameterDataD[colName]:
logger.debug("Segment: {:30s}: Parameter: {:20s} Wert: {:10s} (Standard: {:s})".format(index,colName,str(colValue),str(LDSParameterDataD[colName])))
dfPara=df
# --- read the model
if am == None:
accFile=os.path.join(VersionsDir,Model)
logger.info("{:s}###### {:10s}: {:s}: Lesen und verarbeiten ...".format(logStr,'Modell',Model))
am=Am.Am(accFile=accFile)
V_BVZ_RSLW=am.dataFrames['V_BVZ_RSLW']
V_BVZ_SWVT=am.dataFrames['V_BVZ_SWVT']
V3_KNOT=am.dataFrames['V3_KNOT']
V3_VBEL=am.dataFrames['V3_VBEL']
V3_DPKT=am.dataFrames['V3_DPKT']
V3_RSLW_SWVT=am.dataFrames['V3_RSLW_SWVT']
# --- determine segments
# --- from the model
SEGsDefinesPerRICHT=V3_RSLW_SWVT[
(V3_RSLW_SWVT['BESCHREIBUNG'].str.match(SEGsDefPattern).isin([True])) # matches the Ki~Kk pattern ...
& #!
(V3_RSLW_SWVT['BESCHREIBUNG_SWVT'].str.match(RIDefPattern).isin([True])) # matches the flow-direction PV pattern ...
].copy(deep=True)
SEGsDefinesPerRICHT=SEGsDefinesPerRICHT[['BESCHREIBUNG','BESCHREIBUNG_SWVT']]
# --- defined only via LDS Para
lSEGOnlyInLDSPara=[str(SEGNodes) for SEGNodes in dfPara.index if str(SEGNodes) not in SEGsDefinesPerRICHT['BESCHREIBUNG'].values]
for SEGNodes in lSEGOnlyInLDSPara:
logger.warning("{:s}LDSPara SEG {:s} ist nicht Modell-definiert!".format(logStr,SEGNodes))
# --- combine
SEGsDefines=pd.concat([SEGsDefinesPerRICHT,pd.DataFrame(lSEGOnlyInLDSPara,columns=['BESCHREIBUNG'])])
# add the node names of the segment definition
df=SEGsDefines['BESCHREIBUNG'].str.extract(SEGsDefPattern,expand=True)
dfCols=df.columns.to_list()
SEGsDefines=pd.concat([SEGsDefines,df],axis=1)
# thin out the columns
SEGsDefines=SEGsDefines[dfCols+['BESCHREIBUNG_SWVT','BESCHREIBUNG']]
# sort
SEGsDefines=SEGsDefines.sort_values(by=['BESCHREIBUNG_SWVT','BESCHREIBUNG']).reset_index(drop=True)
# SEGName
SEGsDefines['BESCHREIBUNG_SWVT']=SEGsDefines.apply(lambda row: row['BESCHREIBUNG_SWVT'] if not pd.isnull(row['BESCHREIBUNG_SWVT']) else row['BESCHREIBUNG'] ,axis=1)
#print(SEGsDefines)
SEGsDefines['SEGName']=SEGsDefines['BESCHREIBUNG_SWVT'].apply(lambda x: fSEGNameFromPV_2(x))
# --- determine the segment edge sequences
dfSegsNodeLst={} # for checking purposes only
dfSegsNode=[]
for index,row in SEGsDefines[~SEGsDefines[dfCols[-1]].isnull()].iterrows():
df=Xm.Xm.constructShortestPathFromNodeList(df=V3_VBEL.reset_index()
,sourceCol='NAME_i'
,targetCol='NAME_k'
,nl=[row[dfCols[0]],row[dfCols[-1]]]
,weight=None,query=None,fmask=None,filterNonQ0Rows=True)
s=pd.concat([pd.Series([row[dfCols[0]]]),df['nextNODE']])
s.name=row['SEGName']
dfSegsNodeLst[row['SEGName']]=s.reset_index(drop=True)
df2=pd.DataFrame(s.reset_index(drop=True)).rename(columns={s.name:'NODEs'})
df2['SEGName']=s.name
df2=df2[['SEGName','NODEs']]
sObj=pd.concat([pd.Series(['None']),df['OBJTYPE']])
sObj.name='OBJTYPE'
df3=pd.concat([df2,pd.DataFrame(sObj.reset_index(drop=True))],axis=1)
df4=df3.reset_index().rename(columns={'index':'NODEsLfdNr','NODEs':'NODEsName'})[['SEGName','NODEsLfdNr','NODEsName','OBJTYPE']]
df4['NODEsType']=df4.apply(lambda row: row['NODEsLfdNr'] if row['NODEsLfdNr'] < df4.index[-1] else -1, axis=1)
df4=df4[['SEGName','NODEsLfdNr','NODEsType','NODEsName','OBJTYPE']]
df4['SEGNodes']=row[dfCols[0]]+'~'+row[dfCols[-1]]
dfSegsNode.append(df4)
dfSegsNodes=pd.concat(dfSegsNode).reset_index(drop=True)
# ---
dfSegsNodes['SEGOnlyInLDSPara']=dfSegsNodes.apply(lambda row: True if row['SEGNodes'] in lSEGOnlyInLDSPara else False,axis=1)
dfSegsNodes['NODEsRef']=dfSegsNodes.sort_values(
by=['NODEsName','SEGOnlyInLDSPara','NODEsType','SEGName']
,ascending=[True,True,False,True]).groupby(['NODEsName']).cumcount() + 1
dfSegsNodes=pd.merge(dfSegsNodes,dfSegsNodes.groupby(['NODEsName']).max(),left_on='NODEsName',right_index=True,suffixes=('','_max'))
dfSegsNodes=dfSegsNodes[['SEGName','SEGNodes','SEGOnlyInLDSPara'
,'NODEsRef'
,'NODEsRef_max'
,'NODEsLfdNr','NODEsType','NODEsName','OBJTYPE']]
dfSegsNodes=dfSegsNodes.rename(columns={'NODEsLfdNr':'NODEsSEGLfdNr','NODEsType':'NODEsSEGLfdNrType'})
### # ---
### dfSegsNodes['SEGOnlyInLDSPara']=dfSegsNodes.apply(lambda row: True if row['SEGNodes'] in lSEGOnlyInLDSPara else False,axis=1)
dfSegsNodes=dfSegsNodes[['SEGName','SEGNodes','SEGOnlyInLDSPara'
,'NODEsRef'
,'NODEsRef_max'
,'NODEsSEGLfdNr','NODEsSEGLfdNrType','NODEsName','OBJTYPE']]
# --- add node data
dfSegsNodesNData=pd.merge(dfSegsNodes,V3_KNOT, left_on='NODEsName',right_on='NAME',suffixes=('','KNOT'))
dfSegsNodesNData=dfSegsNodesNData.filter(items=dfSegsNodes.columns.to_list()+['ZKOR','NAME_CONT','NAME_VKNO','pk'])
dfSegsNodesNData=dfSegsNodesNData.rename(columns={'NAME_CONT':'Blockname','NAME_VKNO':'Bl.Kn. fuer Block'})
# --- add node data point data
V3_DPKT_KNOT=pd.merge(V3_DPKT,V3_KNOT,left_on='fkOBJTYPE',right_on='pk',suffixes=('','_KNOT'))
from __future__ import print_function, division
#from nilmtk.stats import intersect_many_fast
import matplotlib.pyplot as plt
import pandas as pd
from datetime import timedelta
import matplotlib.dates as mdates
from copy import deepcopy
import numpy as np
# NILMTK imports
from nilmtk.consts import SECS_PER_DAY
from nilmtk.timeframe import TimeFrame, convert_none_to_nat
class TimeFrameGroup():
""" A collection of nilmtk.TimeFrame objects.
The timeframegroup is used to store TimeFrames of a certain
type (eg. good sections) for a whole load profile together.
It then allows intersection functionality between multiple
load profiles to eg. find the good timeframes in all
the TimeFrameGroups.
The TimeFrameGroup has been rewritten using pandas DataFrames
because the previous implementation was far too slow.
Attributes:
----------
_df: pd.DataFrame
The DataFrame with the columns 'section_start' and 'section_end'.
"""
def __init__(self, timeframes=None, starts_and_ends = None):
if isinstance(timeframes, TimeFrameGroup):
self._df = timeframes._df.copy()
elif isinstance(timeframes, pd.core.indexes.datetimes.DatetimeIndex):
self._df = timeframes
elif isinstance(timeframes, pd.DataFrame):
self._df = timeframes.copy()
elif not starts_and_ends is None:
self._df = pd.DataFrame({'section_start': starts_and_ends['starts'], 'section_end': starts_and_ends['ends']})
elif not timeframes is None:
self._df = pd.DataFrame([(frame.start, frame.end) for frame in timeframes], columns = ['section_start', 'section_end'])
else:
self._df = pd.DataFrame(columns = ['section_start', 'section_end'])
def plot(self, ax=None, y=0, height=1, gap=0.05, color='b', **plot_kwargs):
if ax is None:
ax = plt.gca()
ax.xaxis.axis_date()
height -= gap * 2
for _, row in self._df.iterrows():
length = (row['section_end'] - row['section_start']).total_seconds() / SECS_PER_DAY
bottom_left_corner = (mdates.date2num(row['section_start']), y + gap)
rect = plt.Rectangle(bottom_left_corner, length, height,
color=color, **plot_kwargs)
ax.add_patch(rect)
ax.autoscale_view()
return ax
def plot_simple(self, ax=None, gap=0.05, **plot_kwargs):
for _, row in self._df.iterrows():
length = (row['section_end'] - row['section_start']).total_seconds() / SECS_PER_DAY
bottom_left_corner = (mdates.date2num(row['section_start']), 0)
rect = plt.Rectangle(bottom_left_corner, length, 1,
color='b', **plot_kwargs)
ax.add_patch(rect)
return ax
def plot_deltahistogram(self, bins = 10):
(self._df['section_end'] - self._df['section_start']).apply(lambda e: e.total_seconds()).hist(bins=bins)
def get_timeframe(self):
''' Returns the timeframe from start of first section to end of last section.
Returns:
timeframe: outer timeframe of this TimeFrameGroup
'''
if self._df.empty:
return TimeFrame(start = None, end = None)
idx = self._df.index
return TimeFrame(start = self._df.loc[idx[0], 'section_start'], end = self._df.loc[idx[-1], 'section_end'])
def union(self, other):
'''
self.good_sections(): |######----#####-----######-#|
other.good_sections(): |---##---####----##-----###-#|
union(): |######--#######-##--######-#|
'''
assert isinstance(other, (TimeFrameGroup, list))
return TimeFrameGroup.union_many([self, other])
def union_many(groups):
'''
Function to do a fast union across many TimeFrameGroups.
Parameters
---------
groups: [nilmtk.TimeFrameGroup]
The list of TimeFrameGroups to calculate the union for.
'''
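# Sweep-line approach: mark +1 at every section start and -1 at every section end, sort by time;
# a union section is active wherever the running sum (cumsum) is greater than zero.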
all_events = pd.Series()
for group in groups:
all_events = all_events.append(pd.Series(1, index=pd.DatetimeIndex(group._df['section_start'])))
all_events = all_events.append(pd.Series(-1, index=pd.DatetimeIndex(group._df['section_end'])))
all_events.sort_index(inplace=True)
any_active = (all_events.cumsum()>0).astype(int)
switches = (any_active - any_active.shift(1).fillna(0))
starts = all_events[switches == 1].index
ends = all_events[switches == -1].index
result = pd.DataFrame({'section_start': starts, 'section_end':ends})
return TimeFrameGroup(result)
def diff(self, other):
'''
Difference between this and the other TimeFrameGroup.
self.good_sections(): |######----#####-----######-#|
other.good_sections(): |---##---####----##-----###-#|
diff(): |###--#------###-----###-----|
'''
assert isinstance(other, (TimeFrameGroup, list))
all_events = pd.Series()
all_events = all_events.append(pd.Series(1, index=pd.DatetimeIndex(self._df['section_start'])))
all_events = all_events.append(pd.Series(-1, index=pd.DatetimeIndex(self._df['section_end'])))
all_events = all_events.append(pd.Series(-1, index=pd.DatetimeIndex(other._df['section_start'])))
all_events = all_events.append(pd.Series(+1, index=pd.DatetimeIndex(other._df['section_end'])))
all_events.sort_index(inplace=True)
all_active = (all_events.cumsum()>0)
starts = all_events.index[all_active]
ends = all_active.shift(1)
if len(ends) > 0:
ends.iloc[0] = False
ends = all_events[ends].index
result = pd.DataFrame({'section_start': starts, 'section_end':ends})
return TimeFrameGroup(result)
def intersection(self, other):
"""Returns a new TimeFrameGroup of self masked by other.
Illustrated example:
self.good_sections(): |######----#####-----######-#|
other.good_sections(): |---##---####----##-----###-#|
intersection(): |---##-----##-----------###-#|
"""
# This blew up at the assertion below when I tried to compute accuracy as an error metric.
assert isinstance(other, (TimeFrameGroup, list))
return TimeFrameGroup.intersect_many([self, other])
def intersect_many(groups):
'''
Function to do a fast intersection between many TimeFrameGroups.
Parameters
---------
groups: [nilmtk.TimeFrameGroup]
The list of TimeFrameGroups to calculate the intersection for.
'''
if any(map(lambda grp: len(grp._df) == 0, groups)):
return TimeFrameGroup()
all_events = pd.Series()
for group in groups:
all_events = all_events.append(pd.Series(1, index=pd.DatetimeIndex(group._df['section_start'])))
all_events = all_events.append(pd.Series(-1, index=pd.DatetimeIndex(group._df['section_end'])))
all_events.sort_index(inplace=True)
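# A timestamp lies in the intersection exactly when all groups are active there,
# i.e. when the running sum of start (+1) / end (-1) events equals len(groups).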
all_active = (all_events.cumsum()==len(groups))
starts = all_events.index[all_active]
ends = all_active.shift(1).fillna(False)
#if len(ends > 0):
# ends[0] = False
ends = all_events[ends].index
result = pd.DataFrame({'section_start': starts, 'section_end':ends})
return TimeFrameGroup(result)
def matching(self, other): #, valid_timeframes = None, in_percent = False):
'''
Calculates the matching of two timeframegroups.
These are the timeframes where both are on or off.
If given, excluded timeframes are subtracted out. This is useful when there
are e.g. not-good sections.
self.good_sections(): |######----#####-----######-#|
other.good_sections(): |---##---####----##-----###-#|
matching(): |---##-##--##---#--##---#####|
Parameters:
other: The other timeframe to match with
valid_timeframes: TimeFrameGroup which defines the area for which to do the calculation
in_percent: Whether the amount of matched time shall be returned as fraction of whole valid timespan.
(takes into account the "excluded_timeframes")
'''
assert isinstance(other, (TimeFrameGroup, list))
return TimeFrameGroup.matching_many([self, other])
def matching_many(groups):
'''
Function to do a fast matching between many TimeFrameGroups.
If the groups are two TimeFrameGroups from a binary estimator and its
ground truth, this corresponds to the accuracy.
Parameters
---------
groups: [nilmtk.TimeFrameGroup]
The list of TimeFrameGroups to calculate the matching for.
'''
if any(map(lambda grp: len(grp._df) == 0, groups)):
return TimeFrameGroup()
all_events = pd.Series()
for group in groups:
all_events = all_events.append(pd.Series(1, index=pd.DatetimeIndex(group._df['section_start'])))
all_events = all_events.append(pd.Series(-1, index=pd.DatetimeIndex(group._df['section_end'])))
all_events.sort_index(inplace=True)
all_events_sum = all_events.cumsum()
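# The groups "match" where they are either all active (running sum == len(groups))
# or all inactive (running sum == 0); both states count towards the matched time.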
all_active = ((all_events_sum==len(groups)) | (all_events_sum == 0))
# Remove last, which is always created after end of all sections
starts = all_events.index[all_active][:-1]
ends = all_active.shift(1)
if len(ends) > 0:
ends.iloc[0] = False
ends = all_events[ends].index
result = pd.DataFrame({'section_start': starts, 'section_end':ends})
return TimeFrameGroup(result)
import os
import time
import pandas as pd
from geopy.exc import GeocoderTimedOut
from geopy.geocoders import Nominatim
def straat2coord(file_path: str, woonplaats: str, woonplaats_header: str, adres_header: str, sep: str = ";") -> None:
"""Berekend aan de hand van een CSV-bestand de breedte- en hoogtegraad.
Resultaten worden opgeslagen in een nieuw CSV-bestand `data/geoDataKDV.csv`.
Als input wordt om een woonplaats gevraagd. Alle punten die aan de waarde 'woonplaats voldoen'
in de kolom 'woonplaatsHeader' worden geimporteerd.
De breedte- en lengtegraad van de waardes die zich bevinden in de kolom 'adresHeader' worden opgevraagd.
Duplicaten worden direct overgeslagen.
:param file_path: totale path naar het te converteren bestand
:param woonplaats: woonplaats waar een selectie uit (landelijke) data word gehaald
:param woonplaats_header: kolom waar de waarde `woonplaats` zich in bevind
:param adres_header: kolom met de adressen die omgezet mooeten worden. Idealieter adres + huisnummer
:param sep: separator voor CSV-bestand, standaard ';' maar kan ook ',' of iets anders zijn
:returns: Geconverteerd bestand opgeslagen in data/output/. Bestand bevat de headers latitude en longitude
"""
if not isinstance(file_path, str) or not isinstance(woonplaats, str) \
or not isinstance(woonplaats_header, str) or not isinstance(adres_header, str):
raise ValueError("Verkeerde waardes meegegeven als argumenten")
print("Even geduld a.u.b, dit kan even duren...")
csv_data = pd.read_csv(file_path, sep=sep) # read the data from the file
subset = csv_data.loc[csv_data[woonplaats_header] == woonplaats] # select the relevant subset of the data
geo_locator = Nominatim(user_agent="IPASS Project - <NAME> 2019") # set up the geocoder for the API calls
geo_locaties = pd.DataFrame(columns=["latitude", "longitude"]) # DataFrame for the results
for adres in subset[adres_header].drop_duplicates(): # convert every address to coordinates
try:
locatie = geo_locator.geocode(f"{adres} {woonplaats}") # look up the coordinates
except GeocoderTimedOut: # too many requests
locatie = None
if locatie is not None:
geo_locaties = geo_locaties.append({"latitude": locatie.latitude, "longitude": locatie.longitude},
ignore_index=True)
time.sleep(0.5) # avoid a TooManyRequests error
abs_path = os.path.basename(file_path)
file_name = os.path.splitext(abs_path)[0]
geo_locaties.to_csv(f"data/output/geo_{file_name}.csv", index=False) # save the data for speed (avoids re-querying later)
print(geo_locaties.head())
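# Usage sketch (hypothetical file and column names):
#   straat2coord("data/kinderdagverblijven.csv", "Utrecht", "woonplaats", "adres", sep=";")
#   -> writes data/output/geo_kinderdagverblijven.csv with the columns latitude and longitude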
def coord2coord(file_path: str, lat_header: str, long_header: str) -> None:
"""Haalt uit een CSV-bestand de latitude- en longitudekolom.
De gebruiker dient de namen van de kolommen waarde latitude en logitude
zijn opgeslagen op te geven.
Deze kolommen worden opgeslagen in een nieuw bestand met de prefix `geo_`.
Het bestand kan vervolgens worden gebruikt om Voronoi's of kaarten te maken.
:param file_path: totale path naar het te converteren bestand
:param lat_header: naam van de kolom die de latitude bevat
:param long_header: naam van de kolom die de longiutde bevat
:returns: Geconverteerd bestand opgeslagen in data/output/. Bestand bevat de headers latitude en longitude
"""
if not isinstance(file_path, str) or not isinstance(lat_header, str) \
or not isinstance(long_header, str):
raise ValueError("Verkeerde waardes meegegeven als argumenten")
print("Even geduld a.u.b, dit kan even duren...")
csv_data = pd.read_csv(file_path, sep=";")
"""
Functions for comparing and visualizing model performance. Most of these functions rely on ATOM's model tracker and
datastore services, which are not part of the standard AMPL installation, but a few functions will work on collections of
models saved as local files.
"""
import os
import sys
import pdb
import pandas as pd
import numpy as np
import matplotlib
import logging
import json
import shutil
import tarfile
import tempfile
from collections import OrderedDict
from atomsci.ddm.utils import datastore_functions as dsf
from atomsci.ddm.pipeline import model_tracker as trkr
import atomsci.ddm.pipeline.model_pipeline as mp
import atomsci.ddm.pipeline.parameter_parser as parse
import atomsci.ddm.pipeline.model_wrapper as mw
import atomsci.ddm.pipeline.featurization as feat
from tensorflow.python.keras.utils.layer_utils import count_params
logger = logging.getLogger('ATOM')
mlmt_supported = True
try:
from atomsci.clients import MLMTClient
except (ModuleNotFoundError, ImportError):
logger.debug("Model tracker client not supported in your environment; can look at models in filesystem only.")
mlmt_supported = False
matplotlib.rc('xtick', labelsize=12)
matplotlib.rc('ytick', labelsize=12)
matplotlib.rc('axes', labelsize=12)
logging.basicConfig(format='%(asctime)-15s %(message)s')
nan = np.float32('nan')
#------------------------------------------------------------------------------------------------------------------
def del_ignored_params(dictionary, ignored_params):
"""
Deletes ignored parameters from the dictionary if they exist
Args:
dictionary (dict): A dictionary with parameters
ignored_parameters (list(str)): A list of keys potentially in the dictionary
Returns:
None
"""
for ip in ignored_params:
if ip in dictionary:
del dictionary[ip]
#------------------------------------------------------------------------------------------------------------------
def get_collection_datasets(collection_name):
"""
Returns a list of unique training datasets used for all models in a given collection.
Args:
collection_name (str): Name of model tracker collection to search for models.
Returns:
list: List of model training (dataset_key, bucket) tuples.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
dataset_set = set()
mlmt_client = dsf.initialize_model_tracker()
dset_dicts = mlmt_client.model.query_datasets(collection_name=collection_name, metrics_type='training').result()
# Convert to a list of (dataset_key, bucket) tuples
for dset_dict in dset_dicts:
dataset_set.add((dset_dict['dataset_key'], dset_dict['bucket']))
return sorted(dataset_set)
#------------------------------------------------------------------------------------------------------------------
def extract_collection_perf_metrics(collection_name, output_dir, pred_type='regression'):
"""
Obtain list of training datasets with models in the given collection. Get performance metrics for
models on each dataset and save them as CSV files in the given output directory.
Args:
collection_name (str): Name of model tracker collection to search for models.
output_dir (str): Directory where tables of performance metrics will be written.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
Returns:
None
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return
datasets = get_collection_datasets(collection_name)
os.makedirs(output_dir, exist_ok=True)
for dset_key, bucket in datasets:
dset_perf_df = get_training_perf_table(dset_key, bucket, collection_name, pred_type=pred_type)
dset_perf_file = '%s/%s_%s_model_perf_metrics.csv' % (output_dir, os.path.basename(dset_key).replace('.csv', ''), collection_name)
dset_perf_df.to_csv(dset_perf_file, index=False)
print('Wrote file %s' % dset_perf_file)
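# Usage sketch (hypothetical collection name and output path):
#   extract_collection_perf_metrics('pilot_models', '/tmp/perf_tables', pred_type='regression')
#   writes one <dataset>_<collection>_model_perf_metrics.csv file per training dataset found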
#------------------------------------------------------------------------------------------------------------------
def get_training_perf_table(dataset_key, bucket, collection_name, pred_type='regression', other_filters = {}):
"""
Load performance metrics from model tracker for all models saved in the model tracker DB under
a given collection that were trained against a particular dataset. Identify training parameters
that vary between models, and generate plots of performance vs particular combinations of
parameters.
Args:
dataset_key (str): Training dataset key.
bucket (str): Training dataset bucket.
collection_name (str): Name of model tracker collection to search for models.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
other_filters (dict): Other filter criteria to use in querying models.
Returns:
pd.DataFrame: Table of models and performance metrics.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
print("Finding models trained on %s dataset %s" % (bucket, dataset_key))
mlmt_client = dsf.initialize_model_tracker()
query_params = {
"match_metadata": {
"training_dataset.bucket": bucket,
"training_dataset.dataset_key": dataset_key,
},
"match_metrics": {
"metrics_type": "training", # match only training metrics
"label": "best",
},
}
query_params['match_metadata'].update(other_filters)
metadata_list = mlmt_client.model.query_model_metadata(
collection_name=collection_name,
query_params=query_params,
).result()
if metadata_list == []:
print("No matching models returned")
return
else:
print("Found %d matching models" % len(metadata_list))
model_uuid_list = []
model_type_list = []
max_epochs_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
featurizer_list = []
splitter_list = []
rf_estimators_list = []
rf_max_features_list = []
rf_max_depth_list = []
xgb_learning_rate_list = []
xgb_gamma_list = []
best_epoch_list = []
max_epochs_list = []
subsets = ['train', 'valid', 'test']
score_dict = {}
for subset in subsets:
score_dict[subset] = []
if pred_type == 'regression':
metric_type = 'r2_score'
else:
metric_type = 'roc_auc_score'
for metadata_dict in metadata_list:
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get model metrics for this model
metrics_dicts = metadata_dict['training_metrics']
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
continue
subset_metrics = {}
for metrics_dict in metrics_dicts:
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['model_parameters']
model_type = model_params['model_type']
model_type_list.append(model_type)
featurizer = model_params['featurizer']
featurizer_list.append(featurizer)
split_params = metadata_dict['splitting_parameters']
splitter_list.append(split_params['splitter'])
dataset_key = metadata_dict['training_dataset']['dataset_key']
if model_type == 'NN':
nn_params = metadata_dict['nn_specific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
rf_estimators_list.append(nan)
rf_max_features_list.append(nan)
rf_max_depth_list.append(nan)
xgb_learning_rate_list.append(nan)
xgb_gamma_list.append(nan)
if model_type == 'RF':
rf_params = metadata_dict['rf_specific']
rf_estimators_list.append(rf_params['rf_estimators'])
rf_max_features_list.append(rf_params['rf_max_features'])
rf_max_depth_list.append(rf_params['rf_max_depth'])
max_epochs_list.append(nan)
best_epoch_list.append(nan)
learning_rate_list.append(nan)
layer_sizes_list.append(nan)
dropouts_list.append(nan)
xgb_learning_rate_list.append(nan)
xgb_gamma_list.append(nan)
if model_type == 'xgboost':
xgb_params = metadata_dict['xgb_specific']
rf_estimators_list.append(nan)
rf_max_features_list.append(nan)
rf_max_depth_list.append(nan)
max_epochs_list.append(nan)
best_epoch_list.append(nan)
learning_rate_list.append(nan)
layer_sizes_list.append(nan)
dropouts_list.append(nan)
xgb_learning_rate_list.append(xgb_params["xgb_learning_rate"])
xgb_gamma_list.append(xgb_params["xgb_gamma"])
for subset in subsets:
score_dict[subset].append(subset_metrics[subset][metric_type])
perf_df = pd.DataFrame(dict(
model_uuid=model_uuid_list,
model_type=model_type_list,
dataset_key=dataset_key,
featurizer=featurizer_list,
splitter=splitter_list,
max_epochs=max_epochs_list,
best_epoch=best_epoch_list,
learning_rate=learning_rate_list,
layer_sizes=layer_sizes_list,
dropouts=dropouts_list,
rf_estimators=rf_estimators_list,
rf_max_features=rf_max_features_list,
rf_max_depth=rf_max_depth_list,
xgb_learning_rate = xgb_learning_rate_list,
xgb_gamma = xgb_gamma_list))
for subset in subsets:
metric_col = '%s_%s' % (metric_type, subset)
perf_df[metric_col] = score_dict[subset]
sort_metric = '%s_valid' % metric_type
perf_df = perf_df.sort_values(sort_metric, ascending=False)
return perf_df
# -----------------------------------------------------------------------------------------------------------------
def extract_model_and_feature_parameters(metadata_dict):
"""
Given a model metadata dictionary, extract model and feature parameters. Looks for parameter names
that end in *_specific. e.g. nn_specific, auto_featurizer_specific
Args:
metadata_dict (dict): Dictionary containing NON-FLATTENED metadata for an AMPL model
Returns:
dictionary containing featurizer and model parameters. Most contain the following
keys. ['max_epochs', 'best_epoch', 'learning_rate', 'layer_sizes', 'drop_outs',
'rf_estimators', 'rf_max_features', 'rf_max_depth', 'xgb_gamma', 'xgb_learning_rate',
'featurizer_parameters_dict', 'model_parameters_dict']
"""
model_params = metadata_dict['model_parameters']
model_type = model_params['model_type']
required = ['max_epochs', 'best_epoch', 'learning_rate', 'layer_sizes', 'dropouts',
'rf_estimators', 'rf_max_features', 'rf_max_depth', 'xgb_gamma', 'xgb_learning_rate']
model_info = {}
model_info['model_uuid'] = metadata_dict['model_uuid']
if model_type == 'NN':
nn_params = metadata_dict['nn_specific']
model_info['max_epochs'] = nn_params['max_epochs']
model_info['best_epoch'] = nn_params['best_epoch']
model_info['learning_rate'] = nn_params['learning_rate']
model_info['layer_sizes'] = ','.join(['%d' % s for s in nn_params['layer_sizes']])
model_info['dropouts'] = ','.join(['%.2f' % d for d in nn_params['dropouts']])
elif model_type == 'RF':
rf_params = metadata_dict['rf_specific']
model_info['rf_estimators'] = rf_params['rf_estimators']
model_info['rf_max_features'] = rf_params['rf_max_features']
model_info['rf_max_depth'] = rf_params['rf_max_depth']
elif model_type == 'xgboost':
xgb_params = metadata_dict['xgb_specific']
model_info['xgb_gamma'] = xgb_params['xgb_gamma']
model_info['xgb_learning_rate'] = xgb_params['xgb_learning_rate']
for r in required:
if r not in model_info:
# all fields must be filled in
model_info[r] = nan
# the new way of extracting model parameters is to simply save them in json
if 'nn_specific' in metadata_dict:
model_metadata = metadata_dict['nn_specific']
# include learning rate, max_epochs, and best_epoch for convenience
model_info['max_epochs'] = model_metadata['max_epochs']
model_info['best_epoch'] = model_metadata['best_epoch']
learning_rate_col = [c for c in model_metadata.keys() if c.endswith('learning_rate')]
if len(learning_rate_col) == 1:
model_info['learning_rate'] = model_metadata[learning_rate_col[0]]
# delete several parameters that aren't normally saved
ignored_params = ['batch_size','bias_init_consts','optimizer_type',
'weight_decay_penalty','weight_decay_penalty_type','weight_init_stddevs']
del_ignored_params(model_metadata, ignored_params)
elif 'rf_specific' in metadata_dict:
model_metadata = metadata_dict['rf_specific']
elif 'xgb_specific' in metadata_dict:
model_metadata = metadata_dict['xgb_specific']
# delete several parameters that aren't normally saved
ignored_params = ['xgb_colsample_bytree','xgb_max_depth',
'xgb_min_child_weight','xgb_n_estimators','xgb_subsample']
del_ignored_params(model_metadata, ignored_params)
else:
# no model parameters found
model_metadata = {}
model_info['model_parameters_dict'] = json.dumps(model_metadata)
if 'ecfp_specific' in metadata_dict:
feat_metadata = metadata_dict['ecfp_specific']
elif 'auto_featurizer_specific' in metadata_dict:
feat_metadata = metadata_dict['auto_featurizer_specific']
elif 'autoencoder_specific' in metadata_dict:
feat_metadata = metadata_dict['autoencoder_specific']
else:
# no model parameters found
feat_metadata = {}
model_info['feat_parameters_dict'] = json.dumps(feat_metadata)
return model_info
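# Usage sketch (assumes meta is a non-flattened AMPL metadata dict, e.g. loaded from a model_metadata.json file):
#   with open('model_metadata.json') as f:
#       meta = json.load(f)
#   params = extract_model_and_feature_parameters(meta)
#   params['model_parameters_dict']   # JSON string of the model-specific hyperparameters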
# ------------------------------------------------------------------------------------------------------------------
def get_best_perf_table(metric_type, col_name=None, result_dir=None, model_uuid=None, metadata_dict=None, PK_pipe=False):
"""
Extract parameters and training run performance metrics for a single model. The model may be
specified either by a metadata dictionary, a model_uuid or a result directory; in the model_uuid case, the function
queries the model tracker DB for the model metadata. For models saved in the filesystem, can query the performance
data from the original result directory, but not from a saved tarball.
Args:
metric_type (str): Performance metric to include in result dictionary.
col_name (str): Collection name containing model, if model is specified by model_uuid.
result_dir (str): result directory of the model, if Model tracker is not supported and metadata_dict not provided.
model_uuid (str): UUID of model to query, if metadata_dict is not provided.
metadata_dict (dict): Full metadata dictionary for a model, including training metrics and
dataset metadata.
PK_pipe (bool): If True, include some additional parameters in the result dictionary specific to PK models.
Returns:
model_info (dict): Dictionary of parameter or metric name - value pairs.
Todo:
Add support for models saved as local tarball files.
"""
if not mlmt_supported and not result_dir:
print("Model tracker not supported in your environment; can examine models saved in filesystem only, 'result_dir' needs to be provided.")
return None
elif mlmt_supported and col_name:
mlmt_client = dsf.initialize_model_tracker()
if metadata_dict is None:
if model_uuid is None:
print("Have to specify either metadata_dict or model_uuid")
return
query_params = {
"match_metadata": {
"model_uuid": model_uuid,
},
"match_metrics": {
"metrics_type": "training", # match only training metrics
"label": "best",
},
}
metadata_list = list(mlmt_client.model.query_model_metadata(
collection_name=col_name,
query_params=query_params
).result())
if len(metadata_list) == 0:
print("No matching models returned")
return None
metadata_dict = metadata_list[0]
elif result_dir:
model_dir = ""
for dirpath, dirnames, filenames in os.walk(result_dir):
if model_uuid in dirnames:
model_dir = os.path.join(dirpath, model_uuid)
break
if model_dir:
with open(os.path.join(model_dir, 'model_metadata.json')) as f:
metadata_dict = json.load(f)
else:
print(f"model_uuid ({model_uuid}) not exist in {result_dir}.")
return None
model_info = {}
model_info['model_uuid'] = metadata_dict['model_uuid']
model_info['collection_name'] = col_name
# Get model metrics for this model
metrics_dicts = [d for d in metadata_dict['training_metrics'] if d['label'] == 'best']
if len(metrics_dicts) != 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
return None
model_params = metadata_dict['model_parameters']
model_info['model_type'] = model_params['model_type']
model_info['featurizer'] = model_params['featurizer']
split_params = metadata_dict['splitting_parameters']
model_info['splitter'] = split_params['splitter']
if 'split_uuid' in split_params:
model_info['split_uuid'] = split_params['split_uuid']
model_info['dataset_key'] = metadata_dict['training_dataset']['dataset_key']
model_info['bucket'] = metadata_dict['training_dataset']['bucket']
dset_meta = metadata_dict['training_dataset']['dataset_metadata']
if PK_pipe:
model_info['assay_name'] = dset_meta.get('assay_category', 'NA')
model_info['response_col'] = dset_meta.get('response_cols', dset_meta.get('response_col', 'NA'))
try:
model_info['descriptor_type'] = metadata_dict['descriptor_specific']['descriptor_type']
except KeyError:
model_info['descriptor_type'] = 'NA'
try:
model_info['num_samples'] = dset_meta['num_row']
except:
# KSM: Commented out because original dataset may no longer be accessible.
#tmp_df = dsf.retrieve_dataset_by_datasetkey(model_info['dataset_key'], model_info['bucket'])
#model_info['num_samples'] = tmp_df.shape[0]
model_info['num_samples'] = nan
# add model and feature params
# model_uuid appears in model_feature_params and will overwrite the one in model_info
# it's the same uuid, so it should be ok
model_feature_params = extract_model_and_feature_parameters(metadata_dict)
model_info.update(model_feature_params)
for metrics_dict in metrics_dicts:
subset = metrics_dict['subset']
metric_col = '%s_%s' % (metric_type, subset)
model_info[metric_col] = metrics_dict['prediction_results'][metric_type]
if (model_params['prediction_type'] == 'regression') and (metric_type != 'rms_score'):
metric_col = 'rms_score_%s' % subset
model_info[metric_col] = metrics_dict['prediction_results']['rms_score']
return model_info
# ---------------------------------------------------------------------------------------------------------
def get_best_models_info(col_names=None, bucket='public', pred_type="regression", result_dir=None, PK_pipeline=False,
output_dir='/usr/local/data',
shortlist_key=None, input_dset_keys=None, save_results=False, subset='valid',
metric_type=None, selection_type='max', other_filters={}):
"""
Tabulate parameters and performance metrics for the best models, according to a given metric, trained against
each specified dataset.
Args:
col_names (list of str): List of model tracker collections to search.
bucket (str): Datastore bucket for training datasets.
pred_type (str): Type of models (regression or classification).
result_dir (list of str): Result directories of the models, if model tracker is not supported.
PK_pipeline (bool): Are we being called from PK pipeline?
output_dir (str): Directory to write output table to.
shortlist_key (str): Datastore key for table of datasets to query models for.
input_dset_keys (str or list of str): List of datastore keys for datasets to query models for. Either shortlist_key
or input_dset_keys must be specified, but not both.
save_results (bool): If True, write the table of results to a CSV file.
subset (str): Input dataset subset ('train', 'valid', or 'test') for which metrics are used to select best models.
metric_type (str): Type of performance metric (r2_score, roc_auc_score, etc.) to use to select best models.
selection_type (str): Score criterion ('max' or 'min') to use to select best models.
other_filters (dict): Additional selection criteria to include in model query.
Returns:
top_models_df (DataFrame): Table of parameters and metrics for best models for each dataset.
"""
if not mlmt_supported and not result_dir:
print("Model tracker not supported in your environment; can examine models saved in filesystem only, 'result_dir' needs to be provided.")
return None
top_models_info = []
sort_order = {'max': -1, 'min': 1}
sort_ascending = {'max': False, 'min': True}
if metric_type is None:
if pred_type == 'regression':
metric_type = 'r2_score'
else:
metric_type = 'roc_auc_score'
if other_filters is None:
other_filters = {}
# define dset_keys
if input_dset_keys is not None and shortlist_key is not None:
raise ValueError("You can specify either shortlist_key or input_dset_keys but not both.")
elif input_dset_keys is not None and shortlist_key is None:
if type(input_dset_keys) == str:
dset_keys = [input_dset_keys]
else:
dset_keys = input_dset_keys
elif input_dset_keys is None and shortlist_key is None:
raise ValueError('Must specify either input_dset_keys or shortlist_key')
else:
dset_keys = dsf.retrieve_dataset_by_datasetkey(shortlist_key, bucket)
if dset_keys is None:
# define dset_keys, col_names and buckets from shortlist file
shortlist = pd.read_csv(shortlist_key)
if 'dataset_key' in shortlist.columns:
dset_keys = shortlist['dataset_key'].unique()
elif 'task_name' in shortlist.columns:
dset_keys = shortlist['task_name'].unique()
else:
dset_keys = shortlist.values
if 'collection' in shortlist.columns:
col_names = shortlist['collection'].unique()
if 'bucket' in shortlist.columns:
bucket = shortlist['bucket'].unique()
if mlmt_supported and col_names is not None:
mlmt_client = dsf.initialize_model_tracker()
if type(col_names) == str:
col_names = [col_names]
if type(bucket) == str:
bucket=[bucket]
# Get the best model over all collections for each dataset
for dset_key in dset_keys:
dset_key = dset_key.strip()
dset_model_info = []
for col_name in col_names:
for buck in bucket:
try:
query_params = {
"match_metadata": {
"training_dataset.dataset_key": dset_key,
"training_dataset.bucket": buck,
},
"match_metrics": {
"metrics_type": "training", # match only training metrics
"label": "best",
"subset": subset,
"$sort": [{"prediction_results.%s" % metric_type : sort_order[selection_type]}]
},
}
query_params['match_metadata'].update(other_filters)
try:
print('Querying collection %s for models trained on dataset %s, %s' % (col_name, buck, dset_key))
metadata_list = list(mlmt_client.model.query_model_metadata(
collection_name=col_name,
query_params=query_params,
limit=1
).result())
except Exception as e:
print("Error returned when querying the best model for dataset %s in collection %s" % (dset_key, col_name))
print(e)
continue
if len(metadata_list) == 0:
print("No models returned for dataset %s in collection %s" % (dset_key, col_name))
continue
print('Query returned %d models' % len(metadata_list))
model = metadata_list[0]
model_info = get_best_perf_table(metric_type, col_name, metadata_dict=model, PK_pipe=PK_pipeline)
if model_info is not None:
res_df = pd.DataFrame.from_records([model_info])
dset_model_info.append(res_df)
except Exception as e:
print(e)
continue
metric_col = '%s_%s' % (metric_type, subset)
if len(dset_model_info) > 0:
dset_model_df = pd.concat(dset_model_info, ignore_index=True).sort_values(
by=metric_col, ascending=sort_ascending[selection_type])
top_models_info.append(dset_model_df.head(1))
print('Adding data for bucket %s, dset_key %s' % (dset_model_df.bucket.values[0], dset_model_df.dataset_key.values[0]))
elif result_dir:
metric_col = '%s_%s' % (subset, metric_type)
for rd in result_dir:
temp_perf_df = get_filesystem_perf_results(result_dir = rd, pred_type = pred_type).sort_values(
by=metric_col, ascending=sort_ascending[selection_type])
top_models_info.append(temp_perf_df.head(1))
print(f"Adding data from '{rd}' ")
if len(top_models_info) == 0:
print("No metadata found")
return None
top_models_df = pd.concat(top_models_info, ignore_index=True)
if save_results:
os.makedirs(output_dir, exist_ok=True)
if shortlist_key is not None:
# Not including shortlist key right now because some are weirdly formed and have .csv in the middle
top_models_df.to_csv(os.path.join(output_dir, 'best_models_metadata.csv'), index=False)
else:
for dset_key in input_dset_keys:
# TODO: This doesn't make sense; why output multiple copies of the same table?
shortened_key = dset_key.rstrip('.csv')
top_models_df.to_csv(os.path.join(output_dir, 'best_models_metadata_%s.csv' % shortened_key), index=False)
return top_models_df
# TODO: This function looks like work in progress, should we delete it?
'''
#---------------------------------------------------------------------------------------------------------
def _get_best_grouped_models_info(collection='pilot_fixed', pred_type='regression', top_n=1, subset='test'):
"""
Get results for models in the given collection.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return
res_dir = '/usr/local/data/%s_perf' % collection
plt_dir = '%s/Plots' % res_dir
os.makedirs(plt_dir, exist_ok=True)
res_files = os.listdir(res_dir)
suffix = '_%s_model_perf_metrics.csv' % collection
if pred_type == 'regression':
metric_type = 'r2_score'
else:
metric_type = 'roc_auc_score'
for res_file in res_files:
try:
if not res_file.endswith(suffix):
continue
res_path = os.path.join(res_dir, res_file)
res_df = pd.read_csv(res_path, index_col=False)
res_df['combo'] = ['%s/%s' % (m,f) for m, f in zip(res_df.model_type.values, res_df.featurizer.values)]
dset_name = res_file.replace(suffix, '')
datasets.append(dset_name)
res_df['dataset'] = dset_name
print(dset_name)
res_df = res_df.sort_values('{0}_{1}'.format(metric_type, subset), ascending=False)
res_df['model_type/feat'] = ['%s/%s' % (m,f) for m, f in zip(res_df.model_type.values, res_df.featurizer.values)]
res_df = res_df.sort_values('{0}_{1}'.format(metric_type, subset), ascending=False)
grouped_df = res_df.groupby('model_type/feat').apply(
lambda t: t.head(top_n)
).reset_index(drop=True)
top_grouped_models.append(grouped_df)
top_combo = res_df['model_type/feat'].values[0]
top_combo_dsets.append(top_combo + dset_name.lstrip('ATOM_GSK_dskey'))
top_score = res_df['{0}_{1}'.format(metric_type, subset)].values[0]
top_model_feat.append(top_combo)
top_scores.append(top_score)
num_samples.append(res_df['Dataset Size'][0])
'''
#------------------------------------------------------------------------------------------------------------------
def get_umap_nn_model_perf_table(dataset_key, bucket, collection_name, pred_type='regression', other_filters={}):
"""
Load performance metrics from model tracker for all NN models with the given prediction_type saved in
the model tracker DB under a given collection that were trained against a particular dataset. Show
parameter settings for UMAP transformer for models where they are available.
Args:
dataset_key (str): Dataset key for training dataset.
bucket (str): Dataset bucket for training dataset.
collection_name (str): Name of model tracker collection to search for models.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
other_filters (dict): Other filter criteria to use in querying models.
Returns:
pd.DataFrame: Table of model performance metrics.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
query_params = {
"match_metadata": {
"training_dataset.bucket": bucket,
"training_dataset.dataset_key": dataset_key,
"model_parameters.model_type" : "NN",
"model_parameters.prediction_type" : pred_type
},
"match_metrics": {
"metrics_type": "training", # match only training metrics
"label": "best",
},
}
query_params['match_metadata'].update(other_filters)
print("Finding models trained on %s dataset %s" % (bucket, dataset_key))
mlmt_client = dsf.initialize_model_tracker()
metadata_list = mlmt_client.model.query_model_metadata(
collection_name=collection_name,
query_params=query_params,
).result()
if metadata_list == []:
print("No matching models returned")
return
else:
print("Found %d matching models" % len(metadata_list))
model_uuid_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
featurizer_list = []
best_epoch_list = []
max_epochs_list = []
feature_transform_type_list = []
umap_dim_list = []
umap_targ_wt_list = []
umap_neighbors_list = []
umap_min_dist_list = []
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
sort_metric = 'r2_score'
metrics = ['r2_score', 'rms_score', 'mae_score']
else:
sort_metric = 'roc_auc_score'
metrics = ['roc_auc_score', 'prc_auc_score', 'matthews_cc', 'kappa', 'confusion_matrix']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = []
for metadata_dict in metadata_list:
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get model metrics for this model
metrics_dicts = metadata_dict['training_metrics']
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
continue
if len(metrics_dicts) > 3:
raise Exception('Got more than one set of best epoch metrics for model %s' % model_uuid)
subset_metrics = {}
for metrics_dict in metrics_dicts:
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['model_parameters']
model_type = model_params['model_type']
if model_type != 'NN':
continue
featurizer = model_params['featurizer']
featurizer_list.append(featurizer)
feature_transform_type = metadata_dict['training_dataset']['feature_transform_type']
feature_transform_type_list.append(feature_transform_type)
nn_params = metadata_dict['nn_specific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
for subset in subsets:
for metric in metrics:
score_dict[subset][metric].append(subset_metrics[subset][metric])
if 'umap_specific' in metadata_dict:
umap_params = metadata_dict['umap_specific']
umap_dim_list.append(umap_params['umap_dim'])
umap_targ_wt_list.append(umap_params['umap_targ_wt'])
umap_neighbors_list.append(umap_params['umap_neighbors'])
umap_min_dist_list.append(umap_params['umap_min_dist'])
else:
umap_dim_list.append(nan)
umap_targ_wt_list.append(nan)
umap_neighbors_list.append(nan)
umap_min_dist_list.append(nan)
perf_df = pd.DataFrame(dict(
model_uuid=model_uuid_list,
learning_rate=learning_rate_list,
dropouts=dropouts_list,
layer_sizes=layer_sizes_list,
featurizer=featurizer_list,
best_epoch=best_epoch_list,
max_epochs=max_epochs_list,
feature_transform_type=feature_transform_type_list,
umap_dim=umap_dim_list,
umap_targ_wt=umap_targ_wt_list,
umap_neighbors=umap_neighbors_list,
umap_min_dist=umap_min_dist_list ))
for subset in subsets:
for metric in metrics:
metric_col = '%s_%s' % (metric, subset)
perf_df[metric_col] = score_dict[subset][metric]
sort_by = '%s_valid' % sort_metric
perf_df = perf_df.sort_values(sort_by, ascending=False)
return perf_df
#------------------------------------------------------------------------------------------------------------------
def get_tarball_perf_table(model_tarball, pred_type='classification'):
"""
Retrieve model metadata and performance metrics for a model saved as a tarball (.tar.gz) file.
Args:
model_tarball (str): Path of model tarball file, named as model.tar.gz.
pred_type (str): Prediction type ('classification' or 'regression') of model.
Returns:
tuple (pd.DataFrame, dict): Table of performance metrics and a dictionary of model metadata.
"""
tarf_content = tarfile.open(model_tarball, "r")
metadata_file = tarf_content.getmember("./model_metadata.json")
ext_metadata = tarf_content.extractfile(metadata_file)
meta_json = json.load(ext_metadata)
ext_metadata.close()
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
metrics = ['r2_score', 'rms_score', 'mae_score']
else:
metrics = ['roc_auc_score', 'prc_auc_score', 'precision', 'recall_score',
'accuracy_score', 'npv', 'matthews_cc', 'kappa', 'cross_entropy', 'confusion_matrix']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = [0,0]
for emet in meta_json["training_metrics"]:
label = emet["label"]
score_ix = 0 if label == "best" else 1
subset = emet["subset"]
for metric in metrics:
score_dict[subset][metric][score_ix] = emet["prediction_results"][metric]
perf_df = pd.DataFrame()
for subset in subsets:
for metric in metrics:
metric_col = '%s_%s' % (subset, metric)
perf_df[metric_col] = score_dict[subset][metric]
return perf_df, meta_json
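# Hedged usage sketch for get_tarball_perf_table (the tarball path below is hypothetical, not from this module).
# Row 0 of the returned table holds the best-epoch metrics; row 1 holds any non-'best' labeled epoch.
#     perf_df, meta_json = get_tarball_perf_table('/path/to/model.tar.gz', pred_type='regression')
#     print(perf_df[['train_r2_score', 'valid_r2_score', 'test_r2_score']])
#     print(meta_json['model_parameters']['model_type'])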
#------------------------------------------------------------------------------------------------------------------
def get_filesystem_perf_results(result_dir, pred_type='classification'):
"""
Retrieve metadata and performance metrics for models stored in the filesystem from a hyperparameter search run.
Args:
result_dir (str): Root directory for results from a hyperparameter search training run.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
Returns:
pd.DataFrame: Table of metadata fields and performance metrics.
"""
ampl_version_list = []
model_uuid_list = []
model_type_list = []
featurizer_list = []
dataset_key_list = []
splitter_list = []
model_score_type_list = []
feature_transform_type_list = []
# model type specific lists
param_list = []
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
metrics = ['r2_score', 'rms_score', 'mae_score', 'num_compounds']
else:
metrics = ['roc_auc_score', 'prc_auc_score', 'precision', 'recall_score', 'num_compounds',
'accuracy_score', 'bal_accuracy', 'npv', 'matthews_cc', 'kappa', 'cross_entropy', 'confusion_matrix']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = []
score_dict['valid']['model_choice_score'] = []
# Navigate the results directory tree
model_list = []
metrics_list = []
tar_list = []
for dirpath, dirnames, filenames in os.walk(result_dir):
# collect all tars for later
tar_list = tar_list + [os.path.join(dirpath, f) for f in filenames if f.endswith('.tar.gz')]
if ('model_metadata.json' in filenames) and ('model_metrics.json' in filenames):
meta_path = os.path.join(dirpath, 'model_metadata.json')
with open(meta_path, 'r') as meta_fp:
meta_dict = json.load(meta_fp)
if meta_dict['model_parameters']['prediction_type']==pred_type:
model_list.append(meta_dict)
metrics_path = os.path.join(dirpath, 'model_metrics.json')
with open(metrics_path, 'r') as metrics_fp:
metrics_dicts = json.load(metrics_fp)
metrics_list.append(metrics_dicts)
print("Found data for %d models under %s" % (len(model_list), result_dir))
    # build dictionary of tarball names
tar_dict = {os.path.basename(tf):tf for tf in tar_list}
path_list = []
for metadata_dict, metrics_dicts in zip(model_list, metrics_list):
model_uuid = metadata_dict['model_uuid']
dataset_key = metadata_dict['training_dataset']['dataset_key']
dataset_name = mp.build_tarball_name(mp.build_dataset_name(dataset_key), model_uuid)
if dataset_name in tar_dict:
path_list.append(tar_dict[dataset_name])
else:
# unable to find saved tar file
path_list.append('')
# Get list of training run metrics for this model
if len(metrics_dicts) < 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
continue
subset_metrics = {}
for metrics_dict in metrics_dicts:
if metrics_dict['label'] == 'best':
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['model_parameters']
ampl_version = model_params['ampl_version']
ampl_version_list.append(ampl_version)
model_type = model_params['model_type']
model_type_list.append(model_type)
model_score_type = model_params['model_choice_score_type']
model_score_type_list.append(model_score_type)
featurizer = model_params['featurizer']
        # Replace the generic descriptor featurizer label with the specific descriptor type so the table
        # mixes ecfp, graphconv, moe, mordred, rdkit etc. in one concise column
if featurizer in ["computed_descriptors", "descriptors"]:
featurizer = metadata_dict["descriptor_specific"]["descriptor_type"]
featurizer_list.append(featurizer)
split_params = metadata_dict['splitting_parameters']
splitter_list.append(split_params['splitter'])
dataset_key_list.append(metadata_dict['training_dataset']['dataset_key'])
feature_transform_type = metadata_dict['training_dataset']['feature_transform_type']
feature_transform_type_list.append(feature_transform_type)
param_list.append(extract_model_and_feature_parameters(metadata_dict))
for subset in subsets:
for metric in metrics:
score_dict[subset][metric].append(subset_metrics[subset][metric])
score_dict['valid']['model_choice_score'].append(subset_metrics['valid']['model_choice_score'])
param_df = pd.DataFrame(param_list)
perf_df = pd.DataFrame(dict(
model_uuid=model_uuid_list,
model_path = path_list,
ampl_version=ampl_version_list,
model_type=model_type_list,
dataset_key=dataset_key_list,
featurizer=featurizer_list,
splitter=splitter_list,
model_score_type=model_score_type_list,
feature_transform_type=feature_transform_type_list))
perf_df = perf_df.merge(param_df, on='model_uuid', how='inner')
perf_df['model_choice_score'] = score_dict['valid']['model_choice_score']
for subset in subsets:
for metric in metrics:
metric_col = '%s_%s' % (subset, metric)
perf_df[metric_col] = score_dict[subset][metric]
sort_by = 'model_choice_score'
perf_df = perf_df.sort_values(sort_by, ascending=False)
return perf_df
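# Example usage (hypothetical result directory; a sketch, not part of the original module):
#     perf_df = get_filesystem_perf_results('/path/to/hyperparam_results', pred_type='regression')
#     # The table is already sorted by model_choice_score; inspect the top candidates
#     print(perf_df[['model_uuid', 'model_type', 'featurizer', 'valid_r2_score']].head(10))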
def get_filesystem_models(result_dir, pred_type):
"""
Identify all models in result_dir and create perf_result table with 'tarball_path' column containing a path
to each tarball.
"""
perf_df = get_filesystem_perf_results(result_dir, pred_type)
if pred_type == 'regression':
metric = 'valid_r2_score'
else:
metric = 'valid_roc_auc_score'
#best_df = perf_df.sort_values(by=metric, ascending=False).drop_duplicates(subset='dataset_key').copy()
perf_df['dataset_names'] = perf_df['dataset_key'].apply(lambda f: os.path.splitext(os.path.basename(f))[0])
perf_df['tarball_names'] = perf_df.apply(lambda x: '%s_model_%s.tar.gz' % (x['dataset_names'], x['model_uuid']), axis=1)
tarball_names = set(perf_df['tarball_names'].values)
all_filenames = []
for dirpath, dirnames, filenames in os.walk(result_dir):
for fn in filenames:
if fn in tarball_names:
all_filenames.append((fn, os.path.join(dirpath, fn)))
found_files_df = pd.DataFrame({'tarball_names':[f[0] for f in all_filenames],
'tarball_paths':[f[1] for f in all_filenames]})
perf_df = perf_df.merge(found_files_df, on='tarball_names', how='outer')
return perf_df
#------------------------------------------------------------------------------------------------------------------
def copy_best_filesystem_models(result_dir, dest_dir, pred_type, force_update=False):
"""
Identify the best models for each dataset within a result directory tree (e.g. from a hyperparameter search).
Copy the associated model tarballs to a destination directory.
Args:
        result_dir (str): Path to model training result directory.
        dest_dir (str): Path of directory where model tarballs will be copied to.
        pred_type (str): Prediction type ('classification' or 'regression') of models to copy.
force_update (bool): If true, overwrite tarball files that already exist in dest_dir.
Returns:
pd.DataFrame: Table of performance metrics for best models.
"""
perf_df = get_filesystem_perf_results(result_dir, pred_type)
if pred_type == 'regression':
metric = 'valid_r2_score'
else:
metric = 'valid_roc_auc_score'
best_df = perf_df.sort_values(by=metric, ascending=False).drop_duplicates(subset='dataset_key').copy()
dataset_names = [os.path.splitext(os.path.basename(f))[0] for f in best_df.dataset_key.values]
model_uuids = best_df.model_uuid.values
tarball_names = ['%s_model_%s.tar.gz' % (dset_name, model_uuid) for dset_name, model_uuid in zip(dataset_names, model_uuids)]
for dirpath, dirnames, filenames in os.walk(result_dir):
for fn in filenames:
if (fn in tarball_names) and (force_update or not os.path.exists(os.path.join(dest_dir, fn))):
shutil.copy2(os.path.join(dirpath, fn), dest_dir)
print('Copied %s' % fn)
return best_df
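# Example usage (hypothetical paths; a sketch, not part of the original module). dest_dir should already
# exist, since shutil.copy2 does not create directories.
#     best_df = copy_best_filesystem_models('/path/to/hyperparam_results', '/path/to/best_models',
#                                           pred_type='classification', force_update=False)
#     print(best_df[['dataset_key', 'model_uuid', 'valid_roc_auc_score']])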
#------------------------------------------------------------------------------------------------------------------
def get_summary_perf_tables(collection_names=None, filter_dict={}, result_dir=None, prediction_type='regression', verbose=False):
"""
Load model parameters and performance metrics from model tracker for all models saved in the model tracker DB under
the given collection names (or result directory if Model tracker is not available) with the given prediction type.
Tabulate the parameters and metrics including:
dataset (assay name, target, parameter, key, bucket)
dataset size (train/valid/test/total)
number of training folds
model type (NN or RF)
featurizer
transformation type
metrics: r2_score, mae_score and rms_score for regression, or ROC AUC for classification
Args:
collection_names (list): Names of model tracker collections to search for models.
filter_dict (dict): Additional filter criteria to use in model query.
result_dir (str or list): Directories to search for models; must be provided if the model tracker DB is not available.
prediction_type (str): Type of models (classification or regression) to query.
verbose (bool): If true, print status messages as collections are processed.
Returns:
pd.DataFrame: Table of model metadata fields and performance metrics.
"""
if not mlmt_supported and not result_dir:
print("Model tracker not supported in your environment; can examine models saved in filesystem only, 'result_dir' is needed.")
return None
collection_list = []
ampl_version_list=[]
model_uuid_list = []
time_built_list = []
model_type_list = []
dataset_key_list = []
bucket_list = []
param_list = []
featurizer_list = []
desc_type_list = []
transform_list = []
dset_size_list = []
splitter_list = []
split_strategy_list = []
split_uuid_list = []
umap_dim_list = []
umap_targ_wt_list = []
umap_neighbors_list = []
umap_min_dist_list = []
model_feat_param_list = []
if prediction_type == 'regression':
score_types = ['r2_score', 'mae_score', 'rms_score']
else:
# TODO: add more classification metrics later
score_types = ['roc_auc_score', 'prc_auc_score', 'accuracy_score', 'bal_accuracy', 'precision', 'recall_score', 'npv', 'matthews_cc', 'kappa']
subsets = ['train', 'valid', 'test']
score_dict = {}
ncmpd_dict = {}
for subset in subsets:
score_dict[subset] = {}
for score_type in score_types:
score_dict[subset][score_type] = []
ncmpd_dict[subset] = []
metadata_list_dict = {}
if mlmt_supported and collection_names:
mlmt_client = dsf.initialize_model_tracker()
filter_dict['model_parameters.prediction_type'] = prediction_type
for collection_name in collection_names:
print("Finding models in collection %s" % collection_name)
query_params = {
"match_metadata": filter_dict,
"match_metrics": {
"metrics_type": "training", # match only training metrics
"label": "best",
},
}
metadata_list = mlmt_client.model.query_model_metadata(
collection_name=collection_name,
query_params=query_params,
).result()
metadata_list_dict[collection_name] = metadata_list
elif result_dir:
if isinstance(result_dir, str):
result_dir = [result_dir]
for rd in result_dir:
if rd not in metadata_list_dict:
metadata_list_dict[rd] = []
for dirpath, dirnames, filenames in os.walk(rd):
if "model_metadata.json" in filenames:
with open(os.path.join(dirpath, 'model_metadata.json')) as f:
metadata_dict = json.load(f)
metadata_list_dict[rd].append(metadata_dict)
for ss in metadata_list_dict:
for i, metadata_dict in enumerate(metadata_list_dict[ss]):
if (i % 10 == 0) and verbose:
print('Processing collection %s model %d' % (ss, i))
# Check that model has metrics before we go on
if not 'training_metrics' in metadata_dict:
continue
collection_list.append(ss)
model_uuid = metadata_dict['model_uuid']
model_uuid_list.append(model_uuid)
time_built = metadata_dict['time_built']
time_built_list.append(time_built)
model_params = metadata_dict['model_parameters']
ampl_version = model_params.get('ampl_version', 'probably 1.0.0')
ampl_version_list.append(ampl_version)
model_type = model_params['model_type']
model_type_list.append(model_type)
featurizer = model_params['featurizer']
featurizer_list.append(featurizer)
if 'descriptor_specific' in metadata_dict:
desc_type = metadata_dict['descriptor_specific']['descriptor_type']
elif featurizer in ['graphconv', 'ecfp']:
desc_type = featurizer
else:
desc_type = ''
desc_type_list.append(desc_type)
dataset_key = metadata_dict['training_dataset']['dataset_key']
bucket = metadata_dict['training_dataset']['bucket']
dataset_key_list.append(dataset_key)
bucket_list.append(bucket)
dset_metadata = metadata_dict['training_dataset']['dataset_metadata']
param = metadata_dict['training_dataset']['response_cols'][0]
param_list.append(param)
transform_type = metadata_dict['training_dataset']['feature_transform_type']
transform_list.append(transform_type)
split_params = metadata_dict['splitting_parameters']
splitter_list.append(split_params['splitter'])
split_uuid_list.append(split_params.get('split_uuid', ''))
split_strategy = split_params['split_strategy']
split_strategy_list.append(split_strategy)
if 'umap_specific' in metadata_dict:
umap_params = metadata_dict['umap_specific']
umap_dim_list.append(umap_params['umap_dim'])
umap_targ_wt_list.append(umap_params['umap_targ_wt'])
umap_neighbors_list.append(umap_params['umap_neighbors'])
umap_min_dist_list.append(umap_params['umap_min_dist'])
else:
umap_dim_list.append(nan)
umap_targ_wt_list.append(nan)
umap_neighbors_list.append(nan)
umap_min_dist_list.append(nan)
model_feat_param_list.append(extract_model_and_feature_parameters(metadata_dict))
# Get model metrics for this model
metrics_dicts = metadata_dict['training_metrics']
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
subset_metrics = {}
for metrics_dict in metrics_dicts:
if metrics_dict['label'] == 'best':
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
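            # For k-fold CV runs the validation folds are drawn from the training compounds, so the total
            # dataset size is taken as train + test only; otherwise all three subsets are summed.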
if split_strategy == 'k_fold_cv':
dset_size = subset_metrics['train']['num_compounds'] + subset_metrics['test']['num_compounds']
else:
dset_size = subset_metrics['train']['num_compounds'] + subset_metrics['valid']['num_compounds'] + subset_metrics['test']['num_compounds']
for subset in subsets:
subset_size = subset_metrics[subset]['num_compounds']
for score_type in score_types:
try:
score = subset_metrics[subset][score_type]
except KeyError:
score = float('nan')
score_dict[subset][score_type].append(score)
ncmpd_dict[subset].append(subset_size)
dset_size_list.append(dset_size)
col_dict = dict(
collection=collection_list,
ampl_version=ampl_version_list,
model_uuid=model_uuid_list,
time_built=time_built_list,
model_type=model_type_list,
featurizer=featurizer_list,
features=desc_type_list,
transformer=transform_list,
splitter=splitter_list,
split_strategy=split_strategy_list,
split_uuid=split_uuid_list,
umap_dim=umap_dim_list,
umap_targ_wt=umap_targ_wt_list,
umap_neighbors=umap_neighbors_list,
umap_min_dist=umap_min_dist_list,
dataset_bucket=bucket_list,
dataset_key=dataset_key_list,
dataset_size=dset_size_list,
parameter=param_list
)
perf_df = pd.DataFrame(col_dict)
param_df = pd.DataFrame(model_feat_param_list)
perf_df = perf_df.merge(param_df, on='model_uuid', how='inner')
for subset in subsets:
ncmpds_col = '%s_size' % subset
perf_df[ncmpds_col] = ncmpd_dict[subset]
for score_type in score_types:
metric_col = '%s_%s' % (subset, score_type)
perf_df[metric_col] = score_dict[subset][score_type]
return perf_df
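# Example usage (hypothetical collection names/directories; a sketch, not part of the original module):
#     # With the model tracker available:
#     summary_df = get_summary_perf_tables(collection_names=['my_collection'], prediction_type='regression')
#     # Without the tracker, point at one or more filesystem result directories instead:
#     summary_df = get_summary_perf_tables(result_dir='/path/to/results', prediction_type='regression')
#     print(summary_df[['model_uuid', 'features', 'splitter', 'test_r2_score']].head())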
#------------------------------------------------------------------------------------------------------------------
def get_summary_metadata_table(uuids, collections=None):
"""
Tabulate metadata fields and performance metrics for a set of models identified by specific model_uuids.
Args:
uuids (list): List of model UUIDs to query.
collections (list or str): Names of collections in model tracker DB to get models from. If collections is
a string, it must identify one collection to search for all models. If a list, it must be of the same
length as `uuids`. If not provided, all collections will be searched.
Returns:
pd.DataFrame: Table of metadata fields and performance metrics for models.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
if isinstance(uuids,str):
uuids = [uuids]
if isinstance(collections,str):
collections = [collections] * len(uuids)
mlist = []
mlmt_client = dsf.initialize_model_tracker()
for idx,uuid in enumerate(uuids):
if collections is not None:
collection_name = collections[idx]
else:
collection_name = trkr.get_model_collection_by_uuid(uuid)
model_meta = trkr.get_full_metadata_by_uuid(uuid, collection_name=collection_name)
mdl_params = model_meta['model_parameters']
data_params = model_meta['training_dataset']
# Get model metrics for this model
metrics = pd.DataFrame(model_meta['training_metrics'])
metrics = metrics[metrics['label']=='best']
train_metrics = metrics[metrics['subset']=='train']['prediction_results'].values[0]
valid_metrics = metrics[metrics['subset']=='valid']['prediction_results'].values[0]
test_metrics = metrics[metrics['subset']=='test']['prediction_results'].values[0]
# Try to name the model something intelligible in the table
name = 'NA'
if 'target' in data_params['dataset_metadata']:
name = data_params['dataset_metadata']['target']
        if (name == 'NA') and ('assay_endpoint' in data_params['dataset_metadata']):
            name = data_params['dataset_metadata']['assay_endpoint']
        if (name == 'NA') and ('response_col' in data_params['dataset_metadata']):
            name = data_params['dataset_metadata']['response_col']
if name != 'NA':
if 'param' in data_params['dataset_metadata'].keys():
name = name + ' ' + data_params['dataset_metadata']['param']
else:
name = 'unknown'
transform = 'None'
if 'transformation' in data_params['dataset_metadata'].keys():
transform = data_params['dataset_metadata']['transformation']
if mdl_params['featurizer'] == 'computed_descriptors':
featurizer = model_meta['descriptor_specific']['descriptor_type']
else:
featurizer = mdl_params['featurizer']
        try:
            split_uuid = model_meta['splitting_parameters']['split_uuid']
        except KeyError:
            split_uuid = 'Not Available'
if mdl_params['prediction_type'] == 'regression':
if mdl_params['model_type'] == 'NN':
nn_params = model_meta['nn_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'r^2 (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['r2_score'], valid_metrics['r2_score'], test_metrics['r2_score']),
'MAE (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['mae_score'], valid_metrics['mae_score'], test_metrics['mae_score']),
'RMSE(Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['rms_score'], valid_metrics['rms_score'], test_metrics['rms_score']),
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Layer Sizes': nn_params['layer_sizes'],
'Optimizer': nn_params['optimizer_type'],
'Learning Rate': nn_params['learning_rate'],
'Dropouts': nn_params['dropouts'],
'Best Epoch (Max)': '%i (%i)' % (nn_params['best_epoch'],nn_params['max_epochs']),
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
elif mdl_params['model_type'] == 'RF':
rf_params = model_meta['rf_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'Max Depth': rf_params['rf_max_depth'],
                         'Max Features': rf_params['rf_max_features'],
'RF Estimators': rf_params['rf_estimators'],
'r^2 (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['r2_score'], valid_metrics['r2_score'], test_metrics['r2_score']),
'MAE (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['mae_score'], valid_metrics['mae_score'], test_metrics['mae_score']),
'RMSE(Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['rms_score'], valid_metrics['rms_score'], test_metrics['rms_score']),
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
elif mdl_params['model_type'] == 'xgboost':
xgb_params = model_meta['xgb_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'Gamma': xgb_params['xgb_gamma'],
                         'Learning rate': xgb_params['xgb_learning_rate'],
'r^2 (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['r2_score'], valid_metrics['r2_score'], test_metrics['r2_score']),
'MAE (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['mae_score'], valid_metrics['mae_score'], test_metrics['mae_score']),
'RMSE(Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['rms_score'], valid_metrics['rms_score'], test_metrics['rms_score']),
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
            else:
                # Unsupported model type; skip so a stale or undefined minfo entry is not appended below
                continue
elif mdl_params['prediction_type'] == 'classification':
if mdl_params['model_type'] == 'NN':
nn_params = model_meta['nn_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'ROC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['roc_auc_score'], valid_metrics['roc_auc_score'], test_metrics['roc_auc_score']),
'PRC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['prc_auc_score'], valid_metrics['prc_auc_score'], test_metrics['prc_auc_score']),
'Balanced accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics.get('bal_accuracy', np.nan), valid_metrics.get('bal_accuracy',np.nan), test_metrics.get('bal_accuracy', np.nan)),
'Accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['accuracy_score'], valid_metrics['accuracy_score'], test_metrics['accuracy_score']),
'Precision (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['precision'], valid_metrics['precision'], test_metrics['precision']),
'Recall (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['recall_score'], valid_metrics['recall_score'], test_metrics['recall_score']),
'NPV (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['npv'], valid_metrics['npv'], test_metrics['npv']),
'Kappa (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['kappa'], valid_metrics['kappa'], test_metrics['kappa']),
'Matthews CC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['matthews_cc'], valid_metrics['matthews_cc'], test_metrics['matthews_cc']),
'Cross entropy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['cross_entropy'], valid_metrics['cross_entropy'], test_metrics['cross_entropy']),
'Confusion matrices (Train/Valid/Test)': f"{str(train_metrics['confusion_matrix'])}/{str(valid_metrics['confusion_matrix'])}/{str(test_metrics['confusion_matrix'])}",
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Layer Sizes': nn_params['layer_sizes'],
'Optimizer': nn_params['optimizer_type'],
'Learning Rate': nn_params['learning_rate'],
'Dropouts': nn_params['dropouts'],
'Best Epoch (Max)': '%i (%i)' % (nn_params['best_epoch'],nn_params['max_epochs']),
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
elif mdl_params['model_type'] == 'RF':
rf_params = model_meta['rf_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'Max Depth': rf_params['rf_max_depth'],
                         'Max Features': rf_params['rf_max_features'],
'RF Estimators': rf_params['rf_estimators'],
'ROC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['roc_auc_score'], valid_metrics['roc_auc_score'], test_metrics['roc_auc_score']),
'PRC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['prc_auc_score'], valid_metrics['prc_auc_score'], test_metrics['prc_auc_score']),
'Balanced accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics.get('bal_accuracy', np.nan), valid_metrics.get('bal_accuracy',np.nan), test_metrics.get('bal_accuracy', np.nan)),
'Accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['accuracy_score'], valid_metrics['accuracy_score'], test_metrics['accuracy_score']),
'Precision (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['precision'], valid_metrics['precision'], test_metrics['precision']),
'Recall (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['recall_score'], valid_metrics['recall_score'], test_metrics['recall_score']),
'NPV (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['npv'], valid_metrics['npv'], test_metrics['npv']),
'Kappa (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['kappa'], valid_metrics['kappa'], test_metrics['kappa']),
'Matthews CC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['matthews_cc'], valid_metrics['matthews_cc'], test_metrics['matthews_cc']),
'Cross entropy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['cross_entropy'], valid_metrics['cross_entropy'], test_metrics['cross_entropy']),
'Confusion matrices (Train/Valid/Test)': f"{train_metrics['confusion_matrix']}/{valid_metrics['confusion_matrix']}/{test_metrics['confusion_matrix']}",
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
elif mdl_params['model_type'] == 'xgboost':
xgb_params = model_meta['xgb_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'Gamma': xgb_params['xgb_gamma'],
                         'XGB Learning rate': xgb_params['xgb_learning_rate'],
'ROC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['roc_auc_score'], valid_metrics['roc_auc_score'], test_metrics['roc_auc_score']),
'PRC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['prc_auc_score'], valid_metrics['prc_auc_score'], test_metrics['prc_auc_score']),
'Balanced accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics.get('bal_accuracy', np.nan), valid_metrics.get('bal_accuracy',np.nan), test_metrics.get('bal_accuracy', np.nan)),
'Accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['accuracy_score'], valid_metrics['accuracy_score'], test_metrics['accuracy_score']),
'Precision (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['precision'], valid_metrics['precision'], test_metrics['precision']),
'Recall (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['recall_score'], valid_metrics['recall_score'], test_metrics['recall_score']),
'NPV (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['npv'], valid_metrics['npv'], test_metrics['npv']),
'Kappa (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['kappa'], valid_metrics['kappa'], test_metrics['kappa']),
'Matthews CC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['matthews_cc'], valid_metrics['matthews_cc'], test_metrics['matthews_cc']),
'Cross entropy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['cross_entropy'], valid_metrics['cross_entropy'], test_metrics['cross_entropy']),
'Confusion matrices (Train/Valid/Test)': f"{train_metrics['confusion_matrix']}/{valid_metrics['confusion_matrix']}/{test_metrics['confusion_matrix']}",
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
            else:
                # Unsupported model type; skip so a stale or undefined minfo entry is not appended below
                continue
mlist.append(OrderedDict(minfo))
return pd.DataFrame(mlist).set_index('Name').transpose()
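# Example usage (hypothetical UUID/collection; a sketch, not part of the original module). The returned
# frame is transposed, so each model appears as a column keyed by its 'Name' field:
#     meta_df = get_summary_metadata_table(['11111111-2222-3333-4444-555555555555'], collections='my_collection')
#     print(meta_df.loc['Model Type (Featurizer)'])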
#------------------------------------------------------------------------------------------------------------------
def get_training_datasets(collection_names):
"""
Query the model tracker DB for all the unique dataset keys and buckets used to train models in the given
collections.
Args:
collection_names (list): List of names of model tracker collections to search for models.
Returns:
dict: Dictionary mapping collection names to lists of (dataset_key, bucket) tuples for training sets.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
result_dict = {}
mlmt_client = dsf.initialize_model_tracker()
for collection_name in collection_names:
dset_list = mlmt_client.model.get_training_datasets(collection_name=collection_name).result()
result_dict[collection_name] = dset_list
return result_dict
#------------------------------------------------------------------------------------------------------------------
def get_dataset_models(collection_names, filter_dict={}):
"""
Query the model tracker for all models saved in the model tracker DB under the given collection names. Returns a dictionary
mapping (dataset_key,bucket) pairs to the list of (collection,model_uuid) pairs trained on the corresponding datasets.
Args:
collection_names (list): List of names of model tracker collections to search for models.
filter_dict (dict): Additional filter criteria to use in model query.
Returns:
dict: Dictionary mapping training set (dataset_key, bucket) tuples to (collection, model_uuid) pairs.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
result_dict = {}
    coll_dset_dict = get_training_datasets(collection_names)
mlmt_client = dsf.initialize_model_tracker()
for collection_name in collection_names:
dset_list = coll_dset_dict[collection_name]
        for dset_dict in dset_list:
            bucket = dset_dict['bucket']
            dset_key = dset_dict['dataset_key']
            query_filter = {
                'training_dataset.bucket': bucket,
                'training_dataset.dataset_key': dset_key
            }
query_filter.update(filter_dict)
query_params = {
"match_metadata": query_filter
}
print('Querying models in collection %s for dataset %s, %s' % (collection_name, bucket, dset_key))
metadata_list = mlmt_client.model.query_model_metadata(
collection_name=collection_name,
query_params=query_params,
include_fields=['model_uuid']
).result()
for i, metadata_dict in enumerate(metadata_list):
if i % 50 == 0:
print('Processing collection %s model %d' % (collection_name, i))
model_uuid = metadata_dict['model_uuid']
result_dict.setdefault((dset_key,bucket), []).append((collection_name, model_uuid))
return result_dict
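# Example usage (hypothetical collection names; a sketch, not part of the original module):
#     dset_models = get_dataset_models(['my_collection'])
#     for (dset_key, bucket), model_refs in dset_models.items():
#         print('%s (%s): %d models' % (dset_key, bucket, len(model_refs)))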
#-------------------------------------------------------------------------------------------------------------------
def get_multitask_perf_from_files(result_dir, pred_type='regression'):
"""
Retrieve model metadata and performance metrics stored in the filesystem from a multitask hyperparameter search.
Format the per-task performance metrics in a table with a row for each task and columns for each model/subset
combination.
Args:
result_dir (str): Path to root result directory containing output from a hyperparameter search run.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
Returns:
pd.DataFrame: Table of model metadata fields and performance metrics.
"""
model_uuid_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
best_epoch_list = []
max_epochs_list = []
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
metrics = ['num_compounds', 'r2_score', 'task_r2_scores']
else:
metrics = ['num_compounds', 'roc_auc_score', 'task_roc_auc_scores']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = []
# Navigate the results directory tree
model_list = []
metrics_list = []
for dirpath, dirnames, filenames in os.walk(result_dir):
if ('model_metadata.json' in filenames) and ('model_metrics.json' in filenames):
meta_path = os.path.join(dirpath, 'model_metadata.json')
with open(meta_path, 'r') as meta_fp:
meta_dict = json.load(meta_fp)
model_list.append(meta_dict)
metrics_path = os.path.join(dirpath, 'model_metrics.json')
with open(metrics_path, 'r') as metrics_fp:
metrics_dicts = json.load(metrics_fp)
metrics_list.append(metrics_dicts)
print("Found data for %d models under %s" % (len(model_list), result_dir))
for metadata_dict, metrics_dicts in zip(model_list, metrics_list):
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get list of training run metrics for this model
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
            raise Exception("Got no or incomplete metrics for model %s" % model_uuid)
#print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
#continue
subset_metrics = {}
for metrics_dict in metrics_dicts:
if metrics_dict['label'] == 'best':
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['model_parameters']
dset_params = metadata_dict['training_dataset']
response_cols = dset_params['response_cols']
nn_params = metadata_dict['nn_specific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
for subset in subsets:
for metric in metrics:
score_dict[subset][metric].append(subset_metrics[subset][metric])
# Format the data as a table with groups of 3 columns for each model
num_models = len(model_uuid_list)
if pred_type == 'regression':
model_params = ['model_uuid', 'learning_rate', 'layer_sizes', 'dropouts', 'max_epochs', 'best_epoch',
'subset', 'num_compounds', 'mean_r2_score']
else:
model_params = ['model_uuid', 'learning_rate', 'layer_sizes', 'dropouts', 'max_epochs', 'best_epoch',
'subset', 'num_compounds', 'mean_roc_auc_score']
param_list = model_params + response_cols
perf_df = pd.DataFrame(dict(col_0=param_list))
colnum = 0
for i in range(num_models):
for subset in subsets:
vals = []
if subset == 'train':
vals.append(model_uuid_list[i])
vals.append(learning_rate_list[i])
vals.append(layer_sizes_list[i])
vals.append(dropouts_list[i])
vals.append('%d' % max_epochs_list[i])
vals.append('%d' % best_epoch_list[i])
else:
vals = vals + ['']*6
vals.append(subset)
vals.append('%d' % score_dict[subset]['num_compounds'][i])
if pred_type == 'regression':
vals.append('%.3f' % score_dict[subset]['r2_score'][i])
vals = vals + ['%.3f' % v for v in score_dict[subset]['task_r2_scores'][i]]
else:
vals.append('%.3f' % score_dict[subset]['roc_auc_score'][i])
vals = vals + ['%.3f' % v for v in score_dict[subset]['task_roc_auc_scores'][i]]
colnum += 1
colname = 'col_%d' % colnum
perf_df[colname] = vals
return perf_df
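# Example usage (hypothetical result directory; a sketch, not part of the original module). The returned
# table has one group of three columns (train/valid/test) per model, with parameter and task names in 'col_0':
#     mt_perf = get_multitask_perf_from_files('/path/to/multitask_results', pred_type='regression')
#     mt_perf = mt_perf.set_index('col_0')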
#-------------------------------------------------------------------------------------------------------------------
def get_multitask_perf_from_files_new(result_dir, pred_type='regression'):
"""
Retrieve model metadata and performance metrics stored in the filesystem from a multitask hyperparameter search.
    Format the per-task performance metrics in a table with a row for each model and a column for each
    subset/task combination.
Args:
result_dir (str): Path to root result directory containing output from a hyperparameter search run.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
Returns:
pd.DataFrame: Table of model metadata fields and performance metrics.
"""
model_uuid_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
best_epoch_list = []
max_epochs_list = []
featurizer_list = []
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
metrics = ['num_compounds', 'r2_score', 'task_r2_scores',
'task_rms_scores']
else:
metrics = ['num_compounds', 'roc_auc_score', 'task_roc_auc_scores']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = []
# Navigate the results directory tree
model_list = []
metrics_list = []
for dirpath, dirnames, filenames in os.walk(result_dir):
if ('model_metadata.json' in filenames) and ('model_metrics.json' in filenames):
meta_path = os.path.join(dirpath, 'model_metadata.json')
with open(meta_path, 'r') as meta_fp:
meta_dict = json.load(meta_fp)
model_list.append(meta_dict)
metrics_path = os.path.join(dirpath, 'model_metrics.json')
with open(metrics_path, 'r') as metrics_fp:
metrics_dicts = json.load(metrics_fp)
metrics_list.append(metrics_dicts)
print("Found data for %d models under %s" % (len(model_list), result_dir))
for metadata_dict, metrics_dicts in zip(model_list, metrics_list):
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get list of training run metrics for this model
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
            raise Exception("Got no or incomplete metrics for model %s" % model_uuid)
#print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
#continue
subset_metrics = {}
for metrics_dict in metrics_dicts:
if metrics_dict['label'] == 'best':
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['model_parameters']
dset_params = metadata_dict['training_dataset']
response_cols = dset_params['response_cols']
nn_params = metadata_dict['nn_specific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
featurizer_list.append(model_params["featurizer"])
for subset in subsets:
for metric in metrics:
score_dict[subset][metric].append(subset_metrics[subset][metric])
    # Format the data as a table with one row per model and a column for each per-task metric in each subset
num_models = len(model_uuid_list)
data = {
"model_uuid": model_uuid_list,
"learning_rate": learning_rate_list,
"layer_sizes": layer_sizes_list,
"dropouts": dropouts_list,
"featurizer": featurizer_list
}
for i in range(num_models):
for subset in subsets:
for ix, task in enumerate(response_cols):
if pred_type == "regression":
colr2 = f"{subset}_{task}_r2"
colrms = f"{subset}_{task}_rms"
if colr2 not in data:
data[colr2] = []
data[colrms] = []
data[colr2].append(score_dict[subset]["task_r2_scores"][i][ix])
data[colrms].append(score_dict[subset]["task_rms_scores"][i][ix])
else:
colauc = f"{subset}_{task}_roc_auc"
if colauc not in data:
data[colauc] = []
data[colauc].append(score_dict[subset]["task_roc_auc_scores"][i][ix])
perf_df = pd.DataFrame(data)
return perf_df
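# Example usage (hypothetical result directory and task name; a sketch, not part of the original module).
# Each row is a model, and per-task scores appear in columns named '<subset>_<task>_r2' / '<subset>_<task>_rms'
# for regression, or '<subset>_<task>_roc_auc' for classification:
#     mt_perf = get_multitask_perf_from_files_new('/path/to/multitask_results', pred_type='regression')
#     best = mt_perf.sort_values('valid_pIC50_r2', ascending=False).head()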
#-------------------------------------------------------------------------------------------------------------------
def get_multitask_perf_from_tracker(collection_name, response_cols=None, expand_responses=None, expand_subsets='test',
exhaustive=False):
"""
Retrieve full metadata and metrics from model tracker for all models in a collection and format them
into a table, including per-task performance metrics for multitask models.
Meant for multitask NN models, but works for single task models as well.
By AKP. Works for model tracker as of 10/2020
Args:
collection_name (str): Name of model tracker collection to search for models.
        response_cols (list, str or None): Names of tasks (response columns) to query performance results for.
            If None, checks whether the entire collection has the same response cols; if it does not, an
            exception is raised listing the possible choices. Otherwise, should be a list of strings or a
            comma-separated string. Note: make sure response cols are listed in the same order as in the
            metadata. Recommended: run with None first, then specify the response cols you want.
expand_responses (list, str or None): Names of tasks / response columns you want to include results for in
the final dataframe. Useful if you have a lot of tasks and only want to look at the performance of a
few of them. Must also be a list or comma separated string, and must be a subset of response_cols.
If None, will expand all responses.
expand_subsets (list, str or None): Dataset subsets ('train', 'valid' and/or 'test') to show metrics for.
Again, must be list or comma separated string, or None to expand all.
exhaustive (bool): If True, return large dataframe with all model tracker metadata minus any columns not
in expand_responses. If False, return trimmed dataframe with most relevant columns.
Returns:
pd.DataFrame: Table of model metadata fields and performance metrics.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
# check inputs are correct
if collection_name.startswith('old_'):
raise Exception("This function is not implemented for the old format of metadata.")
if isinstance(response_cols, list):
pass
elif response_cols is None:
pass
elif isinstance(response_cols, str):
response_cols=[x.strip() for x in response_cols.split(',')]
else:
raise Exception("Please input response cols as None, list or comma separated string.")
if isinstance(expand_responses, list):
pass
elif expand_responses is None:
pass
elif isinstance(expand_responses, str):
expand_responses=[x.strip() for x in expand_responses.split(',')]
else:
raise Exception("Please input expand response col(s) as list or comma separated string.")
if isinstance(expand_subsets, list):
pass
elif expand_subsets is None:
pass
elif isinstance(expand_subsets, str):
expand_subsets=[x.strip() for x in expand_subsets.split(',')]
else:
raise Exception("Please input subset(s) as list or comma separated string.")
# get metadata
if response_cols is not None:
filter_dict={'training_dataset.response_cols': response_cols}
else:
filter_dict={}
models = trkr.get_full_metadata(filter_dict, collection_name)
if len(models)==0:
raise Exception("No models found with these response cols in this collection. To get a list of possible response cols, pass response_cols=None.")
models = pd.DataFrame.from_records(models)
# expand model metadata - deal with NA descriptors / NA other fields
alldat=models[['model_uuid', 'time_built']]
models=models.drop(['model_uuid', 'time_built'], axis = 1)
for column in models.columns:
if column == 'training_metrics':
continue
nai=models[models[column].isna()].index
nonas=models[~models[column].isna()]
tempdf=pd.DataFrame.from_records(nonas[column].tolist(), index=nonas.index)
tempdf=pd.concat([tempdf, pd.DataFrame(np.nan, index=nai, columns=tempdf.columns)])
alldat=alldat.join(tempdf)
# assign response cols
if len(alldat.response_cols.astype(str).unique())==1:
response_cols=alldat.response_cols[0]
print("Response cols:", response_cols)
else:
raise Exception(f"There is more than one set of response cols in this collection. Please choose from these lists: {alldat.response_cols.unique()}")
# expand training metrics - deal with NA's in columns
metrics=pd.DataFrame.from_dict(models['training_metrics'].tolist())
allmet=alldat[['model_uuid']]
for column in metrics.columns:
nai=metrics[metrics[column].isna()].index
nonas=metrics[~metrics[column].isna()]
tempdf=pd.DataFrame.from_records(nonas[column].tolist(), index=nonas.index)
tempdf=pd.concat([tempdf, pd.DataFrame(np.nan, index=nai, columns=tempdf.columns)])
        label=tempdf['label'][nonas.index[0]]
        metrics_type=tempdf['metrics_type'][nonas.index[0]]
        subset=tempdf['subset'][nonas.index[0]]
        nai=tempdf[tempdf['prediction_results'].isna()].index
        nonas=tempdf[~tempdf['prediction_results'].isna()]
        tempdf=pd.DataFrame.from_records(nonas['prediction_results'].tolist(), index=nonas.index)
        tempdf=pd.concat([tempdf, pd.DataFrame(np.nan, index=nai, columns=tempdf.columns)])
#########################################
# "LaZy Bot" for Discord #
# Author: <NAME> #
# www.brick.technology #
#########################################
## To run at its best, follow the advice below ##
# 1. Works well with XavinBot. Users can Emoji react to XavinBot posts to add roles to their name.
# 2. Add a role for every game you want your server to promote.
# 3. This bot works by filtering the ignore list vs the users total roles.
# 4. Whatever remains the bot will randomly pick from and select a game.
# Type !lazy for the bot to select a game. Type !lazystats to see how many times your bot has been used.
# Start Code:
# 1.0 Imports
import os
import random
import discord
from discord.ext import commands
import pandas as pd
## Steps 2-4: Server Admin Controls
# 2.0 Obtain your Discord token from Discords Developer Portal
TOKEN = 'DISCORD TOKEN HERE'
# 2.1 Enter your Guild name
GUILD = 'GUILD NAME'
# 2.2 Enter your YouTube Api Key Here
Youtube_Api_Key = 'YOUTUBE API KEY HERE'
# 3.0 Insert the names of Roles that are not video game roles:
ignore = ['GameBot','XavinBot','@everyone','Chaos','Nemesis','Eros','Co-owner','Cronos','Helios','Aether','Hades','Hermes','Dionysus','Poseidon','Gods','Hecate','Banished to Tartarus','Aristoi','Chaos Seeds','Server Booster','Streaming','Gods','YourAnus','Discgolf','UrAnus','Tabletop','Uranus']
# 4.0 Bot Prefix Command
bot = commands.Bot(command_prefix='!')
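# A minimal sketch (not part of the original bot) of the role-filtering selection described in steps 3-4:
# take a member's role names, drop everything in the `ignore` list, and pick one of the remaining game
# roles at random. Assumes discord.py Role objects, which expose the role name via `.name`.
def pick_game(member_roles, ignore_list=ignore):
    games = [role.name for role in member_roles if role.name not in ignore_list]
    return random.choice(games) if games else None
# Example (inside a command handler): game = pick_game(ctx.author.roles)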
## Steps 5-6: Bot Code
# 5.0 Stat Counter Function
def counter():
csv_open = pd.read_csv('data/counter.csv')
    df_open = pd.DataFrame(csv_open)
import sys
import pandas as pd
sys.path.append('../minvime')
import estimator_classification as esti # The file ../minvime/estimator_classification.py
tps = [20000,10000,8000,6000,4000,2000,1000]
fps = [-900,-800,-600,-500,-400,-200,-100]
tn = 0
fn = 0
minroi = 100000
cases = 1000000
baserate = 0.001
rez = pd.DataFrame()
import os
import tempfile
import pandas as pd
import pytest
from pandas.util import testing as pdt
from .. import simulation as sim
from ...utils.testing import assert_frames_equal
def setup_function(func):
sim.clear_sim()
sim.enable_cache()
def teardown_function(func):
sim.clear_sim()
sim.enable_cache()
@pytest.fixture
def df():
return pd.DataFrame(
{'a': [1, 2, 3],
'b': [4, 5, 6]},
index=['x', 'y', 'z'])
def test_tables(df):
wrapped_df = sim.add_table('test_frame', df)
@sim.table()
def test_func(test_frame):
return test_frame.to_frame() / 2
assert set(sim.list_tables()) == {'test_frame', 'test_func'}
table = sim.get_table('test_frame')
assert table is wrapped_df
assert table.columns == ['a', 'b']
assert table.local_columns == ['a', 'b']
assert len(table) == 3
pdt.assert_index_equal(table.index, df.index)
pdt.assert_series_equal(table.get_column('a'), df.a)
pdt.assert_series_equal(table.a, df.a)
pdt.assert_series_equal(table['b'], df['b'])
table = sim._TABLES['test_func']
assert table.index is None
assert table.columns == []
    assert len(table) == 0
pdt.assert_frame_equal(table.to_frame(), df / 2)
pdt.assert_frame_equal(table.to_frame(columns=['a']), df[['a']] / 2)
pdt.assert_index_equal(table.index, df.index)
pdt.assert_series_equal(table.get_column('a'), df.a / 2)
pdt.assert_series_equal(table.a, df.a / 2)
pdt.assert_series_equal(table['b'], df['b'] / 2)
assert len(table) == 3
assert table.columns == ['a', 'b']
def test_table_func_cache(df):
sim.add_injectable('x', 2)
@sim.table(cache=True)
def table(variable='x'):
return df * variable
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 2)
sim.add_injectable('x', 3)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 2)
sim.get_table('table').clear_cached()
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 3)
sim.add_injectable('x', 4)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 3)
sim.clear_cache()
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 4)
sim.add_injectable('x', 5)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 4)
sim.add_table('table', table)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 5)
def test_table_func_cache_disabled(df):
sim.add_injectable('x', 2)
@sim.table('table', cache=True)
def asdf(x):
return df * x
sim.disable_cache()
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 2)
sim.add_injectable('x', 3)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 3)
sim.enable_cache()
sim.add_injectable('x', 4)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 3)
def test_table_copy(df):
sim.add_table('test_frame_copied', df, copy_col=True)
sim.add_table('test_frame_uncopied', df, copy_col=False)
sim.add_table('test_func_copied', lambda: df, copy_col=True)
sim.add_table('test_func_uncopied', lambda: df, copy_col=False)
@sim.table(copy_col=True)
def test_funcd_copied():
return df
@sim.table(copy_col=False)
def test_funcd_uncopied():
return df
@sim.table(copy_col=True)
def test_funcd_copied2(test_frame_copied):
# local returns original, but it is copied by copy_col.
return test_frame_copied.local
@sim.table(copy_col=True)
def test_funcd_copied3(test_frame_uncopied):
# local returns original, but it is copied by copy_col.
return test_frame_uncopied.local
@sim.table(copy_col=False)
def test_funcd_uncopied2(test_frame_copied):
# local returns original.
return test_frame_copied.local
@sim.table(copy_col=False)
def test_funcd_uncopied3(test_frame_uncopied):
# local returns original.
return test_frame_uncopied.local
sim.add_table('test_cache_copied', lambda: df, cache=True, copy_col=True)
sim.add_table(
'test_cache_uncopied', lambda: df, cache=True, copy_col=False)
@sim.table(cache=True, copy_col=True)
def test_cached_copied():
return df
@sim.table(cache=True, copy_col=False)
def test_cached_uncopied():
return df
# Create tables with computed columns.
sim.add_table('test_copied_columns', pd.DataFrame(index=df.index),
copy_col=True)
sim.add_table('test_uncopied_columns', pd.DataFrame(index=df.index),
copy_col=False)
for column_name in ['a', 'b']:
label = "test_frame_uncopied.{}".format(column_name)
func = lambda col=label: col
for table_name in ['test_copied_columns', 'test_uncopied_columns']:
sim.add_column(table_name, column_name, func)
for name in ['test_frame_uncopied', 'test_func_uncopied',
'test_funcd_uncopied', 'test_funcd_uncopied2',
'test_funcd_uncopied3', 'test_cache_uncopied',
'test_cached_uncopied', 'test_uncopied_columns',
'test_frame_copied', 'test_func_copied',
'test_funcd_copied', 'test_funcd_copied2',
'test_funcd_copied3', 'test_cache_copied',
'test_cached_copied', 'test_copied_columns']:
table = sim.get_table(name)
table2 = sim.get_table(name)
# to_frame will always return a copy.
pdt.assert_frame_equal(table.to_frame(), df)
assert table.to_frame() is not df
pdt.assert_frame_equal(table.to_frame(), table.to_frame())
assert table.to_frame() is not table.to_frame()
pdt.assert_series_equal(table.to_frame()['a'], df['a'])
assert table.to_frame()['a'] is not df['a']
pdt.assert_series_equal(table.to_frame()['a'],
table.to_frame()['a'])
assert table.to_frame()['a'] is not table.to_frame()['a']
if 'uncopied' in name:
pdt.assert_series_equal(table['a'], df['a'])
assert table['a'] is df['a']
pdt.assert_series_equal(table['a'], table2['a'])
assert table['a'] is table2['a']
else:
pdt.assert_series_equal(table['a'], df['a'])
assert table['a'] is not df['a']
pdt.assert_series_equal(table['a'], table2['a'])
assert table['a'] is not table2['a']
def test_columns_for_table():
sim.add_column(
'table1', 'col10', pd.Series([1, 2, 3], index=['a', 'b', 'c']))
sim.add_column(
'table2', 'col20', pd.Series([10, 11, 12], index=['x', 'y', 'z']))
@sim.column('table1')
def col11():
return pd.Series([4, 5, 6], index=['a', 'b', 'c'])
@sim.column('table2', 'col21')
def asdf():
return pd.Series([13, 14, 15], index=['x', 'y', 'z'])
t1_col_names = sim._list_columns_for_table('table1')
assert set(t1_col_names) == {'col10', 'col11'}
t2_col_names = sim._list_columns_for_table('table2')
assert set(t2_col_names) == {'col20', 'col21'}
t1_cols = sim._columns_for_table('table1')
assert 'col10' in t1_cols and 'col11' in t1_cols
t2_cols = sim._columns_for_table('table2')
assert 'col20' in t2_cols and 'col21' in t2_cols
def test_columns_and_tables(df):
sim.add_table('test_frame', df)
@sim.table()
def test_func(test_frame):
return test_frame.to_frame() / 2
sim.add_column('test_frame', 'c', pd.Series([7, 8, 9], index=df.index))
@sim.column('test_func', 'd')
def asdf(test_func):
return test_func.to_frame(columns=['b'])['b'] * 2
@sim.column('test_func')
def e(column='test_func.d'):
return column + 1
test_frame = sim.get_table('test_frame')
assert set(test_frame.columns) == set(['a', 'b', 'c'])
assert_frames_equal(
test_frame.to_frame(),
pd.DataFrame(
{'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9]},
index=['x', 'y', 'z']))
assert_frames_equal(
test_frame.to_frame(columns=['a', 'c']),
pd.DataFrame(
{'a': [1, 2, 3],
'c': [7, 8, 9]},
index=['x', 'y', 'z']))
test_func_df = sim._TABLES['test_func']
assert set(test_func_df.columns) == set(['d', 'e'])
assert_frames_equal(
test_func_df.to_frame(),
pd.DataFrame(
{'a': [0.5, 1, 1.5],
'b': [2, 2.5, 3],
'c': [3.5, 4, 4.5],
'd': [4., 5., 6.],
'e': [5., 6., 7.]},
index=['x', 'y', 'z']))
assert_frames_equal(
test_func_df.to_frame(columns=['b', 'd']),
pd.DataFrame(
{'b': [2, 2.5, 3],
'd': [4., 5., 6.]},
index=['x', 'y', 'z']))
assert set(test_func_df.columns) == set(['a', 'b', 'c', 'd', 'e'])
assert set(sim.list_columns()) == {('test_frame', 'c'), ('test_func', 'd'),
('test_func', 'e')}
def test_column_cache(df):
sim.add_injectable('x', 2)
series = pd.Series([1, 2, 3], index=['x', 'y', 'z'])
key = ('table', 'col')
@sim.table()
def table():
return df
@sim.column(*key, cache=True)
def column(variable='x'):
return series * variable
c = lambda: sim._COLUMNS[key]
pdt.assert_series_equal(c()(), series * 2)
sim.add_injectable('x', 3)
pdt.assert_series_equal(c()(), series * 2)
c().clear_cached()
pdt.assert_series_equal(c()(), series * 3)
sim.add_injectable('x', 4)
pdt.assert_series_equal(c()(), series * 3)
sim.clear_cache()
pdt.assert_series_equal(c()(), series * 4)
sim.add_injectable('x', 5)
pdt.assert_series_equal(c()(), series * 4)
sim.get_table('table').clear_cached()
pdt.assert_series_equal(c()(), series * 5)
sim.add_injectable('x', 6)
pdt.assert_series_equal(c()(), series * 5)
sim.add_column(*key, column=column, cache=True)
pdt.assert_series_equal(c()(), series * 6)
def test_column_cache_disabled(df):
sim.add_injectable('x', 2)
series = pd.Series([1, 2, 3], index=['x', 'y', 'z'])
key = ('table', 'col')
@sim.table()
def table():
return df
@sim.column(*key, cache=True)
def column(x):
return series * x
c = lambda: sim._COLUMNS[key]
sim.disable_cache()
pdt.assert_series_equal(c()(), series * 2)
sim.add_injectable('x', 3)
pdt.assert_series_equal(c()(), series * 3)
sim.enable_cache()
sim.add_injectable('x', 4)
pdt.assert_series_equal(c()(), series * 3)
def test_update_col(df):
wrapped = sim.add_table('table', df)
wrapped.update_col('b', pd.Series([7, 8, 9], index=df.index))
pdt.assert_series_equal(wrapped['b'], | pd.Series([7, 8, 9], index=df.index) | pandas.Series |
# Visualize streamflow time series and fill missing data
# Script written in Python 3.7
import config as config
import numpy as np
import pandas as pd
import tempfile
import datetime
from sklearn.svm import SVR
import geopandas as gpd
from sklearn.metrics import mean_squared_error as mse
import matplotlib.pyplot as plt
import importlib
# ======================================================================================================================
tmp_dir = tempfile.mkdtemp()
# =======================================================================
# Visualizing streamflow
# =======================================================================
flow_path = config.streamflow
# flow = pd.read_csv(str(flow_path))
# flow = pd.read_csv(str(flow_path), header=0, squeeze=True)
flow = pd.read_csv(str(flow_path), usecols=['Date', 'Flow_cfs'], parse_dates=True, index_col=0)
quality = pd.read_csv(str(flow_path), usecols=['Date', 'Quality'], parse_dates=True, index_col=0)
# Convert streamflow from cfs to mm/day
# 1 cfs over a 1 km2 basin = (1 m3 / 35.314667 ft3) * (86400 sec / day) * (1 km2 / 1000000 m2) * (1000 mm / m) ~= 2.446576 mm/day
ft3_sec = (1/35.314667) * 86400 * (1/1000000) * 1000
area = 13.7393 # area of upstream Ellsworth watershed, sq. km
flow['flow_mm_day'] = (flow['Flow_cfs'] / area) * ft3_sec
flow.drop('Flow_cfs', axis=1, inplace=True)
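# Quick sanity check (illustrative; the tolerance is an assumption, not part of the
# original workflow): the factor above should reproduce the quoted 2.446576 mm/day.
assert abs(ft3_sec - 2.446576) < 1e-4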
# Expand date range to include every day of all the years present
begin = '01-01-{}'.format(flow.index.to_frame()['Date'].min().year)
end = '12-31-{}'.format(flow.index.to_frame()['Date'].max().year)
rng = pd.date_range(begin, end)
df = pd.DataFrame(index=rng)
daily_flow = df.merge(flow, left_index=True, right_index=True, how='left')
# Plot available runoff
daily_flow['year'] = daily_flow.index.year
daily_flow['doy'] = daily_flow.index.dayofyear
flow_piv = pd.pivot_table(daily_flow, index=['doy'], columns=['year'], values=['flow_mm_day'])
flow_piv.plot(subplots=True, legend=False)
plt.ylabel('Flow (mm/day)')
plt.xlabel('Day of Year')
# =======================================================================
# Imputing missing data through modeling:
# Ellsworth is missing Jan-Jun of 2003 and Oct-Dec of 2008. Will just remove those years
# Also missing small gaps from 2004-2007
# =======================================================================
# Import data and feature engineering
temp_mean = pd.read_csv(str(config.daily_temp_mean), parse_dates=True, index_col=0)
temp_mean_min = pd.read_csv(str(config.daily_temp_min), parse_dates=True, index_col=0)
temp_mean_max = pd.read_csv(str(config.daily_temp_max), parse_dates=True, index_col=0)
# Using average of PRISM precip and a nearby Naselle rain gauge
precip_prism = pd.read_csv(str(config.daily_ppt), parse_dates=True, index_col=0)
gauge_data = pd.read_csv(config.daily_ppt.parents[0] / 'GHCND_USC00455774_1929_2020.csv', parse_dates=True, index_col=5)
gauge_data['SNOW'].fillna(0, inplace=True)
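# Estimate snow-water equivalent from snowfall depth with a ~13:1 depth-to-SWE ratio
# (an assumption of this workflow), then add it to rainfall for total precipitation.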
gauge_data['SNOW_SWE'] = gauge_data['SNOW'] / 13
gauge_data['PRCP_TOT'] = gauge_data['PRCP'] + gauge_data['SNOW_SWE']
precip_gauge = gauge_data[['PRCP_TOT']].copy()
precip_gauge['PRCP_TOT'] = precip_gauge['PRCP_TOT'].combine_first(precip_prism['mean_ppt_mm'])
prism_start = precip_prism.index.min()
prism_end = precip_prism.index.max()
precip_gauge = precip_gauge[(precip_gauge.index >= prism_start) & (precip_gauge.index <= prism_end)].copy()
precip = precip_prism.merge(precip_gauge, left_index=True, right_index=True, how='left')
precip_mean = pd.DataFrame(precip.mean(axis=1), columns=['ppt'])
df = pd.concat([precip_mean, temp_mean, temp_mean_min, temp_mean_max], axis=1)
# Convert day of year to signal
day = 24*60*60
year = 365.2425 * day
timestamp_secs = pd.to_datetime(df.index)
timestamp_secs = timestamp_secs.map(datetime.datetime.timestamp)
df['year_cos'] = np.cos(timestamp_secs * (2 * np.pi / year))
df['year_sin'] = np.sin(timestamp_secs * (2 * np.pi / year))
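# Cheap check that the day-of-year encoding behaves as intended: the paired
# sin/cos features should lie on the unit circle.
assert np.allclose(df['year_cos']**2 + df['year_sin']**2, 1.0)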
# Sum of last 2 days precip
df['precip_sum-2t'] = precip_mean['ppt'].rolling(2).sum()
# Previous days' precip
df['precip_t-1'] = precip_mean['ppt'].shift(1)
df['precip_t-2'] = precip_mean['ppt'].shift(2)
df['precip_t-3'] = precip_mean['ppt'].shift(3)
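# The rolling sum and lagged-precip features leave NaNs in the first few rows; they are
# dropped below (obs.dropna) together with days that have no flow observation.
assert df['precip_t-3'].iloc[:3].isna().all()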
obs = df.merge(daily_flow['flow_mm_day'], left_index=True, right_index=True, how='right')
# Set aside dates with missing flow measurements
gap_data = obs[obs['flow_mm_day'].isna()]
obs.dropna(inplace=True)
# ========================================================
def plot_test_results(y_test, y_pred):
results = pd.DataFrame(data=np.column_stack([y_test, y_pred]), index=y_test.index, columns=['y_test', 'y_pred'])
results = (results * train_std['flow_mm_day']) + train_mean['flow_mm_day']
plt.plot(results)
plt.legend(results.columns)
# Split the data into a training set (70%) and a test set (30%)
n = obs.shape[0]
train_df = obs[0:int(n*0.7)]
test_df = obs[int(n*0.7):]
num_features = obs.shape[1]
cols = obs.columns.tolist()
target = cols.index('flow_mm_day')
# Normalize
train_mean = train_df.mean()
train_std = train_df.std()
train_df = (train_df - train_mean) / train_std
test_df = (test_df - train_mean) / train_std
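# Note: the test split is standardized with the *training* mean/std so that no
# information from the test period leaks into the scaling.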
X_train, y_train = train_df.iloc[:, 0:target], train_df.iloc[:, target]
X_test, y_test = test_df.iloc[:, 0:target], test_df.iloc[:, target]
svr = SVR(kernel='rbf', C=1, gamma='auto', epsilon=0.1)
svr.fit(X_train, y_train)
y_pred = svr.predict(X_test)
print(mse(y_test, y_pred))
plot_test_results(y_test, y_pred)
# ========================================================
# Predicting small data gaps from 2004-2007
velma_start = pd.to_datetime('01-01-2004')
velma_end = pd.to_datetime('12-31-2007')
gap_data_04_07 = gap_data[(gap_data.index >= velma_start) & (gap_data.index <= velma_end)].copy()
X_gap = gap_data_04_07.drop(columns='flow_mm_day', axis=1)
X_gap = (X_gap - train_mean[:-1]) / train_std[:-1]
gap_pred = svr.predict(X_gap)
gap_pred = (gap_pred * train_std['flow_mm_day']) + train_mean['flow_mm_day']
# Add dates to predicted flow
rng = pd.date_range(velma_start, velma_end)
date_df = pd.DataFrame(index=rng)
obs_04_07 = date_df.merge(obs, left_index=True, right_index=True, how='left')
gap_data_04_07['flow_mm_day'] = gap_pred
imp_04_07 = date_df.merge(gap_data_04_07, left_index=True, right_index=True, how='left')
# Combine the data
data_04_07 = pd.concat([obs, gap_data_04_07]).sort_index()
plt.plot(data_04_07['flow_mm_day'], label='Observed')
# plt.plot(obs_04_07['flow_mm_day'], label='Observed')
plt.plot(imp_04_07['flow_mm_day'], label='Modeled')
plt.legend()
# ========================================================
# # Export runoff and precip/temp for given time period
# Runoff
velma_start = | pd.to_datetime('01-01-2004') | pandas.to_datetime |
import numpy as np
import pandas as pd
import os
from dataV3 import make_directory
from dataV3 import get_indices_hard
import json
import math
def pointSort(scoring_directory, input_dir = None, weights = None,
scale_guide_dir = "./config/point_assignment_scaling_guide.csv", reporting = False, rep_direc = False,
tua_dir = None):
print('tua_dir', tua_dir)
if input_dir is not None:
dir_path = os.path.dirname(os.path.realpath(input_dir))
input_path = os.path.join(dir_path, input_dir)
if not tua_dir:
tua_path = os.path.join(input_path, 'tua')
tua_location = ''
for entry in os.listdir(input_dir):
if 'tua' in entry and os.path.isdir(os.path.join(input_dir, entry)):
tua_path = os.path.join(input_dir, entry)
print("FOUND TUA", tua_path)
break
for file in os.listdir(input_dir+'/tua'):
print('file in tua',file)
tua_location = os.path.join(tua_path, file)
try:
tuas = pd.concat([tuas, pd.read_csv(tua_location)])
except UnboundLocalError:
tuas = | pd.read_csv(tua_location) | pandas.read_csv |
import collections
import fnmatch
import os
from typing import Union
import tarfile
import pandas as pd
import numpy as np
from pandas.core.dtypes.common import is_string_dtype, is_numeric_dtype
from hydrodataset.data.data_base import DataSourceBase
from hydrodataset.data.stat import cal_fdc
from hydrodataset.utils import hydro_utils
from hydrodataset.utils.hydro_utils import download_one_zip, unzip_nested_zip
CAMELS_NO_DATASET_ERROR_LOG = (
"We cannot read this dataset now. Please check if you choose the correct dataset:\n"
' ["AUS", "BR", "CA", "CL", "GB", "US", "YR"]'
)
def time_intersect_dynamic_data(obs: np.array, date: np.array, t_range: list):
"""
Choose the data from obs that falls within t_range
Parameters
----------
obs
a np array
date
all periods for obs
t_range
the time range we need, such as ["1990-01-01","2000-01-01"]
Returns
-------
np.array
the chosen data
"""
t_lst = hydro_utils.t_range_days(t_range)
nt = t_lst.shape[0]
if len(obs) != nt:
out = np.full([nt], np.nan)
[c, ind1, ind2] = np.intersect1d(date, t_lst, return_indices=True)
out[ind2] = obs[ind1]
else:
out = obs
return out
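# Illustrative behaviour (hypothetical values): if `obs` holds two values dated
# 1995-01-01 and 1995-01-02 and t_range spans a longer window, the two values are placed
# at the matching positions of the requested daily axis and every other day stays NaN;
# if obs already covers the whole window it is returned unchanged.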
class Camels(DataSourceBase):
def __init__(self, data_path, download=False, region: str = "US"):
"""
Initialization for CAMELS series dataset
Parameters
----------
data_path
where we put the dataset
download
if true, download
region
the default is CAMELS(-US), since it's the first CAMELS dataset.
Others now include: AUS, BR, CA, CE, CL, GB, YR
"""
super().__init__(data_path)
region_lst = ["AUS", "BR", "CA", "CE", "CL", "GB", "US", "YR"]
assert region in region_lst
self.region = region
self.data_source_description = self.set_data_source_describe()
if download:
self.download_data_source()
self.camels_sites = self.read_site_info()
def get_name(self):
return "CAMELS_" + self.region
def set_data_source_describe(self) -> collections.OrderedDict:
"""
Introduce the files in the dataset and list their location in the file system
Returns
-------
collections.OrderedDict
the description for a CAMELS dataset
"""
camels_db = self.data_source_dir
if self.region == "US":
# shp file of basins
camels_shp_file = os.path.join(
camels_db, "basin_set_full_res", "HCDN_nhru_final_671.shp"
)
# config of flow data
flow_dir = os.path.join(
camels_db,
"basin_timeseries_v1p2_metForcing_obsFlow",
"basin_dataset_public_v1p2",
"usgs_streamflow",
)
# forcing
forcing_dir = os.path.join(
camels_db,
"basin_timeseries_v1p2_metForcing_obsFlow",
"basin_dataset_public_v1p2",
"basin_mean_forcing",
)
forcing_types = ["daymet", "maurer", "nldas"]
# attr
attr_dir = os.path.join(
camels_db, "camels_attributes_v2.0", "camels_attributes_v2.0"
)
gauge_id_file = os.path.join(attr_dir, "camels_name.txt")
attr_key_lst = ["topo", "clim", "hydro", "vege", "soil", "geol"]
download_url_lst = [
"https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/camels_attributes_v2.0.zip",
"https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/basin_set_full_res.zip",
"https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/basin_timeseries_v1p2_metForcing_obsFlow.zip",
]
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_FORCING_TYPE=forcing_types,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_ATTR_KEY_LST=attr_key_lst,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
CAMELS_DOWNLOAD_URL_LST=download_url_lst,
)
elif self.region == "AUS":
# id and name
gauge_id_file = os.path.join(
camels_db,
"01_id_name_metadata",
"01_id_name_metadata",
"id_name_metadata.csv",
)
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"02_location_boundary_area",
"02_location_boundary_area",
"shp",
"CAMELS_AUS_BasinOutlets_adopted.shp",
)
# config of flow data
flow_dir = os.path.join(camels_db, "03_streamflow", "03_streamflow")
# attr
attr_dir = os.path.join(camels_db, "04_attributes", "04_attributes")
# forcing
forcing_dir = os.path.join(
camels_db, "05_hydrometeorology", "05_hydrometeorology"
)
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
elif self.region == "BR":
# attr
attr_dir = os.path.join(
camels_db, "01_CAMELS_BR_attributes", "01_CAMELS_BR_attributes"
)
# we don't need the location attr file
attr_key_lst = [
"climate",
"geology",
"human_intervention",
"hydrology",
"land_cover",
"quality_check",
"soil",
"topography",
]
# id and name; there are two types of stations in CAMELS_BR, and we only use the 897-station version
gauge_id_file = os.path.join(attr_dir, "camels_br_topography.txt")
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"14_CAMELS_BR_catchment_boundaries",
"14_CAMELS_BR_catchment_boundaries",
"camels_br_catchments.shp",
)
# config of flow data
flow_dir_m3s = os.path.join(
camels_db, "02_CAMELS_BR_streamflow_m3s", "02_CAMELS_BR_streamflow_m3s"
)
flow_dir_mm_selected_catchments = os.path.join(
camels_db,
"03_CAMELS_BR_streamflow_mm_selected_catchments",
"03_CAMELS_BR_streamflow_mm_selected_catchments",
)
flow_dir_simulated = os.path.join(
camels_db,
"04_CAMELS_BR_streamflow_simulated",
"04_CAMELS_BR_streamflow_simulated",
)
# forcing
forcing_dir_precipitation_chirps = os.path.join(
camels_db,
"05_CAMELS_BR_precipitation_chirps",
"05_CAMELS_BR_precipitation_chirps",
)
forcing_dir_precipitation_mswep = os.path.join(
camels_db,
"06_CAMELS_BR_precipitation_mswep",
"06_CAMELS_BR_precipitation_mswep",
)
forcing_dir_precipitation_cpc = os.path.join(
camels_db,
"07_CAMELS_BR_precipitation_cpc",
"07_CAMELS_BR_precipitation_cpc",
)
forcing_dir_evapotransp_gleam = os.path.join(
camels_db,
"08_CAMELS_BR_evapotransp_gleam",
"08_CAMELS_BR_evapotransp_gleam",
)
forcing_dir_evapotransp_mgb = os.path.join(
camels_db,
"09_CAMELS_BR_evapotransp_mgb",
"09_CAMELS_BR_evapotransp_mgb",
)
forcing_dir_potential_evapotransp_gleam = os.path.join(
camels_db,
"10_CAMELS_BR_potential_evapotransp_gleam",
"10_CAMELS_BR_potential_evapotransp_gleam",
)
forcing_dir_temperature_min_cpc = os.path.join(
camels_db,
"11_CAMELS_BR_temperature_min_cpc",
"11_CAMELS_BR_temperature_min_cpc",
)
forcing_dir_temperature_mean_cpc = os.path.join(
camels_db,
"12_CAMELS_BR_temperature_mean_cpc",
"12_CAMELS_BR_temperature_mean_cpc",
)
forcing_dir_temperature_max_cpc = os.path.join(
camels_db,
"13_CAMELS_BR_temperature_max_cpc",
"13_CAMELS_BR_temperature_max_cpc",
)
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=[
flow_dir_m3s,
flow_dir_mm_selected_catchments,
flow_dir_simulated,
],
CAMELS_FORCING_DIR=[
forcing_dir_precipitation_chirps,
forcing_dir_precipitation_mswep,
forcing_dir_precipitation_cpc,
forcing_dir_evapotransp_gleam,
forcing_dir_evapotransp_mgb,
forcing_dir_potential_evapotransp_gleam,
forcing_dir_temperature_min_cpc,
forcing_dir_temperature_mean_cpc,
forcing_dir_temperature_max_cpc,
],
CAMELS_ATTR_DIR=attr_dir,
CAMELS_ATTR_KEY_LST=attr_key_lst,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
elif self.region == "CL":
# attr
attr_dir = os.path.join(camels_db, "1_CAMELScl_attributes")
attr_file = os.path.join(attr_dir, "1_CAMELScl_attributes.txt")
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"CAMELScl_catchment_boundaries",
"catchments_camels_cl_v1.3.shp",
)
# config of flow data
flow_dir_m3s = os.path.join(camels_db, "2_CAMELScl_streamflow_m3s")
flow_dir_mm = os.path.join(camels_db, "3_CAMELScl_streamflow_mm")
# forcing
forcing_dir_precip_cr2met = os.path.join(
camels_db, "4_CAMELScl_precip_cr2met"
)
forcing_dir_precip_chirps = os.path.join(
camels_db, "5_CAMELScl_precip_chirps"
)
forcing_dir_precip_mswep = os.path.join(
camels_db, "6_CAMELScl_precip_mswep"
)
forcing_dir_precip_tmpa = os.path.join(camels_db, "7_CAMELScl_precip_tmpa")
forcing_dir_tmin_cr2met = os.path.join(camels_db, "8_CAMELScl_tmin_cr2met")
forcing_dir_tmax_cr2met = os.path.join(camels_db, "9_CAMELScl_tmax_cr2met")
forcing_dir_tmean_cr2met = os.path.join(
camels_db, "10_CAMELScl_tmean_cr2met"
)
forcing_dir_pet_8d_modis = os.path.join(
camels_db, "11_CAMELScl_pet_8d_modis"
)
forcing_dir_pet_hargreaves = os.path.join(
camels_db,
"12_CAMELScl_pet_hargreaves",
)
forcing_dir_swe = os.path.join(camels_db, "13_CAMELScl_swe")
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=[flow_dir_m3s, flow_dir_mm],
CAMELS_FORCING_DIR=[
forcing_dir_precip_cr2met,
forcing_dir_precip_chirps,
forcing_dir_precip_mswep,
forcing_dir_precip_tmpa,
forcing_dir_tmin_cr2met,
forcing_dir_tmax_cr2met,
forcing_dir_tmean_cr2met,
forcing_dir_pet_8d_modis,
forcing_dir_pet_hargreaves,
forcing_dir_swe,
],
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=attr_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
elif self.region == "GB":
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"data",
"CAMELS_GB_catchment_boundaries",
"CAMELS_GB_catchment_boundaries.shp",
)
# flow and forcing data are in a same file
flow_dir = os.path.join(
camels_db,
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"data",
"timeseries",
)
forcing_dir = flow_dir
# attr
attr_dir = os.path.join(
camels_db,
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"8344e4f3-d2ea-44f5-8afa-86d2987543a9",
"data",
)
gauge_id_file = os.path.join(
attr_dir, "CAMELS_GB_hydrometry_attributes.csv"
)
attr_key_lst = [
"climatic",
"humaninfluence",
"hydrogeology",
"hydrologic",
"hydrometry",
"landcover",
"soil",
"topographic",
]
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_ATTR_KEY_LST=attr_key_lst,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
elif self.region == "YR":
# shp files of basins
camels_shp_files_dir = os.path.join(
camels_db, "9_Normal_Camels_YR", "Normal_Camels_YR_basin_boundary"
)
# attr, flow and forcing data are all in the same dir. each basin has one dir.
flow_dir = os.path.join(
camels_db, "9_Normal_Camels_YR", "1_Normal_Camels_YR_basin_data"
)
forcing_dir = flow_dir
attr_dir = flow_dir
# no gauge id file for CAMELS_YR; natural_watersheds.txt showed unregulated basins in CAMELS_YR
gauge_id_file = os.path.join(
camels_db, "9_Normal_Camels_YR", "natural_watersheds.txt"
)
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_DIR=camels_shp_files_dir,
)
elif self.region == "CA":
# shp file of basins
camels_shp_files_dir = os.path.join(camels_db, "CANOPEX_BOUNDARIES")
# config of flow data
flow_dir = os.path.join(
camels_db, "CANOPEX_NRCAN_ASCII", "CANOPEX_NRCAN_ASCII"
)
forcing_dir = flow_dir
# There is no attr data in CANOPEX, hence we use attributes from HYSETS -- https://osf.io/7fn4c/
attr_dir = camels_db
gauge_id_file = os.path.join(camels_db, "STATION_METADATA.xlsx")
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_DIR=camels_shp_files_dir,
)
elif self.region == "CE":
# We use A_basins_total_upstrm
# shp file of basins
camels_shp_file = os.path.join(
camels_db,
"2_LamaH-CE_daily",
"A_basins_total_upstrm",
"3_shapefiles",
"Basins_A.shp",
)
# config of flow data
flow_dir = os.path.join(
camels_db, "2_LamaH-CE_daily", "D_gauges", "2_timeseries", "daily"
)
forcing_dir = os.path.join(
camels_db,
"2_LamaH-CE_daily",
"A_basins_total_upstrm",
"2_timeseries",
"daily",
)
attr_dir = os.path.join(
camels_db, "2_LamaH-CE_daily", "A_basins_total_upstrm", "1_attributes"
)
gauge_id_file = os.path.join(
camels_db,
"2_LamaH-CE_daily",
"D_gauges",
"1_attributes",
"Gauge_attributes.csv",
)
return collections.OrderedDict(
CAMELS_DIR=camels_db,
CAMELS_FLOW_DIR=flow_dir,
CAMELS_FORCING_DIR=forcing_dir,
CAMELS_ATTR_DIR=attr_dir,
CAMELS_GAUGE_FILE=gauge_id_file,
CAMELS_BASINS_SHP_FILE=camels_shp_file,
)
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
def download_data_source(self) -> None:
"""
Download CAMELS dataset.
Now we only support CAMELS-US's downloading.
For others, please download it manually and put all files of a CAMELS dataset in one directory.
For example, all files of CAMELS_AUS should be put in "camels_aus" directory
Returns
-------
None
"""
camels_config = self.data_source_description
if self.region == "US":
if not os.path.isdir(camels_config["CAMELS_DIR"]):
os.makedirs(camels_config["CAMELS_DIR"])
[
download_one_zip(attr_url, camels_config["CAMELS_DIR"])
for attr_url in camels_config["CAMELS_DOWNLOAD_URL_LST"]
if not os.path.isfile(
os.path.join(camels_config["CAMELS_DIR"], attr_url.split("/")[-1])
)
]
print("The CAMELS_US data have been downloaded!")
print(
"Please download it manually and put all files of a CAMELS dataset in the CAMELS_DIR directory."
)
print("We unzip all files now.")
if self.region == "CE":
# We only use LamaH-CE's daily files for now; they are distributed as a tar.gz archive
file = tarfile.open(
os.path.join(camels_config["CAMELS_DIR"], "2_LamaH-CE_daily.tar.gz")
)
# extracting file
file.extractall(
os.path.join(camels_config["CAMELS_DIR"], "2_LamaH-CE_daily")
)
file.close()
for f_name in os.listdir(camels_config["CAMELS_DIR"]):
if fnmatch.fnmatch(f_name, "*.zip"):
unzip_dir = os.path.join(camels_config["CAMELS_DIR"], f_name[0:-4])
file_name = os.path.join(camels_config["CAMELS_DIR"], f_name)
unzip_nested_zip(file_name, unzip_dir)
def read_site_info(self) -> pd.DataFrame:
"""
Read the basic information of gages in a CAMELS dataset
Returns
-------
pd.DataFrame
basic info of gages
"""
camels_file = self.data_source_description["CAMELS_GAUGE_FILE"]
if self.region == "US":
data = pd.read_csv(
camels_file, sep=";", dtype={"gauge_id": str, "huc_02": str}
)
elif self.region == "AUS":
data = pd.read_csv(camels_file, sep=",", dtype={"station_id": str})
elif self.region == "BR":
data = pd.read_csv(camels_file, sep=r"\s+", dtype={"gauge_id": str})
elif self.region == "CL":
data = pd.read_csv(camels_file, sep="\t", index_col=0)
elif self.region == "GB":
data = pd.read_csv(camels_file, sep=",", dtype={"gauge_id": str})
elif self.region == "YR":
dirs_ = os.listdir(self.data_source_description["CAMELS_ATTR_DIR"])
data = pd.DataFrame({"gauge_id": dirs_})
elif self.region == "CA":
data = pd.read_excel(camels_file)
elif self.region == "CE":
data = pd.read_csv(camels_file, sep=";")
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
return data
def get_constant_cols(self) -> np.array:
"""
all readable attrs in CAMELS
Returns
-------
np.array
attribute types
"""
data_folder = self.data_source_description["CAMELS_ATTR_DIR"]
if self.region == "US":
var_dict = dict()
var_lst = list()
key_lst = self.data_source_description["CAMELS_ATTR_KEY_LST"]
for key in key_lst:
data_file = os.path.join(data_folder, "camels_" + key + ".txt")
data_temp = pd.read_csv(data_file, sep=";")
var_lst_temp = list(data_temp.columns[1:])
var_dict[key] = var_lst_temp
var_lst.extend(var_lst_temp)
return np.array(var_lst)
elif self.region == "AUS":
attr_all_file = os.path.join(
self.data_source_description["CAMELS_DIR"],
"CAMELS_AUS_Attributes-Indices_MasterTable.csv",
)
camels_aus_attr_indices_data = pd.read_csv(attr_all_file, sep=",")
# exclude station id
return camels_aus_attr_indices_data.columns.values[1:]
elif self.region == "BR":
var_dict = dict()
var_lst = list()
key_lst = self.data_source_description["CAMELS_ATTR_KEY_LST"]
for key in key_lst:
data_file = os.path.join(data_folder, "camels_br_" + key + ".txt")
data_temp = pd.read_csv(data_file, sep=r"\s+")
var_lst_temp = list(data_temp.columns[1:])
var_dict[key] = var_lst_temp
var_lst.extend(var_lst_temp)
return np.array(var_lst)
elif self.region == "CL":
camels_cl_attr_data = self.camels_sites
# exclude station id
return camels_cl_attr_data.index.values
elif self.region == "GB":
var_dict = dict()
var_lst = list()
key_lst = self.data_source_description["CAMELS_ATTR_KEY_LST"]
for key in key_lst:
data_file = os.path.join(
data_folder, "CAMELS_GB_" + key + "_attributes.csv"
)
data_temp = pd.read_csv(data_file, sep=",")
var_lst_temp = list(data_temp.columns[1:])
var_dict[key] = var_lst_temp
var_lst.extend(var_lst_temp)
return np.array(var_lst)
elif self.region == "YR":
attr_json_file = os.path.join(
self.data_source_description["CAMELS_ATTR_DIR"],
"0000",
"attributes.json",
)
attr_json = hydro_utils.unserialize_json_ordered(attr_json_file)
return np.array(list(attr_json.keys()))
elif self.region == "CA":
attr_all_file = os.path.join(
self.data_source_description["CAMELS_DIR"],
"HYSETS_watershed_properties.txt",
)
canopex_attr_indices_data = pd.read_csv(attr_all_file, sep=";")
# exclude HYSETS watershed id
return canopex_attr_indices_data.columns.values[1:]
elif self.region == "CE":
attr_all_file = os.path.join(
self.data_source_description["CAMELS_ATTR_DIR"],
"Catchment_attributes.csv",
)
lamah_ce_attr_indices_data = pd.read_csv(attr_all_file, sep=";")
return lamah_ce_attr_indices_data.columns.values[1:]
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
def get_relevant_cols(self) -> np.array:
"""
all readable forcing types
Returns
-------
np.array
forcing types
"""
if self.region == "US":
return np.array(["dayl", "prcp", "srad", "swe", "tmax", "tmin", "vp"])
elif self.region == "AUS":
forcing_types = []
for root, dirs, files in os.walk(
self.data_source_description["CAMELS_FORCING_DIR"]
):
if root == self.data_source_description["CAMELS_FORCING_DIR"]:
continue
for file in files:
forcing_types.append(file[:-4])
return np.array(forcing_types)
elif self.region == "BR":
return np.array(
[
forcing_dir.split(os.sep)[-1][13:]
for forcing_dir in self.data_source_description[
"CAMELS_FORCING_DIR"
]
]
)
elif self.region == "CL":
return np.array(
[
"_".join(forcing_dir.split(os.sep)[-1].split("_")[2:])
for forcing_dir in self.data_source_description[
"CAMELS_FORCING_DIR"
]
]
)
elif self.region == "GB":
return np.array(
[
"precipitation",
"pet",
"temperature",
"peti",
"humidity",
"shortwave_rad",
"longwave_rad",
"windspeed",
]
)
elif self.region == "YR":
return np.array(
[
"pre",
"evp",
"gst_mean",
"prs_mean",
"tem_mean",
"rhu",
"win_mean",
"gst_min",
"prs_min",
"tem_min",
"gst_max",
"prs_max",
"tem_max",
"ssd",
"win_max",
]
)
elif self.region == "CA":
# Although there is a climatic potential-evaporation item, CANOPEX does not have any PET data
return np.array(["prcp", "tmax", "tmin"])
elif self.region == "CE":
# Forcing variables provided by LamaH-CE (daily time series)
return np.array(
[
"2m_temp_max",
"2m_temp_mean",
"2m_temp_min",
"2m_dp_temp_max",
"2m_dp_temp_mean",
"2m_dp_temp_min",
"10m_wind_u",
"10m_wind_v",
"fcst_alb",
"lai_high_veg",
"lai_low_veg",
"swe",
"surf_net_solar_rad_max",
"surf_net_solar_rad_mean",
"surf_net_therm_rad_max",
"surf_net_therm_rad_mean",
"surf_press",
"total_et",
"prec",
"volsw_123",
"volsw_4",
]
)
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
def get_target_cols(self) -> np.array:
"""
For CAMELS, the target vars are streamflows
Returns
-------
np.array
streamflow types
"""
if self.region == "US":
return np.array(["usgsFlow"])
elif self.region == "AUS":
# QualityCodes are not streamflow data.
# MLd means "1 Megaliters Per Day"; 1 MLd = 0.011574074074074 cubic-meters-per-second
# mmd means "mm/day"
return np.array(
[
"streamflow_MLd",
"streamflow_MLd_inclInfilled",
"streamflow_mmd",
"streamflow_QualityCodes",
]
)
elif self.region == "BR":
return np.array(
[
flow_dir.split(os.sep)[-1][13:]
for flow_dir in self.data_source_description["CAMELS_FLOW_DIR"]
]
)
elif self.region == "CL":
return np.array(
[
flow_dir.split(os.sep)[-1][11:]
for flow_dir in self.data_source_description["CAMELS_FLOW_DIR"]
]
)
elif self.region == "GB":
return np.array(["discharge_spec", "discharge_vol"])
elif self.region == "YR":
return np.array(["normalized_q"])
elif self.region == "CA":
return np.array(["discharge"])
elif self.region == "CE":
return np.array(["qobs"])
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
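# "FDC" below refers to flow-duration-curve settings (evaluation time range and
# number of quantiles) used when derived/"other" data are requested.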
def get_other_cols(self) -> dict:
return {
"FDC": {"time_range": ["1980-01-01", "2000-01-01"], "quantile_num": 100}
}
def read_object_ids(self, **kwargs) -> np.array:
"""
read station ids
Parameters
----------
**kwargs
optional params if needed
Returns
-------
np.array
gage/station ids
"""
if self.region in ["BR", "GB", "US", "YR"]:
return self.camels_sites["gauge_id"].values
elif self.region == "AUS":
return self.camels_sites["station_id"].values
elif self.region == "CL":
station_ids = self.camels_sites.columns.values
# for a 7-digit id, replace the space with 0 to get an 8-digit id
cl_station_ids = [
station_id.split(" ")[-1].zfill(8) for station_id in station_ids
]
return np.array(cl_station_ids)
elif self.region == "CA":
ids = self.camels_sites["STATION_ID"].values
id_strs = [id_.split("'")[1] for id_ in ids]
# although there are 698 sites, there are only 611 sites with attributes data.
# Hence we only use 611 sites now
attr_all_file = os.path.join(
self.data_source_description["CAMELS_DIR"],
"HYSETS_watershed_properties.txt",
)
if not os.path.isfile(attr_all_file):
raise FileNotFoundError(
"Please download HYSETS_watershed_properties.txt from https://osf.io/7fn4c/ and put it in the "
"root directory of CANOPEX"
)
canopex_attr_data = pd.read_csv(attr_all_file, sep=";")
return np.intersect1d(id_strs, canopex_attr_data["Official_ID"].values)
elif self.region == "CE":
# Not all basins have attributes, so we only keep those with attribute data
ids = self.camels_sites["ID"].values
attr_all_file = os.path.join(
self.data_source_description["CAMELS_ATTR_DIR"],
"Catchment_attributes.csv",
)
attr_data = pd.read_csv(attr_all_file, sep=";")
return np.intersect1d(ids, attr_data["ID"].values)
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
def read_usgs_gage(self, usgs_id, t_range):
"""
read streamflow data of a station from CAMELS-US
Parameters
----------
usgs_id
the station id
t_range
the time range, for example, ["1990-01-01", "2000-01-01"]
Returns
-------
np.array
streamflow data of one station for a given time range
"""
print("reading %s streamflow data" % usgs_id)
gage_id_df = self.camels_sites
huc = gage_id_df[gage_id_df["gauge_id"] == usgs_id]["huc_02"].values[0]
usgs_file = os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"],
huc,
usgs_id + "_streamflow_qc.txt",
)
data_temp = pd.read_csv(usgs_file, sep=r"\s+", header=None)
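# CAMELS-US streamflow files are whitespace-separated with columns:
# gauge id, year, month, day, discharge (cfs), QC flag; negative discharge marks missing data.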
obs = data_temp[4].values
obs[obs < 0] = np.nan
t_lst = hydro_utils.t_range_days(t_range)
nt = t_lst.shape[0]
if len(obs) != nt:
out = np.full([nt], np.nan)
df_date = data_temp[[1, 2, 3]]
df_date.columns = ["year", "month", "day"]
date = pd.to_datetime(df_date).values.astype("datetime64[D]")
[C, ind1, ind2] = np.intersect1d(date, t_lst, return_indices=True)
out[ind2] = obs[ind1]
else:
out = obs
return out
def read_br_gage_flow(self, gage_id, t_range, flow_type):
"""
Read gage's streamflow from CAMELS-BR
Parameters
----------
gage_id
the station id
t_range
the time range, for example, ["1990-01-01", "2000-01-01"]
flow_type
"streamflow_m3s" or "streamflow_mm_selected_catchments" or "streamflow_simulated"
Returns
-------
np.array
streamflow data of one station for a given time range
"""
dir_ = [
flow_dir
for flow_dir in self.data_source_description["CAMELS_FLOW_DIR"]
if flow_type in flow_dir
][0]
if flow_type == "streamflow_mm_selected_catchments":
flow_type = "streamflow_mm"
elif flow_type == "streamflow_simulated":
flow_type = "simulated_streamflow"
gage_file = os.path.join(dir_, gage_id + "_" + flow_type + ".txt")
data_temp = pd.read_csv(gage_file, sep=r"\s+")
obs = data_temp.iloc[:, 3].values
obs[obs < 0] = np.nan
df_date = data_temp[["year", "month", "day"]]
date = pd.to_datetime(df_date).values.astype("datetime64[D]")
out = time_intersect_dynamic_data(obs, date, t_range)
return out
def read_gb_gage_flow_forcing(self, gage_id, t_range, var_type):
"""
Read gage's streamflow or forcing from CAMELS-GB
Parameters
----------
gage_id
the station id
t_range
the time range, for example, ["1990-01-01", "2000-01-01"]
var_type
flow type: "discharge_spec" or "discharge_vol"
forcing type: "precipitation", "pet", "temperature", "peti", "humidity", "shortwave_rad", "longwave_rad",
"windspeed"
Returns
-------
np.array
streamflow or forcing data of one station for a given time range
"""
gage_file = os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"],
"CAMELS_GB_hydromet_timeseries_" + gage_id + "_19701001-20150930.csv",
)
data_temp = pd.read_csv(gage_file, sep=",")
obs = data_temp[var_type].values
if var_type in ["discharge_spec", "discharge_vol"]:
obs[obs < 0] = np.nan
date = pd.to_datetime(data_temp["date"]).values.astype("datetime64[D]")
out = time_intersect_dynamic_data(obs, date, t_range)
return out
def read_target_cols(
self,
gage_id_lst: Union[list, np.array] = None,
t_range: list = None,
target_cols: Union[list, np.array] = None,
**kwargs
) -> np.array:
"""
read target values; for CAMELS, they are streamflows
default target_cols is a one-value list
Parameters
----------
gage_id_lst
station ids
t_range
the time range, for example, ["1990-01-01", "2000-01-01"]
target_cols
the default is None, but we need at least one default target.
For CAMELS-US, it is ["usgsFlow"];
for CAMELS-AUS, it's ["streamflow_mmd"];
for CAMELS-BR, it's ["streamflow_m3s"]
kwargs
some other params if needed
Returns
-------
np.array
streamflow data, 3-dim [station, time, streamflow]
"""
if target_cols is None:
return np.array([])
else:
nf = len(target_cols)
t_range_list = hydro_utils.t_range_days(t_range)
nt = t_range_list.shape[0]
y = np.empty([len(gage_id_lst), nt, nf])
if self.region == "US":
for k in range(len(gage_id_lst)):
data_obs = self.read_usgs_gage(gage_id_lst[k], t_range)
# For CAMELS-US, only ["usgsFlow"]
y[k, :, 0] = data_obs
elif self.region == "AUS":
for k in range(len(target_cols)):
flow_data = pd.read_csv(
os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"],
target_cols[k] + ".csv",
)
)
df_date = flow_data[["year", "month", "day"]]
date = pd.to_datetime(df_date).values.astype("datetime64[D]")
[c, ind1, ind2] = np.intersect1d(
date, t_range_list, return_indices=True
)
chosen_data = flow_data[gage_id_lst].values[ind1, :]
chosen_data[chosen_data < 0] = np.nan
y[:, ind2, k] = chosen_data.T
elif self.region == "BR":
for j in range(len(target_cols)):
for k in range(len(gage_id_lst)):
data_obs = self.read_br_gage_flow(
gage_id_lst[k], t_range, target_cols[j]
)
y[k, :, j] = data_obs
elif self.region == "CL":
for k in range(len(target_cols)):
if target_cols[k] == "streamflow_m3s":
flow_data = pd.read_csv(
os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"][0],
"2_CAMELScl_streamflow_m3s.txt",
),
sep="\t",
index_col=0,
)
elif target_cols[k] == "streamflow_mm":
flow_data = pd.read_csv(
os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"][1],
"3_CAMELScl_streamflow_mm.txt",
),
sep="\t",
index_col=0,
)
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
date = pd.to_datetime(flow_data.index.values).values.astype(
"datetime64[D]"
)
[c, ind1, ind2] = np.intersect1d(
date, t_range_list, return_indices=True
)
station_ids = self.read_object_ids()
assert all(x < y for x, y in zip(station_ids, station_ids[1:]))
[s, ind3, ind4] = np.intersect1d(
station_ids, gage_id_lst, return_indices=True
)
chosen_data = flow_data.iloc[ind1, ind3].replace(
r"\s+", np.nan, regex=True
)
chosen_data = chosen_data.astype(float)
chosen_data[chosen_data < 0] = np.nan
y[:, ind2, k] = chosen_data.values.T
elif self.region == "GB":
for j in range(len(target_cols)):
for k in range(len(gage_id_lst)):
data_obs = self.read_gb_gage_flow_forcing(
gage_id_lst[k], t_range, target_cols[j]
)
y[k, :, j] = data_obs
elif self.region == "YR":
for k in range(len(gage_id_lst)):
# only one streamflow type: normalized_q
flow_file = os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"],
gage_id_lst[k],
target_cols[0] + ".csv",
)
flow_data = pd.read_csv(flow_file, sep=",")
date = pd.to_datetime(flow_data["date"]).values.astype("datetime64[D]")
[c, ind1, ind2] = np.intersect1d(
date, t_range_list, return_indices=True
)
# flow data has been normalized, so we don't set negative values NaN
y[k, ind2, 0] = flow_data["q"].values[ind1]
elif self.region == "CA":
for k in range(len(gage_id_lst)):
# only one streamflow type: discharge
canopex_id = self.camels_sites[
self.camels_sites["STATION_ID"] == "'" + gage_id_lst[k] + "'"
]["CANOPEX_ID"].values[0]
flow_file = os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"],
str(canopex_id) + ".dly",
)
read_flow_file = pd.read_csv(flow_file, header=None).values.tolist()
flow_data = []
flow_date = []
for one_site in read_flow_file:
flow_date.append(
hydro_utils.t2dt(int(one_site[0][:8].replace(" ", "0")))
)
all_data = one_site[0].split(" ")
real_data = [one_data for one_data in all_data if one_data != ""]
flow_data.append(float(real_data[-3]))
date = pd.to_datetime(flow_date).values.astype("datetime64[D]")
[c, ind1, ind2] = np.intersect1d(
date, t_range_list, return_indices=True
)
obs = np.array(flow_data)
obs[obs < 0] = np.nan
y[k, ind2, 0] = obs[ind1]
elif self.region == "CE":
for k in range(len(gage_id_lst)):
flow_file = os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"],
"ID_" + str(gage_id_lst[k]) + ".csv",
)
flow_data = pd.read_csv(flow_file, sep=";")
df_date = flow_data[["YYYY", "MM", "DD"]]
df_date.columns = ["year", "month", "day"]
date = pd.to_datetime(df_date).values.astype("datetime64[D]")
[c, ind1, ind2] = np.intersect1d(
date, t_range_list, return_indices=True
)
obs = flow_data["qobs"].values
obs[obs < 0] = np.nan
y[k, ind2, 0] = obs[ind1]
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
return y
def read_forcing_gage(self, usgs_id, var_lst, t_range_list, forcing_type="daymet"):
# data_source = daymet or maurer or nldas
print("reading %s forcing data" % usgs_id)
gage_id_df = self.camels_sites
huc = gage_id_df[gage_id_df["gauge_id"] == usgs_id]["huc_02"].values[0]
data_folder = self.data_source_description["CAMELS_FORCING_DIR"]
if forcing_type == "daymet":
temp_s = "cida"
else:
temp_s = forcing_type
data_file = os.path.join(
data_folder,
forcing_type,
huc,
"%s_lump_%s_forcing_leap.txt" % (usgs_id, temp_s),
)
data_temp = pd.read_csv(data_file, sep=r"\s+", header=None, skiprows=4)
forcing_lst = [
"Year",
"Mnth",
"Day",
"Hr",
"dayl",
"prcp",
"srad",
"swe",
"tmax",
"tmin",
"vp",
]
df_date = data_temp[[0, 1, 2]]
df_date.columns = ["year", "month", "day"]
date = pd.to_datetime(df_date).values.astype("datetime64[D]")
nf = len(var_lst)
[c, ind1, ind2] = np.intersect1d(date, t_range_list, return_indices=True)
nt = c.shape[0]
out = np.empty([nt, nf])
for k in range(nf):
ind = forcing_lst.index(var_lst[k])
out[ind2, k] = data_temp[ind].values[ind1]
return out
def read_br_basin_forcing(self, gage_id, t_range, var_type) -> np.array:
"""
Read one forcing data for a basin in CAMELS_BR
Parameters
----------
gage_id
basin id
t_range
the time range, for example, ["1995-01-01", "2005-01-01"]
var_type
the forcing variable type
Returns
-------
np.array
one type forcing data of a basin in a given time range
"""
dir_ = [
_dir
for _dir in self.data_source_description["CAMELS_FORCING_DIR"]
if var_type in _dir
][0]
if var_type in [
"temperature_min_cpc",
"temperature_mean_cpc",
"temperature_max_cpc",
]:
var_type = var_type[:-4]
gage_file = os.path.join(dir_, gage_id + "_" + var_type + ".txt")
data_temp = pd.read_csv(gage_file, sep=r"\s+")
obs = data_temp.iloc[:, 3].values
df_date = data_temp[["year", "month", "day"]]
date = pd.to_datetime(df_date).values.astype("datetime64[D]")
out = time_intersect_dynamic_data(obs, date, t_range)
return out
def read_relevant_cols(
self,
gage_id_lst: list = None,
t_range: list = None,
var_lst: list = None,
forcing_type="daymet",
) -> np.array:
"""
Read forcing data
Parameters
----------
gage_id_lst
station ids
t_range
the time range, for example, ["1990-01-01", "2000-01-01"]
var_lst
forcing variable types
forcing_type
now only for CAMELS-US, there are three types: daymet, nldas, maurer
Returns
-------
np.array
forcing data
"""
t_range_list = hydro_utils.t_range_days(t_range)
nt = t_range_list.shape[0]
x = np.empty([len(gage_id_lst), nt, len(var_lst)])
if self.region == "US":
for k in range(len(gage_id_lst)):
data = self.read_forcing_gage(
gage_id_lst[k], var_lst, t_range_list, forcing_type=forcing_type
)
x[k, :, :] = data
elif self.region == "AUS":
for k in range(len(var_lst)):
if "precipitation_" in var_lst[k]:
forcing_dir = os.path.join(
self.data_source_description["CAMELS_FORCING_DIR"],
"01_precipitation_timeseries",
)
elif "et_" in var_lst[k] or "evap_" in var_lst[k]:
forcing_dir = os.path.join(
self.data_source_description["CAMELS_FORCING_DIR"],
"02_EvaporativeDemand_timeseries",
)
else:
if "_AWAP" in var_lst[k]:
forcing_dir = os.path.join(
self.data_source_description["CAMELS_FORCING_DIR"],
"03_Other",
"AWAP",
)
elif "_SILO" in var_lst[k]:
forcing_dir = os.path.join(
self.data_source_description["CAMELS_FORCING_DIR"],
"03_Other",
"SILO",
)
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
forcing_data = pd.read_csv(
os.path.join(forcing_dir, var_lst[k] + ".csv")
)
df_date = forcing_data[["year", "month", "day"]]
date = pd.to_datetime(df_date).values.astype("datetime64[D]")
[c, ind1, ind2] = np.intersect1d(
date, t_range_list, return_indices=True
)
chosen_data = forcing_data[gage_id_lst].values[ind1, :]
x[:, ind2, k] = chosen_data.T
elif self.region == "BR":
for j in range(len(var_lst)):
for k in range(len(gage_id_lst)):
data_obs = self.read_br_basin_forcing(
gage_id_lst[k], t_range, var_lst[j]
)
x[k, :, j] = data_obs
elif self.region == "CL":
for k in range(len(var_lst)):
for tmp in os.listdir(self.data_source_description["CAMELS_DIR"]):
if fnmatch.fnmatch(tmp, "*" + var_lst[k]):
tmp_ = os.path.join(
self.data_source_description["CAMELS_DIR"], tmp
)
if os.path.isdir(tmp_):
forcing_file = os.path.join(tmp_, os.listdir(tmp_)[0])
forcing_data = pd.read_csv(forcing_file, sep="\t", index_col=0)
date = pd.to_datetime(forcing_data.index.values).values.astype(
"datetime64[D]"
)
[c, ind1, ind2] = np.intersect1d(
date, t_range_list, return_indices=True
)
station_ids = self.read_object_ids()
assert all(x < y for x, y in zip(station_ids, station_ids[1:]))
[s, ind3, ind4] = np.intersect1d(
station_ids, gage_id_lst, return_indices=True
)
chosen_data = forcing_data.iloc[ind1, ind3].replace(
r"\s+", np.nan, regex=True
)
x[:, ind2, k] = chosen_data.values.T
elif self.region == "GB":
for j in range(len(var_lst)):
for k in range(len(gage_id_lst)):
data_forcing = self.read_gb_gage_flow_forcing(
gage_id_lst[k], t_range, var_lst[j]
)
x[k, :, j] = data_forcing
elif self.region == "YR":
for k in range(len(gage_id_lst)):
forcing_file = os.path.join(
self.data_source_description["CAMELS_FORCING_DIR"],
gage_id_lst[k],
"forcing.csv",
)
forcing_data = pd.read_csv(forcing_file, sep=",")
date = pd.to_datetime(forcing_data["date"]).values.astype(
"datetime64[D]"
)
[c, ind1, ind2] = np.intersect1d(
date, t_range_list, return_indices=True
)
for j in range(len(var_lst)):
x[k, ind2, j] = forcing_data[var_lst[j]].values[ind1]
elif self.region == "CA":
for k in range(len(gage_id_lst)):
canopex_id = self.camels_sites[
self.camels_sites["STATION_ID"] == "'" + gage_id_lst[k] + "'"
]["CANOPEX_ID"].values[0]
forcing_file = os.path.join(
self.data_source_description["CAMELS_FLOW_DIR"],
str(canopex_id) + ".dly",
)
read_forcing_file = pd.read_csv(
forcing_file, header=None
).values.tolist()
forcing_date = []
for j in range(len(var_lst)):
forcing_data = []
for one_site in read_forcing_file:
forcing_date.append(
hydro_utils.t2dt(int(one_site[0][:8].replace(" ", "0")))
)
all_data = one_site[0].split(" ")
real_data = [
one_data for one_data in all_data if one_data != ""
]
if var_lst[j] == "prcp":
forcing_data.append(float(real_data[-5]))
elif var_lst[j] == "tmax":
forcing_data.append(float(real_data[-2]))
elif var_lst[j] == "tmin":
forcing_data.append(float(real_data[-1]))
else:
raise NotImplementedError(
"No such forcing type in CANOPEX now!"
)
date = pd.to_datetime(forcing_date).values.astype("datetime64[D]")
[c, ind1, ind2] = np.intersect1d(
date, t_range_list, return_indices=True
)
x[k, ind2, j] = np.array(forcing_data)[ind1]
elif self.region == "CE":
for k in range(len(gage_id_lst)):
forcing_file = os.path.join(
self.data_source_description["CAMELS_FORCING_DIR"],
"ID_" + str(gage_id_lst[k]) + ".csv",
)
forcing_data = pd.read_csv(forcing_file, sep=";")
df_date = forcing_data[["YYYY", "MM", "DD"]]
df_date.columns = ["year", "month", "day"]
date = pd.to_datetime(df_date).values.astype("datetime64[D]")
[c, ind1, ind2] = np.intersect1d(
date, t_range_list, return_indices=True
)
for j in range(len(var_lst)):
x[k, ind2, j] = forcing_data[var_lst[j]].values[ind1]
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
return x
def read_attr_all(self):
data_folder = self.data_source_description["CAMELS_ATTR_DIR"]
key_lst = self.data_source_description["CAMELS_ATTR_KEY_LST"]
f_dict = dict() # factorize dict
var_dict = dict()
var_lst = list()
out_lst = list()
gage_dict = self.camels_sites
if self.region == "US":
camels_str = "camels_"
sep_ = ";"
elif self.region == "BR":
camels_str = "camels_br_"
sep_ = r"\s+"
elif self.region == "GB":
camels_str = "CAMELS_GB_"
sep_ = ","
else:
raise NotImplementedError(CAMELS_NO_DATASET_ERROR_LOG)
for key in key_lst:
data_file = os.path.join(data_folder, camels_str + key + ".txt")
if self.region == "GB":
data_file = os.path.join(
data_folder, camels_str + key + "_attributes.csv"
)
data_temp = pd.read_csv(data_file, sep=sep_)
var_lst_temp = list(data_temp.columns[1:])
var_dict[key] = var_lst_temp
var_lst.extend(var_lst_temp)
k = 0
n_gage = len(gage_dict["gauge_id"].values)
out_temp = np.full([n_gage, len(var_lst_temp)], np.nan)
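# String-valued attributes are integer-encoded with pd.factorize below; the original
# categories are stored in f_dict so the encoding can be mapped back later.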
for field in var_lst_temp:
if is_string_dtype(data_temp[field]):
value, ref = pd.factorize(data_temp[field], sort=True)
out_temp[:, k] = value
f_dict[field] = ref.tolist()
elif is_numeric_dtype(data_temp[field]):
out_temp[:, k] = data_temp[field].values
k = k + 1
out_lst.append(out_temp)
out = np.concatenate(out_lst, 1)
return out, var_lst, var_dict, f_dict
def read_attr_all_in_one_file(self):
"""
Read all attr data in CAMELS_AUS or CAMELS_CL
Returns
-------
np.array
all attr data in CAMELS_AUS or CAMELS_CL
"""
if self.region == "AUS":
attr_all_file = os.path.join(
self.data_source_description["CAMELS_DIR"],
"CAMELS_AUS_Attributes-Indices_MasterTable.csv",
)
all_attr = pd.read_csv(attr_all_file, sep=",")
elif self.region == "CL":
attr_all_file = os.path.join(
self.data_source_description["CAMELS_ATTR_DIR"],
"1_CAMELScl_attributes.txt",
)
all_attr_tmp = pd.read_csv(attr_all_file, sep="\t", index_col=0)
all_attr = pd.DataFrame(
all_attr_tmp.values.T,
index=all_attr_tmp.columns,
columns=all_attr_tmp.index,
)
elif self.region == "CA":
attr_all_file = os.path.join(
self.data_source_description["CAMELS_ATTR_DIR"],
"HYSETS_watershed_properties.txt",
)
all_attr_tmp = pd.read_csv(attr_all_file, sep=";", index_col=0)
all_attr = all_attr_tmp[
all_attr_tmp["Official_ID"].isin(self.read_object_ids())
]
elif self.region == "CE":
attr_all_file = os.path.join(
self.data_source_description["CAMELS_ATTR_DIR"],
"Catchment_attributes.csv",
)
all_attr = | pd.read_csv(attr_all_file, sep=";") | pandas.read_csv |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import sys
import os
import shutil
import scanpy as sc
from ..utility import exec_process
rscript_folder = os.path.abspath(os.path.dirname(__file__))
# this is a function to integrate matrix and meta data and make AnnData object
def _constructAnnData(main_matrix, cell_ids, var_ids, meta_data,
categorical_in_meta, raw_data=None):
'''
Make anndata manually.
Args:
main_matrix (sparse matrix): this should be imported with the sc.read_mtx() function.
cell_ids (numpy.array): array of cell names; the shape should match main_matrix.
var_ids (numpy.array): array of variable names, e.g. gene names or peak names in the genome;
the shape should match main_matrix.
meta_data (pandas.dataframe): metadata. If the structure of meta_data does not match main_matrix,
main_matrix will be reconstructed to fit the shape of meta_data.
categorical_in_meta (array of str): meta_data columns that should be treated as categorical rather than numeric.
Such columns are sometimes imported as numeric; any column listed here is converted to categorical.
Returns:
anndata: anndata.
'''
main_matrix = main_matrix.transpose()
# change the dtype of cluster-information columns to categorical
for i in meta_data.columns:
if i in categorical_in_meta:
meta_data[i] = meta_data[i].astype(object) # change dtype
# integrate data.
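# If the matrix has more cells than the metadata, keep only the cells whose ids
# appear in meta_data so that the AnnData dimensions agree.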
if main_matrix.shape[0] != meta_data.shape[0]:
cell_ids_in_meta = list(map(lambda x: x in meta_data.index, cell_ids))
cells_ids_in_meta_index = np.arange(len(cell_ids))[cell_ids_in_meta]
mat = sc.AnnData(main_matrix.X[cells_ids_in_meta_index,:],
obs=meta_data, var=pd.DataFrame(index=var_ids))
else:
mat = sc.AnnData(main_matrix.X,
obs=meta_data,
var=pd.DataFrame(index=var_ids))
# add dimensional reduction information
if ("tsne_1" in meta_data.columns):
mat.obsm["X_tsne"] = | pd.concat([mat.obs.tsne_1, mat.obs.tsne_2],axis=1) | pandas.concat |
"""
$ pip install streamlit streamlit-option-menu streamlit-aggrid
- Bootstrap icons:
https://icons.getbootstrap.com/
- This app builds on the following streamlit contributions, Thank you!
- streamlit-option-menu
- streamlit-aggrid
## TODO
- parse table schema to get column name/type and build create/update form programmatically
SELECT
sql
FROM
sqlite_schema
WHERE
type ='table'
"""
import streamlit as st
from streamlit_option_menu import option_menu
from st_aggrid import GridOptionsBuilder, AgGrid, GridUpdateMode, DataReturnMode, JsCode
import sqlite3 as sql
import hashlib
import pandas as pd
_DB_NAME = "journals.db"
# Aggrid options
grid_dict = {
"grid_height": 300,
"return_mode_value": DataReturnMode.__members__["FILTERED"],
"update_mode_value": GridUpdateMode.__members__["MODEL_CHANGED"],
"fit_columns_on_grid_load": True,
"selection_mode": "single", # "multiple",
"allow_unsafe_jscode": True,
"groupSelectsChildren": True,
"groupSelectsFiltered": True,
"enable_pagination": True,
"paginationPageSize": 8,
}
# @st.experimental_singleton
# def sql_connect(db_name, mode="rw"):
# return sql.connect(f"file:{db_name}?mode={mode}", uri=True)
#
# will give this error:
# ProgrammingError: SQLite objects created in a thread can only be used in that same thread. The object was created in thread id 3484 and this is thread id 9876.
def _hashit(password):
return hashlib.sha256(str.encode(password)).hexdigest()
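# Note: unsalted SHA-256 is fine for a demo, but a salted KDF (e.g. bcrypt or argon2)
# would be the safer choice for real credential storage.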
def _get_records(conn, table_name, limit=10000):
sql_stmt = f"""
select * from {table_name} limit {limit}
"""
return pd.read_sql(sql_stmt, conn)
## Read
def _view_records(conn, table_name, context="read"):
# return selected_df
table_name = st.session_state["table_name"] if "table_name" in st.session_state else ""
if not table_name:
st.error("No table is selected!")
return None
df = _get_records(conn, table_name)
enable_selection=True if context in ["update", "delete"] else False
## grid options
gb = GridOptionsBuilder.from_dataframe(df)
if enable_selection:
gb.configure_selection(grid_dict["selection_mode"],
use_checkbox=True,
groupSelectsChildren=grid_dict["groupSelectsChildren"],
groupSelectsFiltered=grid_dict["groupSelectsFiltered"]
)
gb.configure_pagination(paginationAutoPageSize=False,
paginationPageSize=grid_dict["paginationPageSize"])
gb.configure_grid_options(domLayout='normal')
grid_response = AgGrid(
df,
gridOptions=gb.build(),
height=grid_dict["grid_height"],
# width='100%',
data_return_mode=grid_dict["return_mode_value"],
update_mode=grid_dict["update_mode_value"],
fit_columns_on_grid_load=grid_dict["fit_columns_on_grid_load"],
allow_unsafe_jscode=True, #Set it to True to allow jsfunction to be injected
)
if enable_selection:
selected_df = pd.DataFrame(grid_response['selected_rows'])
return selected_df
else:
return None
## Full-text Search
def _clear_find_form():
pass
def _search_record(conn, table_name):
_view_records(conn, table_name, context="search")
with st.form(key="find_user"):
st.text_input("Phrase:", value="", key="find_phrase")
st.form_submit_button('Search', on_click=_clear_find_form)
## Create
def _clear_add_form():
db_name = st.session_state["db_name"] if "db_name" in st.session_state else _DB_NAME
table_name = st.session_state["table_name"] if "table_name" in st.session_state else ""
if not table_name:
st.error("No table is selected!")
return
with sql.connect(f"file:{db_name}?mode=rw", uri=True) as conn:
user_ = st.session_state["add_username"]
pass_ = st.session_state["add_password"]
if user_ and pass_:
su_ = st.session_state["add_su"]
notes_ = st.session_state["add_notes"]
conn.execute(
f"INSERT INTO {table_name} (username, password, su, notes) VALUES(?,?,?,?)",
(user_, _hashit(pass_), su_, notes_),
)
st.session_state["add_username"] = ""
st.session_state["add_password"] = ""
st.session_state["add_su"] = False
st.session_state["add_notes"] = ""
def _create_record(conn, table_name):
_view_records(conn, table_name, context="create")
with st.form(key="add_user"):
st.text_input("Username (required)", key="add_username")
st.text_input("Password (required)", key="add_password")
st.checkbox("Is a superuser?", value=False, key="add_su")
st.text_area('Notes', key="add_notes")
st.form_submit_button('Add', on_click=_clear_add_form)
## Update
def _clear_upd_form():
db_name = st.session_state["db_name"] if "db_name" in st.session_state else _DB_NAME
table_name = st.session_state["table_name"] if "table_name" in st.session_state else ""
if not table_name:
st.error("No table is selected!")
return
with sql.connect(f"file:{db_name}?mode=rw", uri=True) as conn:
username_ = st.session_state["upd_username"]
pass_ = st.session_state["upd_password"]
su_ = st.session_state["upd_su"]
notes_ = st.session_state["upd_notes"]
conn.execute(
f"update {table_name} set password = ?,su = ?, notes = ? where username = ?", (_hashit(pass_),su_,notes_,username_)
)
st.session_state["upd_username"] = ""
st.session_state["upd_password"] = ""
st.session_state["upd_su"] = False
st.session_state["upd_notes"] = ""
def _update_record(conn, table_name):
selected_df = _view_records(conn, table_name, context="update")
# st.dataframe(selected_df)
if selected_df is not None:
row = selected_df.to_dict()
if row:
with st.form(key="upd_user"):
st.text_input("Username:", value=row["username"][0], key="upd_username")
st.text_input("Password:", value=row["password"][0], key="upd_password")
st.checkbox("Is superuser?", value=row["su"][0], key="upd_su")
st.text_area('Notes', value=row["notes"][0], key="upd_notes")
st.form_submit_button('Update', on_click=_clear_upd_form)
## Delete
def _clear_del_form():
db_name = st.session_state["db_name"] if "db_name" in st.session_state else _DB_NAME
table_name = st.session_state["table_name"] if "table_name" in st.session_state else ""
if not table_name:
st.error("No table is selected!")
return
with sql.connect(f"file:{db_name}?mode=rw", uri=True) as conn:
username_ = st.session_state["del_username"]
conn.execute(
f"delete from {table_name} where username = ?", (username_,)
)
st.session_state["del_username"] = ""
def _delete_record(conn, table_name):
selected_df = _view_records(conn, table_name, context="delete")
if selected_df is not None:
row = selected_df.to_dict()
if row:
with st.form(key="del_user"):
st.text_input("Username:", value=row["username"][0], key="del_username")
st.form_submit_button('Delete', on_click=_clear_del_form)
def _manage_table():
db_name = st.session_state["db_name"] if "db_name" in st.session_state else _DB_NAME
sql_stmt = """
SELECT
name
FROM
sqlite_schema
WHERE
type ='table'
order by name
"""
tables = []
table_name = ""
col1, buf, col2 = st.columns([3,1,3])
with col1:
db = st.text_input('SQLite database', value=db_name)
conn = sql.connect(f"file:{db}?mode=rw", uri=True)
df = | pd.read_sql(sql_stmt, conn) | pandas.read_sql |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 9 23:41:26 2020
@author: <NAME>
"""
import pandas as pd
import numpy as np
movie =pd.read_csv("IMDB-Dataset//movies.csv")
rating = pd.read_csv("IMDB-Dataset//ratings.csv")
df = | pd.merge(movie, rating, on='movieId') | pandas.merge |
# -*- coding: utf-8 -*-
import os
from datetime import datetime
from numerapi.numerapi import NumerAPI
import luigi
import pandas as pd
from sklearn import metrics, preprocessing, linear_model
from .numerai_fetch_training_data import FetchAndExtractData
class TrainAndPredict(luigi.Task):
"""
Trains a logistic regression model on the training data, then predicts the
targets on the tournament data.
The default signature of this task is ``TrainAndPredict(output_path='./data')``.
:param output_path: (str)
path to the directory where the predictions will be saved; defaults to
``./data``.
"""
output_path = luigi.Parameter(default='./data/')
def requires(self):
"""
Dependencies to be fulfilled prior to execution. This task needs the
:py:class:`tasks.numerai_fetch_training_data.FetchAndExtractData` task that provides
the training/tournament data.
"""
return FetchAndExtractData(output_path=self.output_path)
def output(self):
"""
Declares the output of this task: a CSV file of the predictions made for the
given data.
"""
self.apc = NumerAPI()
fn ='predictions_{0}_LogisticRegression.csv'.format(self.apc.get_current_round())
return luigi.LocalTarget(os.path.join(self.output_path, fn))
def run(self):
"""
Trains a model and makes predictions given the data. These are then saved
to a csv file.
"""
data = self.input()
out = self.output()
training_data = pd.read_csv(data['training_data.csv'].path, header=0)
prediction_data = pd.read_csv(data['tournament_data.csv'].path, header=0)
# Transform the loaded CSV data into numpy arrays
features = [f for f in list(training_data) if "feature" in f]
X = training_data[features]
Y = training_data["target"]
x_prediction = prediction_data[features]
ids = prediction_data["id"]
# This is your model that will learn to predict
model = linear_model.LogisticRegression(n_jobs=-1)
# Your model is trained on the training_data
model.fit(X, Y)
# Your trained model is now used to make predictions on the
# numerai_tournament_data
# The model returns two columns: [probability of 0, probability of 1]
# We are just interested in the probability that the target is 1.
y_prediction = model.predict_proba(x_prediction)
results = y_prediction[:, 1]
results_df = pd.DataFrame(data={'probability': results})
joined = pd.DataFrame(ids).join(results_df)
print("Writing predictions to predictions.csv")
# Save the predictions out to a CSV file
joined.to_csv("predictions.csv", index=False)
y_prediction = model.predict_proba(x_prediction)
results = y_prediction[:, 1]
results_df = | pd.DataFrame(data={'probability': results}) | pandas.DataFrame |
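# Minimal usage sketch (not part of the original module); the output path is an assumption.
# Running the file directly builds the task graph without a central luigid scheduler:
if __name__ == '__main__':
    luigi.build([TrainAndPredict(output_path='./data/')], local_scheduler=True)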
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day, prev_market_trade_day
from qteasy.utilfuncs import next_market_trade_day
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator
from qteasy.history import stack_dataframes
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.database import DataSource
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000, 20000, 10000])
self.op = np.array([0, 1, -0.33333333])
self.prices = np.array([10, 20, 10])
self.r = qt.Cost()
def test_rate_creation(self):
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
def test_rate_operations(self):
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r(self.amounts), [0.003, 0.003, 0.003]), True, 'fee calculation wrong')
def test_rate_fee(self):
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""Test transaction cost calculated by rate with min_fee"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
f'result incorrect, {test_fixed_fee_result[0]} is not equal to [0, 0, -3300]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
print('\nselling result with fixed rate = 0.001 and slipage = 1E-10:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
f'{test_fixed_fee_result[0]} is not equal to [0, 0, -3333.3333]')
self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
msg=f'{test_fixed_fee_result[1]} is not equal to 33298.88855591.')
self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
msg=f'{test_fixed_fee_result[2]} is not equal to 34.44444409.')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
it = zip(extracted_int_list4, [(0, 10), (1, 'c'), (0, 'b'), (1, 14)])
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
# All grid points have been extracted; build a subspace around each of 10 of them,
# check that every subspace is a Space contained in s, then extract points at a
# precision of 32 per axis and verify the extraction count falls in the expected range.
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""ๆต่ฏไปไธไธช็น็ๆไธไธชspace"""
# ็ๆไธไธชspace๏ผๆๅฎspaceไธญ็ไธไธช็นไปฅๅdistance๏ผ็ๆไธไธชsub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
# test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
| Timestamp('2021-12-31') | pandas.Timestamp |
import os
import json
import re
from pathlib import Path
from typing import Dict, List, Union
import pandas as pd
import numpy as np
from npmrd_curator.parsers.html_table_parser import csv_to_json, parser
from npmrd_curator.exceptions import HtmlReadError
Pathlike = Union[Path, str]
def parse_html_str(input_html: str) -> pd.DataFrame:
try:
soup = parser.read_html(input_html)
headers = parser.find_headers(soup)
except AttributeError:
raise HtmlReadError("Could not load HTML. You may be missing headers?")
rows = parser.find_rows(soup)
columns = parser.get_columns(rows)
atom_index, atom_index_col_index = parser.get_atom_index(columns, headers)
residues, residue_col_index = parser.get_residues(columns, headers)
_2dnmr_col_indices = parser.get_2dnmr_indices(headers)
# add indices together and remove Nones
ignore_cols_indices = list(
filter(
lambda x: x is not None,
_2dnmr_col_indices + [atom_index_col_index, residue_col_index], # type: ignore
)
)
parser.fix_multidata(columns, ignore_cols_indices)
hshift, cshift, hmult, jcoup = parser.column_resolve(columns, ignore_cols_indices)
cols, grid, compound_num = parser.data_to_grid(
atom_index,
resi=residues,
cshift=cshift,
hshift=hshift,
mult=hmult,
coup=jcoup,
)
data = {cols[i]: g for i, g in enumerate(grid)}
return | pd.DataFrame(data) | pandas.DataFrame |
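# Minimal usage sketch (the file name below is hypothetical, not from the original module):
# with open("nmr_table.html") as fh:
#     df = parse_html_str(fh.read())
# print(df.head())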
from sklearn.datasets import load_iris
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
data = load_iris(as_frame=True)
print(data["DESCR"])
data["filename"]
data["target_names"]
data["feature_names"]
data["frame"]
x, y = load_iris(return_X_y=True, as_frame=True)
df = | pd.concat([x, y], axis=1) | pandas.concat |
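# Hedged follow-up (not in the original snippet): the seaborn/matplotlib imports above suggest
# a quick visual sanity check of the merged frame, e.g.
# sns.pairplot(df, hue="target")
# plt.show()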
"""
Peak and plot simultaneously
Grant 2016, double potentials, EVI and my peak finder
"""
import csv
import numpy as np
import pandas as pd
# import geopandas as gpd
from IPython.display import Image
# from shapely.geometry import Point, Polygon
from math import factorial
import datetime
import time
import scipy
import os, os.path
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from sklearn.linear_model import LinearRegression
from patsy import cr
# from pprint import pprint
import matplotlib.pyplot as plt
import seaborn as sb
import sys
start_time = time.time()
# search path for modules
# look @ https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
####################################################################################
###
### Local
###
####################################################################################
################
###
### Core path
###
sys.path.append('/Users/hn/Documents/00_GitHub/Ag/remote_sensing/python/')
################
###
### Directories
###
param_dir = "/Users/hn/Documents/00_GitHub/Ag/remote_sensing/parameters/"
####################################################################################
###
### Aeolus Core path
###
####################################################################################
sys.path.append('/home/hnoorazar/remote_sensing_codes/')
####################################################################################
###
### Aeolus Directories
###
####################################################################################
data_dir = "/data/hydro/users/Hossein/remote_sensing/01_NDVI_TS/batches_70_cloud/"
param_dir = "/home/hnoorazar/remote_sensing_codes/parameters/"
####################################################################################
###
### Parameters
###
####################################################################################
double_crop_potential_plants = pd.read_csv(param_dir + "double_crop_potential_plants.csv")
double_crop_potential_plants.head(2)
####################################################################################
###
### Import remote cores
###
####################################################################################
import remote_sensing_core as rc
import remote_sensing_core as rcp
####################################################################################
###
### Data Reading
###
####################################################################################
freedom_df = 7
delt = 0.1
file_names = ["batch_2016_B_TS_70_cloud.csv"]
file_N = file_names[0]
a_df = pd.read_csv(data_dir + file_N)
# filter Grant
a_df = a_df[a_df.county == "Grant"]
# filter double potentials
a_df = a_df[a_df.CropTyp.isin(double_crop_potential_plants['Crop_Type'])]
output_dir = data_dir + "/savitzky/Grant_2016_EVI/delta_" + str(delt) + "/just_potentials/"
plot_dir_base = output_dir
####################################################################################
###
### process data
###
####################################################################################
# The following columns do not exist in the old data
#
if not('DataSrc' in a_df.columns):
print ("Data source is being set to NA")
a_df['DataSrc'] = "NA"
if not('CovrCrp' in a_df.columns):
print ("Data source is being set to NA")
a_df['CovrCrp'] = "NA"
a_df = rc.initial_clean_EVI(a_df)
a_df.head(2)
an_EE_TS = a_df.copy()
### List of unique polygons
polygon_list = an_EE_TS['geo'].unique()
print(len(polygon_list))
max_output_columns = ['Acres', 'CovrCrp', 'CropGrp', 'CropTyp',
'DataSrc', 'ExctAcr', 'IntlSrD', 'Irrigtn', 'LstSrvD', 'Notes',
'RtCrpTy', 'Shap_Ar', 'Shp_Lng', 'TRS', 'county', 'year', 'geo',
'max_Doy', 'max_value', 'max_count']
all_poly_and_maxs_spline = pd.DataFrame(data=None,
index=np.arange(3*len(an_EE_TS)),
columns=max_output_columns)
all_poly_and_maxs_savitzky = pd.DataFrame(data=None,
index=np.arange(3*len(an_EE_TS)),
columns=max_output_columns)
min_output_columns = ['Acres', 'CovrCrp', 'CropGrp', 'CropTyp',
'DataSrc', 'ExctAcr', 'IntlSrD', 'Irrigtn', 'LstSrvD', 'Notes',
'RtCrpTy', 'Shap_Ar', 'Shp_Lng', 'TRS', 'county', 'year', 'geo',
'min_Doy', 'min_value', 'min_count']
all_poly_and_mins_spline = pd.DataFrame(data=None,
index=np.arange(3*len(an_EE_TS)),
columns=min_output_columns)
all_poly_and_mins_savitzky = pd.DataFrame(data=None,
index=np.arange(3*len(an_EE_TS)),
columns=min_output_columns)
# double_max_columns = ['Acres', 'CovrCrp', 'CropGrp', 'CropTyp',
# 'DataSrc', 'ExctAcr', 'IntlSrD', 'Irrigtn', 'LstSrvD', 'Notes',
# 'RtCrpTy', 'Shap_Ar', 'Shp_Lng', 'TRS', 'county', 'year', 'geo',
# 'max_count']
# double_poly_max_spline = pd.DataFrame(data=None,
# index=np.arange(2*len(an_EE_TS)),
# columns=double_max_columns)
# double_poly_max_savitzky = pd.DataFrame(data=None,
# index=np.arange(2*len(an_EE_TS)),
# columns=double_max_columns)
# double_min_columns = ['Acres', 'CovrCrp', 'CropGrp', 'CropTyp',
# 'DataSrc', 'ExctAcr', 'IntlSrD', 'Irrigtn', 'LstSrvD', 'Notes',
# 'RtCrpTy', 'Shap_Ar', 'Shp_Lng', 'TRS', 'county', 'year', 'geo',
# 'min_count']
# double_poly_min_spline = pd.DataFrame(data=None,
# index=np.arange(2*len(an_EE_TS)),
# columns=double_min_columns)
# double_poly_min_savitzky = pd.DataFrame(data=None,
# index=np.arange(2*len(an_EE_TS)),
# columns=double_min_columns)
pointer_max_spline = 0
pointer_min_spline = 0
pointer_max_savitzky = 0
pointer_min_savitzky = 0
counter = 0
# double_max_spline_pointer = 0
# double_min_spline_pointer = 0
# double_max_savitzky_pointer = 0
# double_min_savitzky_pointer = 0
for a_poly in polygon_list:
if (counter%1000 == 0):
print (counter)
counter += 1
curr_field = an_EE_TS[an_EE_TS['geo']==a_poly]
year = int(curr_field['year'].unique())
plant = curr_field['CropTyp'].unique()[0]
# Take care of names, replace "/" and "," and " " by "_"
plant = plant.replace("/", "_")
plant = plant.replace(",", "_")
plant = plant.replace(" ", "_")
plant = plant.replace("__", "_")
county = curr_field['county'].unique()[0]
TRS = curr_field['TRS'].unique()[0]
###
### There is a chance that a polygon is repeated twice?
###
X = curr_field['doy']
y = curr_field['EVI']
#############################################
###
### Smoothen
###
#############################################
#
# Spline
#
x_basis = cr(X, df=freedom_df, constraints='center') # Generate spline basis with "freedom_df" degrees of freedom
model = LinearRegression().fit(x_basis, y) # Fit model to the data
spline_pred = model.predict(x_basis) # Get estimates
#
# savitzky
#
savitzky_pred = rc.savitzky_golay(y, window_size=5, order=1)
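# Hedged aside (assumption: rc.savitzky_golay follows the classic Savitzky-Golay recipe):
# the same smoothing could also be obtained with SciPy's built-in filter, e.g.
# from scipy.signal import savgol_filter
# savitzky_pred = savgol_filter(y, window_length=5, polyorder=1)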
#############################################
###
### find peaks
###
#############################################
#
# Spline peaks
#
spline_max_min = rc.my_peakdetect(y_axis=spline_pred, x_axis=X, delta=delt);
spline_max = spline_max_min[0];
spline_min = spline_max_min[1];
spline_max = rc.separate_x_and_y(m_list = spline_max);
spline_min = rc.separate_x_and_y(m_list = spline_min);
spline_max_DoYs_series = pd.Series(spline_max[0]);
spline_max_series = pd.Series(spline_max[1]);
spline_min_DoYs_series = pd.Series(spline_min[0]);
spline_min_series = pd.Series(spline_min[1]);
spline_max_df = pd.DataFrame({
'max_Doy': spline_max_DoYs_series,
'max_value': spline_max_series
})
# add number of max to the data frame.
spline_max_df['max_count'] = spline_max_df.shape[0]
spline_min_df = pd.DataFrame({
'min_Doy': spline_min_DoYs_series,
'min_value': spline_min_series
})
# add number of max to the data frame.
spline_min_df['max_count'] = spline_min_df.shape[0]
#################################################################################
#
# savitzky
#
savitzky_max_min = rc.my_peakdetect(y_axis=savitzky_pred, x_axis=X, delta=delt);
savitzky_max = savitzky_max_min[0];
savitzky_min = savitzky_max_min[1];
savitzky_max = rc.separate_x_and_y(m_list = savitzky_max);
savitzky_min = rc.separate_x_and_y(m_list = savitzky_min);
savitzky_max_DoYs_series = pd.Series(savitzky_max[0]);
savitzky_max_series = pd.Series(savitzky_max[1]);
savitzky_min_DoYs_series = pd.Series(savitzky_min[0]);
savitzky_min_series = pd.Series(savitzky_min[1]);
savitzky_max_df = pd.DataFrame({
'max_Doy': savitzky_max_DoYs_series,
'max_value': savitzky_max_series
})
# add number of max to the data frame.
savitzky_max_df['max_count'] = savitzky_max_df.shape[0]
savitzky_min_df = pd.DataFrame({
'min_Doy': savitzky_min_DoYs_series,
'min_value': savitzky_min_series
})
# add number of max to the data frame.
savitzky_min_df['max_count'] = savitzky_min_df.shape[0]
########################################################################################################
########################################################################################################
#############################################
###
### plot
###
#############################################
sub_out = "/plant_based_plots/" + plant + "/"
plot_path = plot_dir_base + sub_out
plot_path = plot_path + str(savitzky_max_df.shape[0]) + "_peaks/"
os.makedirs(plot_path, exist_ok=True)
if (len(os.listdir(plot_path)) < 70):
plot_title = county + ", " + plant + ", " + str(year) + " (" + TRS + ")"
sb.set();
fig, ax = plt.subplots(figsize=(8,6));
ax.scatter(X, y, label="Data", s=30);
ax.plot(X, savitzky_pred, 'k--', label="savitzky")
ax.scatter(savitzky_max_DoYs_series, savitzky_max_series, s=200, c='k', marker='*');
ax.plot(X, spline_pred, 'r--', label="Spline")
ax.scatter(spline_max_DoYs_series, spline_max_series, s=100, c='r', marker='*');
ax.legend(loc="best");
ax.set_title(plot_title);
ax.set(xlabel='DoY', ylabel='EVI')
ax.legend(loc="best");
fig_name = plot_path + county + "_" + plant + "_" + str(year) + "_" + str(counter) + '.png'
plt.savefig(fname = fig_name, \
dpi=300,
bbox_inches='tight')
plt.close()
del(plot_path, sub_out) # county, plant, year
WSDA_df = rc.keep_WSDA_columns(curr_field)
WSDA_df = WSDA_df.drop_duplicates()
if (len(spline_max_df)>0):
WSDA_max_df_spline = | pd.concat([WSDA_df]*spline_max_df.shape[0]) | pandas.concat |
import os
import time
import math
import json
import hashlib
import datetime
import pandas as pd
import numpy as np
from run_pyspark import PySparkMgr
graph_type = "loan_agent/"
def make_md5(x):
md5 = hashlib.md5()
md5.update(x.encode('utf-8'))
return md5.hexdigest()
def make_node_schema(entity_name, entity_df, comp_index_properties = None, mix_index_properties = None):
properties = {"propertyKeys": []}
for col in entity_df.columns:
if entity_df[col].dtype == np.float:
prop = {"name": col, "dataType": "Float", "cardinality": "SINGLE"}
elif entity_df[col].dtype == np.integer:
prop = {"name": col, "dataType": "Integer", "cardinality": "SINGLE"}
else:
prop = {"name": col, "dataType": "String", "cardinality": "SINGLE"}
properties["propertyKeys"].append(prop)
vertexLabels = {"vertexLabels": []}
vertexLabels["vertexLabels"].append({"name": entity_name})
vertexIndexes = {"vertexIndexes": []}
if comp_index_properties is not None:
for prop in comp_index_properties:
vertexIndexes["vertexIndexes"].append({
"name" : entity_name + "_" + prop + "_comp",
"propertyKeys" : [ prop ],
"composite" : True,
"unique" : False
})
if mix_index_properties is not None:
for prop in mix_index_properties:
vertexIndexes["vertexIndexes"].append({
"name" : entity_name + "_" + prop + "_mixed",
"propertyKeys" : [ prop ],
"composite" : False,
"unique" : False,
"mixedIndex" : "search"
})
vertexIndexes["vertexIndexes"].append({
"name" : entity_name + "_graph_label_mixed",
"propertyKeys" : [ "graph_label" ],
"composite" : False,
"unique" : False,
"mixedIndex" : "search"
})
return {**properties, **vertexLabels, **vertexIndexes}
def make_node_mapper(entity_name, entity_df):
entity_file = "gra_" + entity_name + ".csv"
vertexMap = {"vertexMap": {entity_file: {}}}
vertexMap["vertexMap"][entity_file] = {
"[VertexLabel]" : entity_name
}
for col in entity_df.columns:
vertexMap["vertexMap"][entity_file][col] = col
return vertexMap
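# Illustrative sketch (not part of the original job): building the node schema and mapper for a
# tiny, hypothetical "person" entity frame; the column names below are assumptions.
def _example_person_node_artifacts():
    person_df = pd.DataFrame({
        "person_id": ["p1", "p2"],
        "age": [30, 41],
        "score": [0.7, 0.3],
        "graph_label": ["person", "person"],
    })
    schema = make_node_schema("person", person_df,
                              comp_index_properties=["person_id"],
                              mix_index_properties=["age"])
    mapper = make_node_mapper("person", person_df)
    return schema, mapper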
def make_vertex_centric_schema(edge_name, index_property, direction, order):
if direction not in ["BOTH", "IN", "OUT"]:
print("direction should be in {}".format(["BOTH", "IN", "OUT"]))
return None
if order not in ["incr", "decr"]:
print("order should be in {}".format(["incr", "decr"]))
return None
vertexCentricIndexes = {"vertexCentricIndexes": []}
vertexCentricIndexes["vertexIndexes"].append({
"name" : edge_name + "_" + index_property,
"edge" : edge_name,
"propertyKeys" : [ index_property ],
"order": order,
"direction": direction
})
return vertexCentricIndexes
def make_edge_schema(relation_df = None, relation_comp_index_properties = None, relation_mix_index_properties = None):
properties = {"propertyKeys": []}
relation_columns = relation_df.columns.tolist()
if "Left" not in relation_columns or "Right" not in relation_columns:
print("relation df lacks Left and Right columns ")
for col in relation_df.columns:
if col in ["Left", "Right", "Type"]:
continue
if relation_df[col].dtype == np.float:
prop = {"name": col, "dataType": "Float", "cardinality": "SINGLE"}
elif relation_df[col].dtype == np.integer:
prop = {"name": col, "dataType": "Integer", "cardinality": "SINGLE"}
else:
prop = {"name": col, "dataType": "String", "cardinality": "SINGLE"}
properties["propertyKeys"].append(prop)
relation_names = relation_df["Type"].value_counts().index.tolist()
edgeLabels = {"edgeLabels": []}
for relation in relation_names:
edgeLabels["edgeLabels"].append({
"name": relation,
"multiplicity": "MULTI",
"unidirected": False
})
edgeIndexes = {"edgeIndexes": []}
for relation_name in relation_names:
if relation_comp_index_properties is not None:
for prop in relation_comp_index_properties:
edgeIndexes["edgeIndexes"].append({
"name": relation_name + "_" + prop + "_comp",
"propertyKeys": [ prop ],
"composite": True,
"unique": False,
"indexOnly": relation_name
})
if relation_mix_index_properties is not None:
for prop in relation_mix_index_properties:
edgeIndexes["edgeIndexes"].append({
"name" : relation_name + "_" + prop + "_mixed",
"propertyKeys": [ prop ],
"composite": False,
"unique": False,
"mixedIndex": "search",
"indexOnly": relation_name
})
return {**properties, **edgeLabels, **edgeIndexes}
def make_edge_mapper(entity_relations, relation_df=None, specific_relation=None):
edgeMap = {"edgeMap": {}}
for relation_name, entity_pairs in entity_relations.items():
if specific_relation is not None and relation_name != specific_relation:
continue
for pair in entity_pairs:
relation_file = "gra_" + relation_name + ".csv"
edge = {"[edge_left]": {"Left": pair[0]},
"[EdgeLabel]": relation_name,
"[edge_right]": {"Right": pair[1]}}
if relation_df is not None:
relation_columns = relation_df.columns.tolist()
if "Left" not in relation_columns or "Right" not in relation_columns:
print("relation df lacks Left and Right columns ")
for col in relation_df.columns:
if col in ["Left", "Right", "Type"]:
continue
edge[col] = col
edgeMap["edgeMap"][relation_file] = edge
return edgeMap
def dump_schema(schema, datamapper, folder):
if not os.path.exists(graph_type + folder):
os.makedirs(graph_type + folder)
f = open(graph_type + folder + "/schema.json", 'w')
f.write(json.dumps(schema))
f.close()
f = open(graph_type + folder + "/datamapper.json", 'w')
f.write(json.dumps(datamapper))
f.close()
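# Illustrative end-to-end sketch (relation and folder names are assumptions, not from the
# original job): build an edge schema plus mapper for one "works_with" relation and dump both.
def _example_dump_edge_artifacts():
    relation_df = pd.DataFrame({
        "Left": ["p1"], "Right": ["p2"], "Type": ["works_with"], "since": ["2020-01-01"],
    })
    schema = make_edge_schema(relation_df, relation_comp_index_properties=["since"])
    mapper = make_edge_mapper({"works_with": [("person_id", "person_id")]}, relation_df)
    dump_schema(schema, mapper, "example_schema")
    return schema, mapper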
spark_args = {}
pysparkmgr = PySparkMgr(spark_args)
_, spark, sc = pysparkmgr.start('xubin.xu')
# Credit quota application table
apply_loan_df = spark.sql("select * from adm.adm_credit_apply_quota_doc").toPandas()
# Loan drawdown (utilization) table
zhiyong_loan_df = spark.sql("select * from adm.adm_credit_loan_apply_doc").toPandas()
zhiyong_loan_df.quota_apply_id = zhiyong_loan_df.quota_apply_id.astype("int")
# Overdue table
overdue_sql = """select
*
from adm.adm_credit_apply_quota_doc t1
--overdue join: a customer can have several applications at different times, and each application maps to its own overdue status
--current overdue days and historical maximum overdue days
left join
(
select
quota_apply_id,
max(overdue_days_now) as overdue_days_now,
max(his_max_overdue_days) as his_max_overdue_days
from
(
select
c4.quota_apply_id,
c3.overdue_days_now,
c3.his_max_overdue_days
from
adm.adm_credit_loan_apply_doc c4
left join
(
select
c2.business_id,
max(overdue_days_now) as overdue_days_now,
max(overdue_day_calc) as his_max_overdue_days
from
(
select
c1.*,
(case when (overdue_day_calc>0 and latest_actual_repay_date is not null) then 0 else overdue_day_calc end) as overdue_days_now
FROM adm.adm_credit_rpt_risk_overdue_bill c1
) c2
group by c2.business_id
) c3
on c4.loan_no=c3.business_id
) c5
group by quota_apply_id
) t4
on t1.quota_apply_id=t4.quota_apply_id
--first-payment-delinquency days: current FPD days and historical maximum FPD days --------------------------------------
left join
(
select
quota_apply_id,
max(fpd) as fpd,
max(fpd_ever) as fpd_ever
from
(
select
a1.*,a2.*
from
adm.adm_credit_loan_apply_doc a1
left join
(
select
c1.business_id,
(case when (overdue_day_calc>0 and latest_actual_repay_date is null) then overdue_day_calc else 0 end) as fpd,--current first-payment overdue days
c1.overdue_day_calc as fpd_ever--historical maximum first-payment overdue days
from
adm.adm_credit_rpt_risk_overdue_bill c1
where periods=1
) a2
on a1.loan_no=a2.business_id
) a3
group by quota_apply_id
) t5
on t1.quota_apply_id=t5.quota_apply_id"""
overday_df = spark.sql(overdue_sql).toPandas()
# Build the borrower entity
def make_borrower_entity():
shouxin_zhiyong_df = pd.merge(apply_loan_df, zhiyong_loan_df[
["quota_apply_id", "apply_id", "apply_status_risk", "loan_status", "loan_amount", "repayment_principal"]],
how='left', on='quota_apply_id')
borrower_basic_df = shouxin_zhiyong_df[
["name", "uus_id", "employee_no", "identity_no", "sex", "age", "zociac", "educate_level", "marital_status",
"city", "access_role", "entry_date",
"resign_date", "on_job_status", "current_working_days", "uc_job_level_name", "store_city", "apply_id",
"team_code", "shop_code", "area_code", "marketing_code", "region_code"]]
borrower = shouxin_zhiyong_df.groupby("identity_no")
borrower_ext_df = pd.DataFrame([], columns=["identity_no", "total_loan_count", "open_loan_count",
"total_loan_amount", "outstanding_loan_balance"])
idx = 0
for group, df in borrower:
loans_cnt = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == "ๆพๆฌพๆๅ")].apply_id.count()
unclosed_loans_cnt = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == "ๆพๆฌพๆๅ") & (
df.loan_status == "REPAYING")].apply_id.count()
loans_amt = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == "ๆพๆฌพๆๅ")].loan_amount_y.sum()
unpayed_amt = loans_amt - df[
(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == "ๆพๆฌพๆๅ")].repayment_principal.sum()
borrower_ext_df.loc[idx] = {"identity_no": group, "total_loan_count": loans_cnt,
"open_loan_count": unclosed_loans_cnt,
"total_loan_amount": loans_amt, "outstanding_loan_balance": unpayed_amt}
idx += 1
borrower_basic_df.drop_duplicates(borrower_basic_df.columns, keep='first', inplace=True)
borrower_entity_df = pd.merge(borrower_basic_df, borrower_ext_df, on="identity_no")
borrower_entity_df = borrower_entity_df.fillna(0)
overday_gp = overday_df[(~pd.isnull(overday_df.overdue_days_now))].groupby("identity_no")["overdue_days_now"].max()
overday_now_df = | pd.DataFrame({"identity_no": overday_gp.index, "overdue_days_now": overday_gp.values}) | pandas.DataFrame |
#!/usr/bin/python
# <NAME>, <EMAIL>
# v1.0, 09/13/2021
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import mannwhitneyu, norm, kruskal, spearmanr
from scipy.optimize import minimize_scalar
import scikit_posthocs as sp
from statsmodels.stats.multitest import fdrcorrection
from sklearn import metrics
targets = ['Healthy', 'ARPC', 'Luminal', 'NEPC', 'Basal', 'Patient', 'Gray', 'AMPC', 'MIX']
colors = ['#009988', '#0077BB', '#33BBEE', '#CC3311', '#EE7733', '#EE3377', '#BBBBBB', '#FFAE42', '#9F009F']
palette = {targets[i]: colors[i] for i in range(len(targets))}
interest_genes = ['AR', 'ASCL1', 'FOXA1', 'HOXB13', 'NKX3-1', 'REST', 'PGR', 'SOX2', 'ONECUT2', 'MYOG', 'MYF5']
sns.set(font_scale=1.5)
sns.set_style('ticks')
def fraction_plots(ref_dict, full_df, name):
features = list(ref_dict.keys())
# labels = pd.read_table(name + '/' + name + '_beta-predictions.tsv', sep='\t', index_col=0)
# full_df = pd.merge(labels, full_df, left_index=True, right_index=True)
# normalize = Normalize(0, 1)
# cmap = LinearSegmentedColormap.from_list('', ['#CC3311', '#9F009F', '#0077BB'])
for feature in features:
df = pd.concat([full_df['TFX'], full_df['Subtype'], full_df.filter(regex=feature)], axis=1)
x_arpc, y_arpc = df.loc[df['Subtype'] == 'ARPC', 'TFX'].values, df.loc[df['Subtype'] == 'ARPC', feature].values
r_val_arpc, p_val_arpc = spearmanr(x_arpc, y_arpc)
m_arpc, b_arpc = np.polyfit(x_arpc, y_arpc, 1)
x_nepc, y_nepc = df.loc[df['Subtype'] == 'NEPC', 'TFX'].values, df.loc[df['Subtype'] == 'NEPC', feature].values
r_val_nepc, p_val_nepc = spearmanr(x_nepc, y_nepc)
m_nepc, b_nepc = np.polyfit(x_nepc, y_nepc, 1)
plt.figure(figsize=(8, 8))
sns.scatterplot(x='TFX', y=feature, hue='Subtype', data=df, alpha=0.8, palette=palette, s=300)
plt.plot(x_arpc, m_arpc * x_arpc + b_arpc, lw=2, color=palette['ARPC'])
plt.plot(x_nepc, m_nepc * x_nepc + b_nepc, lw=2, color=palette['NEPC'])
# scalarmappaple = cm.ScalarMappable(norm=normalize, cmap=cmap)
# scalarmappaple.set_array(df.values)
# plt.colorbar(scalarmappaple, )
plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
plt.title(feature + ' vs Tumor Fraction' +
'\n ARPC: Spearman = ' + "{:e}".format(r_val_arpc) + ', p-val = ' + "{:e}".format(p_val_arpc) +
'\n NEPC: Spearman = ' + "{:e}".format(r_val_nepc) + ', p-val = ' + "{:e}".format(p_val_nepc))
plt.savefig(name + '/' + feature + '_vsTFX.pdf', bbox_inches="tight")
plt.close()
def dist_plots(full_df, name):
features = list(set([item for item in list(full_df.columns) if '_' in item]))
for feature_label in features:
# format df for seaborn
subs_key = full_df['Subtype']
df = full_df.filter(regex=feature_label).transpose().melt()
df = pd.merge(subs_key, df, left_index=True, right_on='variable')
# histogram:
# plt.figure(figsize=(8, 8))
# sns.histplot(x='value', hue='Subtype', data=df, palette=palette, element="step")
# plt.xlabel(feature_label)
# plt.ylabel('Counts')
# plt.title(feature_label + ' Histogram', size=14)
# plt.savefig(name + '/' + feature_label + '_Histogram.pdf', bbox_inches="tight")
# plt.close()
# density plot
plt.figure(figsize=(8, 8))
sns.kdeplot(x='value', hue='Subtype', data=df, palette=palette, fill=True, common_norm=False)
plt.xlabel(feature_label)
plt.ylabel('Density')
plt.title(feature_label + ' Kernel Density Estimation', size=14)
plt.savefig(name + '/' + feature_label + '_Density.pdf', bbox_inches="tight")
plt.close()
def box_plots(df, name):
df = df[df.columns.drop(list(df.filter(regex='Window')))]
df = df.reindex(sorted(df.columns), axis=1)
df = df.melt(id_vars='Subtype', var_name='Feature', value_name='Value', ignore_index=False)
plt.figure(figsize=(12, 8))
ax = sns.boxplot(x='Feature', y='Value', hue='Subtype', data=df, palette=palette)
plt.setp(ax.get_xticklabels(), rotation=45)
plt.ylabel('Counts')
plt.title(name + ' Feature Distributions', size=14)
plt.savefig(name + '/' + name + '_BoxPlot.pdf', bbox_inches="tight")
plt.close()
def dist_plots_sample(full_df, name):
features = list(set([item for item in list(full_df.columns) if '_' in item]))
for feature_label in features:
# format df for seaborn
subs_key = full_df['Subtype']
df = full_df.filter(regex=feature_label).transpose().melt()
df = pd.merge(subs_key, df, left_index=True, right_on='variable')
print(df)
# plot_range = [0, 2 * np.mean(df['value'])]
for key in subs_key:
# density plot
plt.figure(figsize=(8, 8))
sns.kdeplot(x='value', hue='variable', data=df[df['Subtype'] == key], fill=True, common_norm=False)
plt.xlabel(feature_label)
plt.ylabel('Density')
plt.title(key + ' ' + name + ' Kernel Density Estimation', size=14)
plt.savefig(name + '/' + name + '_' + feature_label + '_Sample-Wise_'
+ key + '_Density.pdf', bbox_inches="tight")
plt.close()
def diff_exp_tw(df, name, thresh=0.05, sub_name=''):
print('Conducting three-way differential expression analysis . . .')
types = list(df.Subtype.unique())
df_t1 = df.loc[df['Subtype'] == types[0]].drop('Subtype', axis=1)
df_t2 = df.loc[df['Subtype'] == types[1]].drop('Subtype', axis=1)
df_t3 = df.loc[df['Subtype'] == types[2]].drop('Subtype', axis=1)
df_lpq = pd.DataFrame(index=df_t1.transpose().index, columns=['p-value', 'DunnSigPairs'])
for roi in list(df_t1.columns):
x, y, z = df_t1[roi].values, df_t2[roi].values, df_t3[roi].values
if np.count_nonzero(~np.isnan(x)) < 2 or np.count_nonzero(~np.isnan(y)) < 2 or np.count_nonzero(~np.isnan(z)) < 2:
continue
try:
kw_score = kruskal(x, y, z, nan_policy='omit')[1]
except ValueError:
continue
df_lpq.at[roi, 'p-value'] = kw_score
if kw_score < thresh:
pairs = 0
dunn_scores = sp.posthoc_dunn([x, y, z])
if dunn_scores[1][2] < thresh:
pairs += 1
if dunn_scores[1][3] < thresh:
pairs += 1
if dunn_scores[2][3] < thresh:
pairs += 1
df_lpq.at[roi, 'DunnSigPairs'] = pairs
else:
df_lpq.at[roi, 'DunnSigPairs'] = 0
# now calculate p-adjusted (Benjamini-Hochberg corrected p-values)
df_lpq = df_lpq.dropna(how='all')
df_lpq['p-adjusted'] = fdrcorrection(df_lpq['p-value'])[1]
df_lpq = df_lpq.infer_objects()
df_lpq = df_lpq.sort_values(by=['p-adjusted'])
df_lpq.to_csv(name + '/' + name + sub_name + '_three-way_rpq.tsv', sep="\t")
features = list(df_lpq[(df_lpq['p-adjusted'] < thresh) & (df_lpq['DunnSigPairs'] == 3)].index)
with open(name + '/' + name + sub_name + '_three-way_FeatureList.tsv', 'w') as f_output:
for item in features:
f_output.write(item + '\n')
return pd.concat([df.iloc[:, :1], df.loc[:, df.columns.isin(features)]], axis=1, join='inner')
def diff_exp(df, name, thresh=0.05, sub_name=''):
print('Conducting differential expression analysis . . .')
types = list(df.Subtype.unique())
df_t1 = df.loc[df['Subtype'] == types[0]].drop('Subtype', axis=1)
df_t2 = df.loc[df['Subtype'] == types[1]].drop('Subtype', axis=1)
df_lpq = pd.DataFrame(index=df_t1.transpose().index, columns=['ratio', 'p-value'])
for roi in list(df_t1.columns):
x, y = df_t1[roi].values, df_t2[roi].values
if np.count_nonzero(~np.isnan(x)) < 2 or np.count_nonzero(~np.isnan(y)) < 2:
continue
df_lpq.at[roi, 'ratio'] = np.mean(x)/np.mean(y)
df_lpq.at[roi, 'p-value'] = mannwhitneyu(x, y)[1]
# now calculate p-adjusted (Benjamini-Hochberg corrected p-values)
df_lpq['p-adjusted'] = fdrcorrection(df_lpq['p-value'])[1]
df_lpq = df_lpq.sort_values(by=['p-adjusted'])
df_lpq = df_lpq.infer_objects()
df_lpq.to_csv(name + '/' + name + sub_name + '_rpq.tsv', sep="\t")
features = list(df_lpq[(df_lpq['p-adjusted'] < thresh)].index)
with open(name + '/' + name + sub_name + '_FeatureList.tsv', 'w') as f_output:
for item in features:
f_output.write(item + '\n')
return pd.concat([df.iloc[:, :1], df.loc[:, df.columns.isin(features)]], axis=1, join='inner')
def metric_analysis(df, name):
print('Calculating metric dictionary . . .')
df = df.dropna(axis=1)
features = list(df.iloc[:, 1:].columns)
types = list(df.Subtype.unique())
mat = {}
for feature in features:
sub_df = pd.concat([df.iloc[:, :1], df[[feature]]], axis=1, join='inner')
mat[feature] = {'Feature': feature}
for subtype in types:
mat[feature][subtype + '_Mean'] = np.nanmean(
sub_df[sub_df['Subtype'] == subtype].iloc[:, 1:].to_numpy().flatten())
mat[feature][subtype + '_Std'] = np.nanstd(
sub_df[sub_df['Subtype'] == subtype].iloc[:, 1:].to_numpy().flatten())
pd.DataFrame(mat).to_csv(name + '/' + name + '_weights.tsv', sep="\t")
return mat
def gaussian_mixture_model(ref_dict, df, subtypes, name):
print('Running Gaussian Mixture Model Predictor on ' + name + ' . . . ')
features = list(ref_dict.keys())
samples = list(df.index)
predictions = pd.DataFrame(0, index=df.index, columns=['LR', 'Prediction'])
# latents = [0.5, 0.5]
for sample in samples:
tfx = df.loc[sample, 'TFX']
score_mat = pd.DataFrame(1, index=features, columns=[subtypes[0], subtypes[1], 'LR'])
for feature in features:
try:
feature_val = df.loc[sample, feature]
except KeyError:
continue
exp_a = tfx * ref_dict[feature][subtypes[0] + '_Mean'] + (1 - tfx) * ref_dict[feature]['Healthy_Mean']
std_a = np.sqrt(tfx * np.square(ref_dict[feature][subtypes[0] + '_Std']) +
(1 - tfx) * np.square(ref_dict[feature]['Healthy_Std']))
exp_b = tfx * ref_dict[feature][subtypes[1] + '_Mean'] + (1 - tfx) * ref_dict[feature]['Healthy_Mean']
std_b = np.sqrt(tfx * np.square(ref_dict[feature][subtypes[1] + '_Std']) +
(1 - tfx) * np.square(ref_dict[feature]['Healthy_Std']))
range_a = [exp_a - 3 * std_a, exp_a + 3 * std_a]
range_b = [exp_b - 3 * std_b, exp_b + 3 * std_b]
range_min, range_max = [min([item for sublist in [range_a, range_b] for item in sublist]),
max([item for sublist in [range_a, range_b] for item in sublist])]
pdf_a = norm.pdf(feature_val, loc=exp_a, scale=std_a)
pdf_b = norm.pdf(feature_val, loc=exp_b, scale=std_b)
if np.isnan(pdf_a) or np.isnan(pdf_b) or pdf_a == 0 or pdf_b == 0\
or np.isinf(pdf_a) or np.isinf(pdf_b) or not range_min < feature_val < range_max:
pdf_a = 1
pdf_b = 1
# score_mat.loc[feature, subtypes[0]] = pdf_a
# score_mat.loc[feature, subtypes[1]] = pdf_b
score_mat.loc[feature, 'LR'] = np.log(pdf_a / pdf_b)
# plot features for specific samples
# if sample in ['FH0200_E_2_A', 'FH0312_E_1_A', 'FH0486_E_2_A']:
# plt.figure(figsize=(8, 8))
# x = np.linspace(range_min, range_max, 100)
# plt.plot(x, norm.pdf(x, exp_a, std_a), c=colors[1], label='Shifted ARPC')
# plt.plot(x, norm.pdf(x, exp_b, std_b), c=colors[3], label='Shifted NEPC')
# plt.axvline(x=feature_val, c=colors[5], label='Sample Value')
# plt.ylabel('Density')
# plt.xlabel(feature)
# plt.legend()
# plt.title(sample + ' ' + feature + ' Shifted Curves and Sample Value', size=14)
# plt.savefig(name + '/' + name + '_' + sample + '_' + feature + '.pdf', bbox_inches="tight")
# plt.close()
# with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print(score_mat)
# gamma_a = score_mat[subtypes[0]].product(axis=0) * latents[0]
# gamma_b = score_mat[subtypes[1]].product(axis=0) * latents[1]
# marginal = gamma_a + gamma_b
# print(str(gamma_a) + '\t' + str(gamma_b) + '\t' + str(marginal))
# predictions.loc[sample, subtypes[0]] = gamma_a / marginal
# predictions.loc[sample, subtypes[1]] = gamma_b / marginal
# predictions.loc[sample, 'LR'] = np.log(gamma_a) - np.log(gamma_b)
predictions.loc[sample, 'LR'] = score_mat['LR'].sum(axis=0)
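# An absolute log-likelihood ratio above 2.3 (~ln 10) means the accumulated evidence
# favours one subtype by roughly a factor of ten; anything in between is left indeterminate.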
if predictions.loc[sample, 'LR'] > 2.3:
predictions.loc[sample, 'Prediction'] = subtypes[0]
elif predictions.loc[sample, 'LR'] < -2.3:
predictions.loc[sample, 'Prediction'] = subtypes[1]
else:
predictions.loc[sample, 'Prediction'] = 'Indeterminate'
predictions.to_csv(name + '/' + name + '_predictions.tsv', sep="\t")
# print('Predictions:')
# print(predictions)
def gaussian_mixture_model_v2(ref_dict, df, subtypes, name):
print('Running Gaussian Mixture Model Predictor (non-binary) on ' + name + ' . . . ')
features = list(ref_dict.keys())
samples = list(df.index)
predictions = pd.DataFrame(0, index=df.index, columns=[subtypes[0], subtypes[1], 'Prediction'])
for sample in samples:
tfx = df.loc[sample, 'TFX']
score_mat = pd.DataFrame(1, index=features, columns=[subtypes[0], subtypes[1]])
for feature in features:
try:
feature_val = df.loc[sample, feature]
except KeyError:
continue
exp_a = tfx * ref_dict[feature][subtypes[0] + '_Mean'] + (1 - tfx) * ref_dict[feature]['Healthy_Mean']
std_a = np.sqrt(tfx * np.square(ref_dict[feature][subtypes[0] + '_Std']) +
(1 - tfx) * np.square(ref_dict[feature]['Healthy_Std']))
exp_b = tfx * ref_dict[feature][subtypes[1] + '_Mean'] + (1 - tfx) * ref_dict[feature]['Healthy_Mean']
std_b = np.sqrt(tfx * np.square(ref_dict[feature][subtypes[1] + '_Std']) +
(1 - tfx) * np.square(ref_dict[feature]['Healthy_Std']))
range_a = [exp_a - 3 * std_a, exp_a + 3 * std_a]
range_b = [exp_b - 3 * std_b, exp_b + 3 * std_b]
range_min, range_max = [min([item for sublist in [range_a, range_b] for item in sublist]),
max([item for sublist in [range_a, range_b] for item in sublist])]
pdf_a = norm.pdf(feature_val, loc=exp_a, scale=std_a)
pdf_b = norm.pdf(feature_val, loc=exp_b, scale=std_b)
pdf_healthy = norm.pdf(feature_val, loc=ref_dict[feature]['Healthy_Mean'], scale=ref_dict[feature]['Healthy_Std'])
if np.isnan(pdf_a) or np.isnan(pdf_healthy) or pdf_a == 0 or pdf_healthy == 0\
or np.isinf(pdf_a) or np.isinf(pdf_healthy) or not range_min < feature_val < range_max:
score_mat.loc[feature, subtypes[0]] = 0
else:
score_mat.loc[feature, subtypes[0]] = np.log(pdf_a / pdf_healthy)
if np.isnan(pdf_b) or np.isnan(pdf_healthy) or pdf_b == 0 or pdf_healthy == 0\
or np.isinf(pdf_b) or np.isinf(pdf_healthy) or not range_min < feature_val < range_max:
score_mat.loc[feature, subtypes[1]] = 0
else:
score_mat.loc[feature, subtypes[1]] = np.log(pdf_b / pdf_healthy)
predictions.loc[sample, subtypes[0]] = score_mat[subtypes[0]].sum(axis=0)
predictions.loc[sample, subtypes[1]] = score_mat[subtypes[1]].sum(axis=0)
ar_score = predictions.loc[sample, subtypes[0]]
ne_score = predictions.loc[sample, subtypes[1]]
if ar_score > 2.3 and ar_score > 2 * ne_score:
predictions.loc[sample, 'Prediction'] = subtypes[0]
elif ne_score > 2.3 and ne_score > 2 * ar_score:
predictions.loc[sample, 'Prediction'] = subtypes[1]
elif ar_score > 2.3 and ne_score > 2.3:
predictions.loc[sample, 'Prediction'] = 'Amphicrine'
else:
predictions.loc[sample, 'Prediction'] = 'Indeterminate/DNPC'
predictions.to_csv(name + '/' + name + '_categorical-predictions.tsv', sep="\t")
def Find_Optimal_Cutoff(target, predicted):
""" Find the optimal probability cutoff point for a classification model related to event rate
Parameters
----------
target : Matrix with dependent or target data, where rows are observations
predicted : Matrix with predicted data, where rows are observations
Returns
-------
list type, with optimal cutoff value
"""
fpr, tpr, threshold = metrics.roc_curve(target, predicted)
i = np.arange(len(tpr))
roc = pd.DataFrame({'tf': pd.Series(tpr - (1 - fpr), index=i), 'threshold': pd.Series(threshold, index=i)})
roc_t = roc.iloc[(roc.tf - 0).abs().argsort()[:1]]
return list(roc_t['threshold'])
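# Illustrative example (hypothetical inputs): Find_Optimal_Cutoff([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8])
# returns [0.4], the threshold at which sensitivity and specificity are closest to equal
# (both 0.5 here), since the criterion minimises |tpr - (1 - fpr)|.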
def specificity_sensitivity(target, predicted, threshold):
thresh_preds = np.zeros(len(predicted))
thresh_preds[predicted > threshold] = 1
cm = metrics.confusion_matrix(target, thresh_preds)
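# cm[1, 1] / (cm[1, 0] + cm[1, 1]) is TP / (TP + FN), i.e. sensitivity, and
# cm[0, 0] / (cm[0, 0] + cm[0, 1]) is TN / (TN + FP), i.e. specificity, so the tuple
# returned below is (sensitivity, specificity) despite the function name's ordering.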
return cm[1, 1] / (cm[1, 0] + cm[1, 1]), cm[0, 0] / (cm[0, 0] + cm[0, 1])
def nroc_curve(y_true, predicted, num_thresh=100):
step = 1/num_thresh
thresholds = np.arange(0, 1 + step, step)
fprs, tprs = [], []
for threshold in thresholds:
y_pred = np.where(predicted >= threshold, 1, 0)
fp = np.sum((y_pred == 1) & (y_true == 0))
tp = np.sum((y_pred == 1) & (y_true == 1))
fn = np.sum((y_pred == 0) & (y_true == 1))
tn = np.sum((y_pred == 0) & (y_true == 0))
fprs.append(fp / (fp + tn))
tprs.append(tp / (tp + fn))
return fprs, tprs, thresholds
def beta_descent(ref_dict, df, subtypes, name, eval, order=None, base_df=None):
print('Running Heterogeneous Beta Predictor on ' + name + ' . . . ')
if not os.path.exists(name + '/'):
os.makedirs(name + '/')
features = list(ref_dict.keys())
cols = subtypes
cols.append('Prediction')
samples = list(df.index)
if eval == 'Bar':
predictions = pd.DataFrame(0, index=df.index, columns=[subtypes[0], subtypes[1], 'TFX', 'Prediction', 'Depth',
subtypes[0] + '_PLL', subtypes[1] + '_PLL', 'JPLL'])
feature_pdfs = pd.DataFrame(columns=['Sample', 'TFX', 'Feature', 'Value',
subtypes[0] + '_s-mean', subtypes[1] + '_s-mean',
subtypes[0] + '_s-std', subtypes[1] + '_s-std',
subtypes[0] + '_pdf', subtypes[1] + '_pdf'])
else:
predictions = pd.DataFrame(0, index=df.index, columns=[subtypes[0], subtypes[1], 'TFX', 'Prediction',
subtypes[0] + '_PLL', subtypes[1] + '_PLL', 'JPLL'])
# predictions['Subtype'] = df['Subtype']
# predictions['Subtype'] = df['NEPC']
predictions['Subtype'] = 'Unknown'
i = 0
for sample in samples:
tfx = df.loc[sample, 'TFX']
pdf_set_a, pdf_set_b = [], []
if base_df is not None: # recompute reference dictionary without samples
if eval == 'Triplet':
sample_comp_1 = sample.split('_')[0] + '_LuCaP'
sample_comp_2 = sample.split('_')[1] + '_LuCaP'
ref_dict = metric_analysis(base_df.drop([sample_comp_1, sample_comp_2]), name)
else:
sample_comp = sample.split('_')[0] + '_LuCaP'
ref_dict = metric_analysis(base_df.drop(sample_comp), name)
for feature in features:
try:
feature_val = df.loc[sample, feature]
except KeyError:
continue
exp_a = tfx * ref_dict[feature][subtypes[0] + '_Mean'] + (1 - tfx) * ref_dict[feature]['Healthy_Mean']
std_a = np.sqrt(tfx * np.square(ref_dict[feature][subtypes[0] + '_Std']) +
(1 - tfx) * np.square(ref_dict[feature]['Healthy_Std']))
exp_b = tfx * ref_dict[feature][subtypes[1] + '_Mean'] + (1 - tfx) * ref_dict[feature]['Healthy_Mean']
std_b = np.sqrt(tfx * np.square(ref_dict[feature][subtypes[1] + '_Std']) +
(1 - tfx) * np.square(ref_dict[feature]['Healthy_Std']))
pdf_a = norm.pdf(feature_val, loc=exp_a, scale=std_a)
pdf_b = norm.pdf(feature_val, loc=exp_b, scale=std_b)
if np.isfinite(pdf_a) and np.isfinite(pdf_b) and pdf_a != 0 and pdf_b != 0:
pdf_set_a.append(pdf_a)
pdf_set_b.append(pdf_b)
# feature_pdfs.loc[i] = [sample, tfx, feature, feature_val, exp_a, exp_b, std_a, std_b, pdf_a, pdf_b]
i += 1
def objective(theta):
log_likelihood = 0
for val_1, val_2 in zip(pdf_set_a, pdf_set_b):
joint_pdf = theta * val_1 + (1 - theta) * val_2
if joint_pdf > 0:
log_likelihood += np.log(joint_pdf)
return -1 * log_likelihood
def final_pdf(final_weight):
log_likelihood_a, log_likelihood_b, jpdf = 0, 0, 0
for val_1, val_2 in zip(pdf_set_a, pdf_set_b):
joint_a, joint_b = final_weight * val_1, (1 - final_weight) * val_2
joint_pdf = final_weight * val_1 + (1 - final_weight) * val_2
if joint_a > 0:
log_likelihood_a += np.log(joint_a)
if joint_b > 0:
log_likelihood_b += np.log(joint_b)
if joint_pdf > 0:
jpdf += np.log(joint_pdf)
return log_likelihood_a, log_likelihood_b, jpdf
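# The mixture weight theta (the contribution of subtypes[0]) is obtained by maximising
# the joint log-likelihood, i.e. minimising its negative over theta in [0, 1].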
weight_1 = minimize_scalar(objective, bounds=(0, 1), method='bounded').x
final_pdf_a, final_pdf_b, final_jpdf = final_pdf(weight_1)
predictions.loc[sample, 'TFX'] = tfx
if eval == 'Bar':
predictions.loc[sample, 'Depth'] = df.loc[sample, 'Depth']
predictions.loc[sample, 'JPLL'] = final_jpdf
predictions.loc[sample, subtypes[0]], predictions.loc[sample, subtypes[1]] = np.round(weight_1, 4), np.round(1 - weight_1, 4)
predictions.loc[sample, subtypes[0] + '_PLL'], predictions.loc[sample, subtypes[1] + '_PLL'] = final_pdf_a, final_pdf_b
if predictions.loc[sample, subtypes[0]] > 0.9:
predictions.loc[sample, 'Prediction'] = subtypes[0]
elif predictions.loc[sample, subtypes[0]] < 0.1:
predictions.loc[sample, 'Prediction'] = subtypes[1]
elif predictions.loc[sample, subtypes[0]] > 0.5:
predictions.loc[sample, 'Prediction'] = 'Mixed_' + subtypes[0]
else:
predictions.loc[sample, 'Prediction'] = 'Mixed_' + subtypes[1]
predictions.to_csv(name + '/' + name + '_beta-predictions.tsv', sep="\t")
# feature_pdfs.to_csv(name + '/' + name + '_feature-values_pdfs.tsv', sep="\t")
# if eval == 'Bar': # for benchmarking
# depths = ['0.2X', '1X', '25X']
# bench_targets = [0.01, 0.03, 0.05, 0.1, 0.2, 0.3]
# # bench_colors = ['#1c9964', '#4b9634', '#768d00', '#a47d00', '#d35e00', '#ff0000']
# # bench_palette = {bench_targets[i]: bench_colors[i] for i in range(len(bench_targets))}
# df_bar = pd.DataFrame(columns=['Depth', 'TFX', 'AUC'])
# for depth in depths:
# for category in bench_targets:
# sub_df = predictions.loc[predictions['TFX'] == category]
# sub_df = sub_df.loc[sub_df['Depth'] == depth]
# y = pd.factorize(sub_df['Subtype'].values)[0]
# fpr, tpr, threshold = metrics.roc_curve(y, sub_df['NEPC'].values)
# roc_auc = metrics.auc(fpr, tpr)
# df_bar.loc[len(df_bar.index)] = [depth, category, roc_auc]
# plt.figure(figsize=(12, 8))
# sns.barplot(x='TFX', y='AUC', hue='Depth', data=df_bar)
# plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# plt.savefig(name + '/' + 'AUCBarPlot.pdf', bbox_inches="tight")
# plt.close()
# df_bar.to_csv(name + '/' + 'AUCList.tsv', sep="\t")
if eval == 'Bar': # for benchmarking
depths = ['0.2X', '1X', '25X']
bench_targets = [0.01, 0.03, 0.05, 0.1, 0.2, 0.3]
predictions = predictions[predictions['TFX'] != 0.03]
for depth in depths:
df = predictions.loc[predictions['Depth'] == depth]
plt.figure(figsize=(8, 8))
# sns.boxplot(x='TFX', y='NEPC', hue='Subtype', data=df, order=bench_targets, boxprops=dict(alpha=.3), palette=palette)
sns.swarmplot(x='TFX', y='NEPC', hue='Subtype', palette=palette, data=df, s=10, alpha=0.8, dodge=False)
plt.ylabel('NEPC Score')
plt.xlabel('Tumor Fraction')
plt.title('Benchmarking Scores at ' + depth, size=14, y=1.1)
plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
plt.savefig(name + '/' + depth + '_BoxPlot.pdf', bbox_inches="tight")
plt.close()
if eval == 'SampleBar':
import matplotlib.cm as cm
from matplotlib.colors import LinearSegmentedColormap
if order is not None:
predictions = predictions.reindex(order.index)
predictions = predictions.sort_values('NEPC')
predictions['NEPC'] = predictions['NEPC'] - 0.3314
data = predictions.groupby(predictions['NEPC']).size()
cmap = LinearSegmentedColormap.from_list('', ['#0077BB', '#CC3311'])
cm.register_cmap("mycolormap", cmap)
if order is not None:
predictions = predictions.reindex(order.index)
pal = sns.color_palette("mycolormap", len(data))
sns.set_context(rc={'patch.linewidth': 0.0})
plt.figure(figsize=(3, 2))
g = sns.barplot(x=predictions.index, y='NEPC', hue='NEPC', data=predictions, palette=pal, dodge=False)
g.legend_.remove()
sns.scatterplot(x=predictions.index, y='NEPC', hue='NEPC', data=predictions, palette=pal, s=600, legend=False)
def change_width(ax, new_value):
for patch in ax.patches:
current_width = patch.get_width()
diff = current_width - new_value
# we change the bar width
patch.set_width(new_value)
# we recenter the bar
patch.set_x(patch.get_x() + diff * .5)
change_width(g, .2)
for item in g.get_xticklabels():
item.set_rotation(45)
plt.axhline(y=0, color='b', linestyle='--', lw=2)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.savefig(name + '/PredictionBarPlot.pdf', bbox_inches="tight")
plt.close()
plt.figure(figsize=(3, 1))
################# Bar Plot thing #################
# df = pd.DataFrame({'Sample': predictions.index,
# 'ARPC': [0.75, 0.85, 0],
# 'NEPC': [0.0, 0.0, 0.75],
# 'AMPC': [0.25, 0.15, 0.25]})
# df.set_index('Sample').plot(kind='bar', stacked=True, color=['#0077BB', '#CC3311', '#800080'])
# plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# plt.savefig(name + '/Types.pdf', bbox_inches="tight")
# plt.close()
if eval == 'AUCBar': # for benchmarking
df_bar = pd.DataFrame(columns=['TFX', 'Depth', 'AUC'])
bench_targets = [0.01, 0.05, 0.1, 0.2, 0.3]
for category in bench_targets:
for depth in ['0.2X', '1X', '25X']:
sub_df = predictions[(predictions['TFX'] == category) & (predictions['Depth'] == depth)]
y = pd.factorize(sub_df['Subtype'].values)[0]
fpr, tpr, _ = metrics.roc_curve(y, sub_df['NEPC'])
auc = metrics.auc(fpr, tpr)
df_bar.loc[len(df_bar.index) + 1] = [category, depth, auc]
plt.figure(figsize=(8, 8))
sns.barplot(x='TFX', y='AUC', hue='Depth', data=df_bar)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.savefig(name + '/AUCBarPlot.pdf', bbox_inches="tight")
plt.close()
df_bar.to_csv(name + '/AUCList.tsv', sep="\t")
if eval == 'TripletBox': # Triplet Mixtures
plt.figure(figsize=(8, 8))
sns.boxplot(x='Subtype', y='NEPC', hue='TFX', data=predictions[predictions['TFX'] == 0.3])
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.savefig(name + '/TripletBoxPlot.pdf', bbox_inches="tight")
plt.close()
if eval == 'ROC':
predictions = predictions[predictions['Subtype'].isin(['ARPC', 'NEPC'])]
thresholds = pd.DataFrame(0, index=['AllTFX', '0.00-0.10', '0.10-1.00'],
columns=['OptimumThreshold', 'Sensitivity', 'Specificity'])
# All TFXs
plt.figure(figsize=(8, 8))
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='black')
y = pd.factorize(predictions['Subtype'].values)[0]
fpr, tpr, threshold = metrics.roc_curve(y, predictions['NEPC'].values)
# fpr, tpr, threshold = nroc_curve(y, predictions['NEPC'].values)
pd.DataFrame([threshold, tpr, [1 - val for val in fpr]],
index=['Threshold', 'Sensitivity', 'Specificity'],
dtype=float).transpose().to_csv(name + '/' + name + '_AllThresholds.tsv', sep="\t")
roc_auc = metrics.auc(fpr, tpr)
optimum_thresh = Find_Optimal_Cutoff(y, predictions['NEPC'].values)[0]
specificity, sensitivity = specificity_sensitivity(y, predictions['NEPC'].values, optimum_thresh)
print(specificity_sensitivity(y, predictions['NEPC'].values, 0.3314))
thresholds.loc['AllTFX'] = [optimum_thresh, specificity, sensitivity]
plt.plot(fpr, tpr, label='AUC = % 0.2f' % roc_auc, lw=4)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
plt.savefig(name + '/' + name + '_ROC.pdf', bbox_inches="tight")
plt.close()
# by TFX
plt.figure(figsize=(8, 8))
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='black')
# 0.00 - 0.10
sub_df = predictions.loc[predictions['TFX'] < 0.10]
y = pd.factorize(sub_df['Subtype'].values)[0]
fpr, tpr, threshold = metrics.roc_curve(y, sub_df['NEPC'].values, drop_intermediate=False)
# fpr, tpr, threshold = nroc_curve(y, sub_df['NEPC'].values)
pd.DataFrame([threshold, tpr, [1 - val for val in fpr]],
index=['Threshold', 'Sensitivity', 'Specificity'],
dtype=float).transpose().to_csv(name + '/' + name + '_0.00-0.10Thresholds.tsv', sep="\t")
roc_auc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, label='TFX < 0.10: AUC = % 0.2f' % roc_auc, lw=4, color='#1c9964')
optimum_thresh = Find_Optimal_Cutoff(y, sub_df['NEPC'].values)[0]
specificity, sensitivity = specificity_sensitivity(y, sub_df['NEPC'].values, optimum_thresh)
thresholds.loc['0.00-0.10'] = [optimum_thresh, specificity, sensitivity]
# 0.25 - 1.00
sub_df = predictions.loc[predictions['TFX'] > 0.25]
y = pd.factorize(sub_df['Subtype'].values)[0]
fpr, tpr, threshold = metrics.roc_curve(y, sub_df['NEPC'].values)
# fpr, tpr, threshold = nroc_curve(y, sub_df['NEPC'].values)
pd.DataFrame([threshold, tpr, [1 - val for val in fpr]],
index=['Threshold', 'Sensitivity', 'Specificity'],
dtype=float).transpose().to_csv(name + '/' + name + '_0.1-1.00Thresholds.tsv', sep="\t")
roc_auc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, label='TFX > 0.10: AUC = % 0.2f' % roc_auc, lw=4, color='#ff0000')
optimum_thresh = Find_Optimal_Cutoff(y, sub_df['NEPC'].values)[0]
specificity, sensitivity = specificity_sensitivity(y, sub_df['NEPC'].values, optimum_thresh)
thresholds.loc['0.10-1.00'] = [optimum_thresh, specificity, sensitivity]
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
plt.savefig(name + '/' + name + '_TFX-ROC.pdf', bbox_inches="tight")
plt.close()
thresholds.to_csv(name + '/' + name + '_Thresholds.tsv', sep="\t")
def product_column(a, b):
ab = []
for item_a in a:
for item_b in b:
ab.append(item_a + '_' + item_b)
return ab
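# e.g. product_column(['a', 'b'], ['x', 'y']) -> ['a_x', 'a_y', 'b_x', 'b_y']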
def subset_data(df, sub_list):
regions = list(set([item.split('_')[0] for item in list(df.columns) if '_' in item]))
categories = list(set([item.split('_')[1] for item in list(df.columns) if '_' in item]))
features = list(set([item.split('_')[2] for item in list(df.columns) if '_' in item]))
sub_list += [region for region in regions if any(gene + '-' in region for gene in sub_list)]
sub_list = list(set(sub_list))
all_features = product_column(categories, features)
sub_features = product_column(sub_list, all_features)
sub_df = df[df.columns.intersection(sub_features)]
return pd.concat([df['Subtype'], sub_df], axis=1, join='inner')
def main():
test_data = 'emseq' # bench or patient_ULP/WGS or freed or triplet
# LuCaP dataframe - data is formatted in the "ExploreFM.py" pipeline
pickl = '/fh/fast/ha_g/user/rpatton/LuCaP_data/Exploration/LuCaP_FM.pkl'
print("Loading " + pickl)
df = pd.read_pickle(pickl)
df = df.drop('LB-Phenotype', axis=1)
df = df.rename(columns={'PC-Phenotype': 'Subtype'})
df = df[df['Subtype'] != 'AMPC']
df = df[df['Subtype'] != 'ARlow']
df = df[df.columns.drop(list(df.filter(regex='shannon-entropy')))]
df_lucap = df[df.columns.drop(list(df.filter(regex='mean-depth')))]
# Healthy dataframe - data is formatted in the "ExploreFM.py" pipeline
pickl = '/fh/fast/ha_g/user/rpatton/HD_data/Exploration/Healthy_FM.pkl'
print("Loading " + pickl)
df = | pd.read_pickle(pickl) | pandas.read_pickle |
from typing import (
Any,
Dict,
List,
Tuple,
Union,
TypeVar,
Callable,
Hashable,
Iterable,
Optional,
Sequence,
)
from typing_extensions import Literal
import os
import wrapt
import warnings
from itertools import tee, product, combinations
from statsmodels.stats.multitest import multipletests
import scanpy as sc
from anndata import AnnData
from cellrank import logging as logg
from cellrank.tl._enum import ModeEnum
from cellrank.ul._docs import d
from cellrank.ul._utils import _get_neighs, _has_neighs, _get_neighs_params
from cellrank.tl._colors import (
_compute_mean_color,
_convert_to_hex_colors,
_insert_categorical_colors,
)
from cellrank.ul._parallelize import parallelize
from cellrank.tl._linear_solver import _solve_lin_system
from cellrank.tl.kernels._utils import np_std, np_mean, _filter_kwargs
import numpy as np
import pandas as pd
from pandas import Series
from scipy.stats import norm
from numpy.linalg import norm as d_norm
from scipy.sparse import eye as speye
from scipy.sparse import diags, issparse, spmatrix, csr_matrix, isspmatrix_csr
from sklearn.cluster import KMeans
from pandas.api.types import infer_dtype, is_bool_dtype, is_categorical_dtype
from scipy.sparse.linalg import norm as sparse_norm
import matplotlib.colors as mcolors
ColorLike = TypeVar("ColorLike")
GPCCA = TypeVar("GPCCA")
CFLARE = TypeVar("CFLARE")
DiGraph = TypeVar("DiGraph")
EPS = np.finfo(np.float64).eps
class TestMethod(ModeEnum): # noqa
FISCHER = "fischer"
PERM_TEST = "perm_test"
class RandomKeys:
"""
Create random keys inside an :class:`anndata.AnnData` object.
Parameters
----------
adata
Annotated data object.
n
Number of keys. If `None`, create just 1 key.
where
Attribute of ``adata``. If `'obs'`, also clean up `'{key}_colors'` for each generated key.
"""
def __init__(self, adata: AnnData, n: Optional[int] = None, where: str = "obs"):
self._adata = adata
self._where = where
self._n = n or 1
self._keys = []
def _generate_random_keys(self):
def generator():
return f"RNG_COL_{np.random.randint(2 ** 16)}"
where = getattr(self._adata, self._where)
names, seen = [], set(where.keys())
while len(names) != self._n:
name = generator()
if name not in seen:
seen.add(name)
names.append(name)
return names
def __enter__(self):
self._keys = self._generate_random_keys()
return self._keys
def __exit__(self, exc_type, exc_val, exc_tb):
for key in self._keys:
try:
getattr(self._adata, self._where).drop(
key, axis="columns", inplace=True
)
except KeyError:
pass
if self._where == "obs":
try:
del self._adata.uns[f"{key}_colors"]
except KeyError:
pass
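# Illustrative usage (sketch): keys exist only inside the context and any matching
# columns/colors are cleaned up from ``adata`` on exit:
#
#     with RandomKeys(adata, n=2, where="obs") as keys:
#         adata.obs[keys[0]] = ...  # temporary annotation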
def _pairwise(iterable: Iterable) -> zip:
"""Return pairs of elements from an iterable."""
a, b = tee(iterable)
next(b, None)
return zip(a, b)
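# e.g. _pairwise([1, 2, 3]) yields (1, 2), (2, 3)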
def _min_max_scale(x: np.ndarray) -> np.ndarray:
"""
Scale a 1D array to 0-1 range.
Parameters
----------
x
Array to be scaled.
Returns
-------
The scaled array.
"""
minn, maxx = np.nanmin(x), np.nanmax(x)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
return (x - minn) / (maxx - minn)
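# e.g. _min_max_scale(np.array([1.0, 3.0, 5.0])) -> array([0. , 0.5, 1. ])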
def _process_series(
series: pd.Series, keys: Optional[List[str]], colors: Optional[np.array] = None
) -> Union[pd.Series, Tuple[pd.Series, List[str]]]:
"""
Process :class:`pandas.Series` categorical objects.
Categories in ``series`` are combined/removed according to ``keys``,
the same transformation is applied to the corresponding colors.
Parameters
----------
series
Input data; must be a :class:`pandas.Series` of categorical type.
keys
Keys could be e.g. `['cat_1, cat_2', 'cat_4']`. If originally,
there were 4 categories in `series`, then this would combine the first
and the second and remove the third. The same would be done to `colors`,
i.e. the first and second color would be merged (average color), while
the third would be removed.
colors
List of colors which aligns with the order of the categories.
Returns
-------
:class:`pandas.Series`
Updated categorical annotation. Each cell is assigned to either
`NaN` or one of the updated approximate recurrent classes.
list
Color list processed according to keys.
"""
# determine whether we want to process colors as well
process_colors = colors is not None
# if keys is None, just return
if keys is None:
if process_colors:
return series, colors
return series
# assert dtype of the series
if not is_categorical_dtype(series):
raise TypeError(f"Series must be `categorical`, found `{infer_dtype(series)}`.")
# initialize a copy of the series object
series_in = series.copy()
if process_colors:
colors_in = np.array(colors.copy())
if len(colors_in) != len(series_in.cat.categories):
raise ValueError(
f"Length of colors ({len(colors_in)}) does not match length of "
f"categories ({len(series_in.cat.categories)})."
)
if not all(mcolors.is_color_like(c) for c in colors_in):
raise ValueError("Not all colors are color-like.")
# define a set of keys
keys_ = {
tuple(sorted({key.strip(" ") for key in rc.strip(" ,").split(",")}))
for rc in keys
}
# check that the keys are unique
overlap = [set(ks) for ks in keys_]
for c1, c2 in combinations(overlap, 2):
overlap = c1 & c2
if overlap:
raise ValueError(f"Found overlapping keys: `{list(overlap)}`.")
# check the `keys` are all proper categories
remaining_cat = [b for a in keys_ for b in a]
if not np.all(np.in1d(remaining_cat, series_in.cat.categories)):
raise ValueError(
"Not all keys are proper categories. Check for spelling mistakes in `keys`."
)
# remove cats and colors according to keys
n_remaining = len(remaining_cat)
removed_cat = list(set(series_in.cat.categories) - set(remaining_cat))
if process_colors:
mask = np.in1d(series_in.cat.categories, remaining_cat)
colors_temp = colors_in[mask].copy()
series_temp = series_in.cat.remove_categories(removed_cat)
# loop over all indiv. or combined rc's
colors_mod = {}
for cat in keys_:
# if there are more than two keys in this category, combine them
if len(cat) > 1:
new_cat_name = " or ".join(cat)
mask = np.repeat(False, len(series_temp))
for key in cat:
mask = np.logical_or(mask, series_temp == key)
remaining_cat.remove(key)
series_temp = series_temp.cat.add_categories(new_cat_name)
remaining_cat.append(new_cat_name)
series_temp[mask] = new_cat_name
if process_colors:
# apply the same to the colors array. We just append new colors at the end
color_mask = np.in1d(series_temp.cat.categories[:n_remaining], cat)
colors_merge = np.array(colors_temp)[:n_remaining][color_mask]
colors_mod[new_cat_name] = _compute_mean_color(colors_merge)
elif process_colors:
color_mask = np.in1d(series_temp.cat.categories[:n_remaining], cat[0])
colors_mod[cat[0]] = np.array(colors_temp)[:n_remaining][color_mask][0]
# Since we have just appended colors at the end, we must now delete the unused ones
series_temp = series_temp.cat.remove_unused_categories()
series_temp = series_temp.cat.reorder_categories(remaining_cat)
if process_colors:
# original colors can still be present, convert to hex
colors_temp = _convert_to_hex_colors(
[colors_mod[c] for c in series_temp.cat.categories]
)
return series_temp, colors_temp
return series_temp
def _complex_warning(
X: np.array, use: Union[list, int, tuple, range], use_imag: bool = False
) -> np.ndarray:
"""
Check for imaginary components in columns of X specified by ``use``.
Parameters
----------
X
Matrix containing the eigenvectors.
use
Selection of columns of `X`.
use_imag
For eigenvectors that are complex, use real or imaginary part.
Returns
-------
class:`numpy.ndarray`
An array containing either only real eigenvectors or also complex ones.
"""
complex_mask = np.sum(X.imag != 0, axis=0) > 0
complex_ixs = np.array(use)[np.where(complex_mask)[0]]
complex_key = "imaginary" if use_imag else "real"
if len(complex_ixs) > 0:
logg.warning(
f"The eigenvectors with indices `{list(complex_ixs)}` have an imaginary part. "
f"Showing their {complex_key} part"
)
X_ = X.real
if use_imag:
X_[:, complex_mask] = X.imag[:, complex_mask]
return X_
def _mat_mat_corr_sparse(
X: csr_matrix,
Y: np.ndarray,
) -> np.ndarray:
n = X.shape[1]
X_bar = np.reshape(np.array(X.mean(axis=1)), (-1, 1))
X_std = np.reshape(
np.sqrt(np.array(X.power(2).mean(axis=1)) - (X_bar ** 2)), (-1, 1)
)
y_bar = np.reshape(np.mean(Y, axis=0), (1, -1))
y_std = np.reshape(np.std(Y, axis=0), (1, -1))
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
return (X @ Y - (n * X_bar * y_bar)) / ((n - 1) * X_std * y_std)
def _mat_mat_corr_dense(X: np.ndarray, Y: np.ndarray) -> np.ndarray:
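# Pearson correlation of each row of X (e.g. genes) against each column of Y (e.g. lineages),
# computed without forming the centred matrices explicitly.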
n = X.shape[1]
X_bar = np.reshape(np_mean(X, axis=1), (-1, 1))
X_std = np.reshape(np_std(X, axis=1), (-1, 1))
y_bar = np.reshape(np_mean(Y, axis=0), (1, -1))
y_std = np.reshape(np_std(Y, axis=0), (1, -1))
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
return (X @ Y - (n * X_bar * y_bar)) / ((n - 1) * X_std * y_std)
def _perm_test(
ixs: np.ndarray,
corr: np.ndarray,
X: Union[np.ndarray, spmatrix],
Y: np.ndarray,
seed: Optional[int] = None,
queue=None,
) -> Tuple[np.ndarray, np.ndarray]:
rs = np.random.RandomState(None if seed is None else seed + ixs[0])
cell_ixs = np.arange(X.shape[1])
pvals = np.zeros_like(corr, dtype=np.float64)
corr_bs = np.zeros((len(ixs), X.shape[0], Y.shape[1])) # perms x genes x lineages
mmc = _mat_mat_corr_sparse if issparse(X) else _mat_mat_corr_dense
for i, _ in enumerate(ixs):
rs.shuffle(cell_ixs)
corr_i = mmc(X, Y[cell_ixs, :])
pvals += np.abs(corr_i) >= np.abs(corr)
bootstrap_ixs = rs.choice(cell_ixs, replace=True, size=len(cell_ixs))
corr_bs[i, :, :] = mmc(X[:, bootstrap_ixs], Y[bootstrap_ixs, :])
if queue is not None:
queue.put(1)
if queue is not None:
queue.put(None)
return pvals, corr_bs
@d.get_sections(base="correlation_test", sections=["Returns"])
@d.dedent
def _correlation_test(
X: Union[np.ndarray, spmatrix],
Y: "Lineage", # noqa: F821
gene_names: Sequence[str],
method: TestMethod = TestMethod.FISCHER,
confidence_level: float = 0.95,
n_perms: Optional[int] = None,
seed: Optional[int] = None,
**kwargs: Any,
) -> pd.DataFrame:
"""
Perform a statistical test.
Return NaN for genes which don't vary across cells.
Parameters
----------
X
Array or sparse matrix of shape ``(n_cells, n_genes)`` containing the expression.
Y
Array of shape ``(n_cells, n_lineages)`` containing the absorption probabilities.
gene_names
Sequence of shape ``(n_genes,)`` containing the gene names.
method
Method for p-value calculation.
confidence_level
Confidence level for the confidence interval calculation. Must be in `[0, 1]`.
n_perms
Number of permutations if ``method = 'perm_test'``.
seed
Random seed if ``method = 'perm_test'``.
%(parallel)s
Returns
-------
Dataframe of shape ``(n_genes, n_lineages * 5)`` containing the following columns, one for each lineage:
- ``{lineage}_corr`` - correlation between the gene expression and absorption probabilities.
- ``{lineage}_pval`` - calculated p-values for double-sided test.
- ``{lineage}_qval`` - corrected p-values using Benjamini-Hochberg method at level `0.05`.
- ``{lineage}_ci_low`` - lower bound of the ``confidence_level`` correlation confidence interval.
- ``{lineage}_ci_high`` - upper bound of the ``confidence_level`` correlation confidence interval.
"""
corr, pvals, ci_low, ci_high = _correlation_test_helper(
X.T,
Y.X,
method=method,
n_perms=n_perms,
seed=seed,
confidence_level=confidence_level,
**kwargs,
)
invalid = np.sum((corr < -1) | (corr > 1))
if invalid:
raise ValueError(f"Found `{invalid}` correlations that are not in `[0, 1]`.")
res = | pd.DataFrame(corr, index=gene_names, columns=[f"{c}_corr" for c in Y.names]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Project: Psychophysics_exps
Creator: Miao
Create time: 2021-01-05 19:14
IDE: PyCharm
Introduction:
"""
import pandas as pd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
from src.analysis.exp1_local_density_analysis import dict_pix_to_deg, get_result_dict, interplote_result_dict_start, \
get_fitted_res_cdf_poisson, get_sample_plot_x_y, normolizedLD, get_data2fit, get_data_to_fit_list, \
get_fitted_power_list, get_data_to_ttest, get_avrg_dict, get_avrg_result_dict, interplote_avrg_result_dict_start, \
avrg_dict_pix_to_deg, get_avrg_data_to_fit
from src.commons.fitfuncs import fit_poisson_cdf
from src.commons.process_dataframe import process_col
from src.commons.process_dict import get_sub_dict
from src.commons.process_str import str_to_list
def get_diff_x_y(n: int, to_fit_dict_c: dict, to_fit_dict_nc: dict, fit_poly = True):
# ori x values
c_x_list = to_fit_dict_c[n][:, 0].tolist()
nc_x_list = to_fit_dict_nc[n][:, 0].tolist()
# ori y values
c_y_list = to_fit_dict_c[n][:, 1].tolist()
nc_y_list = to_fit_dict_nc[n][:, 1].tolist()
if fit_poly:
polyfit_crowding_avrg = np.poly1d(np.polyfit(x = c_x_list, y = c_y_list, deg = 2))
c_y_list = polyfit_crowding_avrg(c_x_list).tolist()
polyfit_no_crowding_avrg = np.poly1d(np.polyfit(x = nc_x_list, y = nc_y_list, deg = 2))
nc_y_list = polyfit_no_crowding_avrg(nc_x_list).tolist()
# the first x value is the same in both conditions
assert (c_x_list[0] == nc_x_list[0])
# pair x, y
c_xy_dict = dict(zip(c_x_list, c_y_list))
nc_xy_dict = dict(zip(nc_x_list, nc_y_list))
# new_y=nc_y_list-c_y_list
# key: union x, value: new y
diff_x_y_dict = dict()
x_intersection = list(set(c_x_list) & set(nc_x_list))
x_c_unique = set(c_x_list) - set(nc_x_list)
x_nc_unique = set(nc_x_list) - set(c_x_list)
for x in x_intersection:
diff_x_y_dict[x] = nc_xy_dict[x] - c_xy_dict[x]
# for x in x_c_unique:
# # find previous x of nc_x_list
# curr_nc_x = __find_previous_x(x, nc_x_list)
# diff_x_y_dict[x] = nc_xy_dict[curr_nc_x] - c_xy_dict[x]
#
# for x in x_nc_unique:
# # find previous x of c_x_list
# curr_c_x = __find_previous_x(x, c_x_list)
# diff_x_y_dict[x] = nc_xy_dict[x] - c_xy_dict[curr_c_x]
res_x = sorted(list(diff_x_y_dict.keys()))
res_y = list()
for x in res_x:
res_y.append(diff_x_y_dict[x])
assert (res_y[0] == nc_y_list[0] - c_y_list[0])
return res_x, res_y
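# get_diff_x_y returns the x values shared by both conditions together with the
# difference (no-crowding minus crowding) of the, optionally polynomial-smoothed, local densities.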
def __find_previous_x(target_x: float, x_list: list) -> float:
# for an x_list sorted in ascending order, find the first value greater than target_x, then step back one
for curr_index, curr_x in enumerate(x_list):
if curr_x > target_x:
return x_list[curr_index - 1]
# edge case: target_x > x_list[-1]
if target_x > x_list[-1]:
return x_list[-1]
assert (__find_previous_x(2.3, [1, 2.1, 3, 5]) == 2.1)
if __name__ == '__main__':
save_plot = False
fit_poisson = False
fit_polynomial = True
plot_each_display = True
plot_average_display = True
PATH = "../displays/"
FILE = "update_stim_info_full.xlsx"
stimuli_df = pd.read_excel(PATH + FILE)
# process positions
process_col(stimuli_df, "positions", str_to_list)
# crowding and no-crowding df
stimuli_df_c = stimuli_df[(stimuli_df['crowdingcons'] == 1)]
stimuli_df_nc = stimuli_df[(stimuli_df['crowdingcons'] == 0)]
# positions into dictionary, key is numerosity
crowding_dic = {k: g['positions'].tolist() for k, g in stimuli_df_c.groupby('N_disk')}
no_crowding_dic = {k: g['positions'].tolist() for k, g in stimuli_df_nc.groupby('N_disk')}
# get local density distribution
result_dict_c = get_result_dict(crowding_dic)
result_dict_nc = get_result_dict(no_crowding_dic)
# make sure the local density values start from (100,..)
result_dict_c = interplote_result_dict_start(result_dict_c)
result_dict_nc = interplote_result_dict_start(result_dict_nc)
# covert pixel to deg
result_dict_c = dict_pix_to_deg(result_dict_c, 1)
result_dict_nc = dict_pix_to_deg(result_dict_nc, 1)
# possible keys
k_03 = [21, 22, 23, 24, 25]
k_04 = [31, 32, 33, 34, 35]
k_05 = [41, 42, 43, 44, 45]
k_06 = [49, 50, 51, 52, 53]
k_07 = [54, 55, 56, 57, 58]
k_list = [k_03, k_04, k_05, k_06, k_07]
# data to fit
result_dict_c_list = [get_sub_dict(result_dict_c, k) for k in k_list]
result_dict_nc_list = [get_sub_dict(result_dict_nc, k) for k in k_list]
# %% fit polynomial
datac_to_fit = get_data_to_fit_list(result_dict_c_list)
datanc_to_fit = get_data_to_fit_list(result_dict_nc_list)
# degree of the polynomial fit (the highest-order term)
deg = 2
if deg == 2:
label_c = "polynomial fit radial"
label_nc = "polynomial fit tangential"
elif deg == 1:
label_c = "linear fit radial"
label_nc = "linear fit tangential"
fitted_c = get_fitted_power_list(datac_to_fit, deg = deg)
fitted_nc = get_fitted_power_list(datanc_to_fit, deg = deg)
# data for ttest
datac_ttest = get_data_to_ttest(fitted_c)
datanc_ttest = get_data_to_ttest(fitted_nc)
# covert to dataframe
dfc = | pd.DataFrame(datac_ttest) | pandas.DataFrame |
"""This file contains functions which are used to generate the log-likelihood
for different memory models and other code required to run the experiments in
the manuscript."""
import multiprocessing as MP
import warnings
from collections import defaultdict
import numpy as np
import pandas as pd
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import seaborn as sns
# Constants
TIME_SCALE = 60 * 60 * 24
MODEL_POWER = True
B = [1]
POWER_B = 1
def get_unique_user_lexeme(data_dict):
"""Get all unique (user, lexeme) pairs."""
pairs = set()
for u_id in data_dict.keys():
pairs.update([(u_id, x) for x in data_dict[u_id].keys()])
return sorted(pairs)
def max_unif(N, sum_D):
"""Find maximum value of N * log(x) - x * sum_D"""
x_max = N / sum_D
return N * np.log(x_max) - sum_D * x_max
def max_memorize(n_0, a, b, recalled, Ds,
q_fixed=None, n_max=np.inf, n_min=0, verbose=True):
"""Return max_{over q} memorizeLL."""
# TODO: Currently, these are not true.
# n_max=1440, n_min=1/36500000
# maximum forgetting rate is clipped at 1 minute for exp(-1) forgetting and
# minimum forgetting rate is that exp(-1) chance of forgetting after 100,000 years.
assert len(recalled) == len(Ds), "recalled and Ds are not of the same length."
N = len(Ds)
n_t = n_0
log_sum = 0
int_sum = 0
n_ts = []
m_dts = []
not_warned_min, not_warned_max = True, True
n_correct, n_wrong = 0, 0
with warnings.catch_warnings():
warnings.simplefilter('once' if verbose else 'ignore')
for D, recall in zip(Ds, recalled):
if MODEL_POWER is False:
m_dt = np.exp(-n_t * D)
else:
m_dt = (1 + POWER_B * D)**(-n_t)
n_ts.append(n_t)
m_dts.append(m_dt)
if n_t < 1e-20:
# log_sum += np.log(n_0) + n_correct * np.log(a) + n_wrong * np.log(b) + np.log(D)
int_sum += n_t * (D ** 2) / 2
else:
if MODEL_POWER is False:
int_sum += D + np.expm1(-n_t * D) / n_t
else:
int_sum += D - ((1 + POWER_B * D) ** (1 - n_t) - 1) / (POWER_B * (1 - n_t))
if m_dt >= 1.0:
log_sum = -np.inf
else:
log_sum += np.log1p(-m_dt)
if recall:
n_t *= (1 - a)
n_correct += 1
else:
n_t *= (1 + b)
n_wrong += 1
n_t = min(n_max, max(n_min, n_t))
if n_t == n_max and not_warned_max:
if verbose:
warnings.warn('Max boundary hit.')
not_warned_max = False
if n_t == n_min and not_warned_min:
if verbose:
warnings.warn('Min boundary hit.')
not_warned_min = False
if int_sum != 0:
q_max = 1 / (4 * ((N / 2) / int_sum) ** 2) if q_fixed is None else q_fixed
else:
# If int_sum == 0, then LL should be -inf, not NaN
q_max = 1.0
LL = log_sum - (N / 2) * np.log(q_max) - (1 / q_max)**(0.5) * int_sum
return {
'q_max' : q_max,
'n_ts' : n_ts,
'm_dts' : m_dts,
'm_mean' : np.mean(m_dts),
'log_sum' : log_sum,
'int_sum' : int_sum,
'LL' : LL,
'max_boundary_hit' : not not_warned_max,
'min_boundary_hit' : not not_warned_min,
'n_max' : n_max,
'n_min' : n_min
}
def get_training_pairs(data_dict, pairs):
"""Returns the subset of pairs which have more than 3 reviews, i.e.
the set for which we will be able to perform training using `n-1` reviews
and testing for the last review."""
training_pairs = []
for u_id, l_id in pairs:
if len(data_dict[u_id][l_id]) >= 3:
training_pairs.append((u_id, l_id))
return training_pairs
def calc_ll_arr(method, data_arr, alpha=None, beta=None,
success_prob=0.49, eps=1e-10,
all_mem_output=False, verbose=True):
"""Calculate LL for a given user_id, lexeme_id pair's data_arr."""
n_0 = data_arr[0]['n_0']
if method == 'uniform':
sum_D = max(sum(x['delta_scaled'] for x in data_arr), eps)
N = len(data_arr)
return max_unif(N, sum_D)
elif method == 'memorize':
recalled = np.asarray([x['p_recall'] > success_prob for x in data_arr])
deltas = np.asarray([x['delta_scaled'] for x in data_arr])
deltas = np.where(deltas <= 0, eps, deltas)
op = max_memorize(n_0, alpha, beta, recalled, deltas, verbose=verbose)
if not all_mem_output:
return op['LL']
else:
return op
else:
raise ValueError("Invalid method: {}".format(method))
def calc_user_LL_dict(data_dict, alpha, beta, lexeme_difficulty, map_lexeme,
success_prob=0.49, n_procs=None, pairs=None, verbose=True,
training=False):
"""Calculate LL while assuming that the LL factors are the same per user
instead of setting them for each (user, lexeme) pair.
If `training` is True, then the LL calculation is done only for the
first n-1 entries instead of for all events in the sequence.
"""
u_l_dict = defaultdict(lambda: defaultdict(lambda: {}))
lexeme_difficulty = np.abs(lexeme_difficulty)
global stat_worker
def stat_worker(params):
u_id = params
data_per_lexeme = data_dict[u_id]
n_0s = []
Ns, sumDs = [], []
log_sums, int_sums = [], []
all_mem_op = []
# The tests for all lexemes.
all_tests = []
lexeme_ids = sorted(data_per_lexeme.keys())
valid_lexeme_ids = []
for l_id in lexeme_ids:
arr = data_per_lexeme[l_id]
if training:
if len(arr) < 3:
# Cannot calculate the LL for sequences shorter than 3
# elements if we are looking to train + test with these
# sequences.
continue
else:
# Ignore the last review, which we will use for testing.
all_tests.append(arr[-1])
# Append the test before truncating arr
arr = arr[:-1]
valid_lexeme_ids.append(l_id)
n_0 = arr[0]['n_0']
n_0s.append(n_0)
Ns.append(len(arr))
sumDs.append(sum(x['delta_scaled'] for x in arr))
mem_res = calc_ll_arr('memorize', arr,
alpha=alpha, beta=beta,
success_prob=success_prob, all_mem_output=True,
verbose=verbose)
log_sums.append(mem_res['log_sum'])
int_sums.append(mem_res['int_sum'])
all_mem_op.append(mem_res)
c_unif = np.sum(Ns) / np.sum(sumDs)
q_max = 1 / (4 * ((np.sum(Ns) / 2) / np.sum(int_sums)) ** 2)
res = {}
for idx, l_id in enumerate(valid_lexeme_ids):
res[l_id] = {
'uniform_LL': Ns[idx] * np.log(c_unif) - sumDs[idx] * c_unif,
'memorize_LL': log_sums[idx] + Ns[idx] * np.log(q_max) / 2 - (1 / q_max)**(0.5) * int_sums[idx],
'mem_op': all_mem_op[idx],
'q_max': q_max,
'c_unif': c_unif
}
if training:
res[l_id]['test'] = all_tests[idx]
return u_id, res
if n_procs is None:
n_procs = MP.cpu_count()
user_ids = sorted(set([u_id for u_id, _ in pairs]))
with MP.Pool(n_procs) as pool:
if n_procs > 1:
map_func = pool.map
else:
# This aids debugging.
map_func = map
for u_id, res in map_func(stat_worker, user_ids):
u_l_dict[u_id] = res
return u_l_dict
def max_threshold(n_0, a, b, recalled, Ds, w, p,
alpha_fixed=None, n_max=np.inf, n_min=0, verbose=True):
"""Return max_{over a} threshold-LL, unless alpha_fixed is provided.
In that case, the LL is calculated for the given alpha.
Note (relationship of the symbols with those used in the paper):
- p is m_{th} in the paper.
- alpha (also alpha_max) is c in the paper
- w is 1/\zeta in the paper.
"""
assert len(recalled) == len(Ds), "recalled and Ds are not of the same length."
N = len(Ds)
n_t = n_0
log_sum = 0
int_sum = 0
n_ts = []
m_dts = []
tau_dts = []
not_warned_min, not_warned_max = True, True
n_correct, n_wrong = 0, 0
sum_third = 0
sum_second = 0
with warnings.catch_warnings():
warnings.simplefilter('once' if verbose else 'ignore')
for D, recall in zip(Ds, recalled):
if MODEL_POWER is True:
tau = (np.exp(-np.log(p) / n_t) - 1) / B[0]
else:
tau = -np.log(p) / n_t
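# tau is the time at which the recall probability decays to the threshold p under the
# current forgetting rate n_t: (1 + B*t)^(-n_t) = p for the power-law memory model,
# exp(-n_t * t) = p for the exponential one.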
if n_t < 1e-20 and p != 1.0:
raise Exception("P should be 1 when n_t is not finite")
# When n_t is too small, p should also be 1.
elif n_t < 1e-20 and p == 1.0:
D_ = np.max([D, 0.0001])
else:
D_ = np.max([D - tau, 0.0001])
sum_third += w * np.expm1(-D_ / w)
sum_second += -D_ / w
n_ts.append(n_t)
m_dts.append(np.exp(-n_t * D))
tau_dts.append(tau)
if recall:
n_t *= a
n_correct += 1
else:
n_t *= b
n_wrong += 1
n_t = min(n_max, max(n_min, n_t))
if n_t == n_max and not_warned_max:
if verbose:
warnings.warn('Max boundary hit.')
not_warned_max = False
if n_t == n_min and not_warned_min:
if verbose:
warnings.warn('Min boundary hit.')
not_warned_min = False
if alpha_fixed is None:
alpha_max = -N / sum_third
else:
alpha_max = alpha_fixed
LL = N * np.log(np.max([alpha_max, 0.0001])) + sum_second + alpha_max * sum_third
if np.isfinite(LL).sum() == 0:
raise Exception("LL is not finite")
return {
'alpha_max': alpha_max,
'n_ts': n_ts,
'm_dts': m_dts,
'm_mean': np.mean(m_dts),
'log_sum': log_sum,
'int_sum': int_sum,
'LL': LL,
'max_boundary_hit': not not_warned_max,
'min_boundary_hit': not not_warned_min,
'n_max': n_max,
'n_min': n_min,
'p': p,
'w': w,
'sum_second': sum_second, # sum_i -D_i / w
'sum_third': sum_third, # sum_i w * (exp(-D_i / w) - 1)
'N': N
}
def calc_ll_arr_thres(
method, data_arr, alpha=None, beta=None,
success_prob=0.49, eps=1e-10, w_range=None, p_range=None,
verbose=True, all_thres_output=True, alpha_fixed=None):
assert method == 'threshold', "This function only computes the max_threshold LL."
n_0 = data_arr[0]['n_0']
recalled = np.asarray([x['p_recall'] > success_prob for x in data_arr])
deltas = np.asarray([x['delta_scaled'] for x in data_arr])
deltas = np.where(deltas <= 0, eps, deltas)
best_LL = None
if w_range is None:
w_range = [0.01, 0.1, 1, 10, 100]
n_is = [n_0]
with warnings.catch_warnings():
warnings.simplefilter('once' if verbose else 'ignore')
for x in data_arr:
if x['p_recall'] > success_prob:
n_is.append(n_is[-1] * alpha)
else:
n_is.append(n_is[-1] * beta)
# Remove the last n_t
n_is = np.array(n_is[:-1])
if p_range is None:
# In most cases p_ == 1, the np.unique limits useless iterations.
if (n_is < 1e-20).sum() > 0:
p_range = [1.0]
else:
p_ = np.exp(-deltas * n_is).max()
p_range = np.unique(np.linspace(p_, 1, 4))
for w in w_range:
for p in p_range:
op = max_threshold(n_0, a=alpha, b=beta, recalled=recalled,
Ds=deltas, p=p, w=w, verbose=verbose,
alpha_fixed=alpha_fixed)
if best_LL is None or best_LL['LL'] < op['LL']:
best_LL = op
if all_thres_output:
return best_LL
else:
return best_LL['LL']
def calc_LL_dict_threshold(data_dict, alpha, beta, pairs,
lexeme_difficulty, map_lexeme, success_prob=0.49,
p_range=None, w_range=None,
n_procs=None, verbose=True):
"""Calculate the LL of the threshold model optimized for each (user, item)
pair."""
u_l_dict = defaultdict(lambda: {})
lexeme_difficulty = np.abs(lexeme_difficulty)
global _max_threshold_worker
def _max_threshold_worker(params):
u_id, l_id = params
arr = data_dict[u_id][l_id]
op = calc_ll_arr_thres('threshold', arr, alpha=alpha, beta=beta,
success_prob=success_prob, all_thres_output=True,
verbose=verbose)
return u_id, l_id, {'threshold_op': op, 'threshold_LL': op['LL']}
if n_procs is None:
n_procs = MP.cpu_count()
with MP.Pool() as pool:
for u_id, l_id, res in pool.map(_max_threshold_worker, pairs):
u_l_dict[u_id][l_id] = res
return u_l_dict
def calc_user_ll_arr_thres(
method, user_data_dict, alpha=None, beta=None,
success_prob=0.49, eps=1e-10, w_range_init=None, p_range_init=None,
training=False, verbose=True, all_thres_output=True):
"""Calculates the best-LL for a given user, by computing it across all
items the user has touched.
If `training` is True, then only consider the first 'n - 1' reviews
of the user/lexeme pairs, ignoring sequences smaller than 2.
"""
assert method == 'threshold', "This function only computes the max_threshold LL."
total_sum_second = defaultdict(lambda: 0)
total_sum_third = defaultdict(lambda: 0)
total_N = 0
p_ = 0.0
if w_range_init is None:
w_range = [0.01, 0.1, 1, 10, 100]
else:
w_range = w_range_init
if p_range_init is None:
for l_id in user_data_dict.keys():
data_arr = user_data_dict[l_id]
n_0 = data_arr[0]['n_0']
deltas = np.asarray([x['delta_scaled'] for x in data_arr])
deltas = np.where(deltas <= 0, eps, deltas)
n_is = [n_0]
with warnings.catch_warnings():
warnings.simplefilter('once' if verbose else 'ignore')
for x in data_arr:
if x['p_recall'] > success_prob:
n_is.append(n_is[-1] * alpha)
else:
n_is.append(n_is[-1] * beta)
# Remove the last n_t
n_is = np.array(n_is[:-1])
# In most cases p_ == 1, the np.unique limits useless iterations.
if (n_is < 1e-20).sum() > 0:
p_ = 1.0
else:
p_ = max(p_, np.exp(-deltas * n_is).max())
if p_ < 1.0:
p_range = np.linspace(p_, 1, 4)
else:
# if p_ == 1.0, then no point taking linspace.
p_range = [p_]
else:
p_range = p_range_init
for l_id in user_data_dict.keys():
data_arr = user_data_dict[l_id]
if training:
if len(data_arr) < 3:
# Cannot calculate the LL for training and have a test unless
# there are at least 3 reviews.
continue
else:
# Calculate the LL only using the first 'n-1' reviews.
data_arr = data_arr[:-1]
total_N += len(data_arr)
n_0 = data_arr[0]['n_0']
recalled = np.asarray([x['p_recall'] > success_prob for x in data_arr])
deltas = np.asarray([x['delta_scaled'] for x in data_arr])
deltas = np.where(deltas <= 0, eps, deltas)
for w in w_range:
for p in p_range:
op = max_threshold(n_0, a=alpha, b=beta, recalled=recalled,
Ds=deltas, p=p, w=w, verbose=verbose)
total_sum_second[w, p] += op['sum_second']
total_sum_third[w, p] += op['sum_third']
best_LL = None
for w, p in total_sum_second.keys():
alpha_max_user = - total_sum_third[w, p] / total_N
LL = total_N * alpha_max_user + total_sum_second[w, p] + alpha_max_user * total_sum_third[w, p]
if best_LL is None or best_LL['LL'] < LL:
best_LL = {
'LL': LL,
'w': w,
'p': p,
'sum_third': total_sum_third[w, p],
'sum_second': total_sum_second[w, p],
'alpha_max_user': alpha_max_user
}
if all_thres_output:
return best_LL
else:
return best_LL['LL']
def calc_user_LL_dict_threshold(data_dict, alpha, beta, pairs,
lexeme_difficulty, map_lexeme, success_prob=0.49,
p_range=None, w_range=None, training=False,
n_procs=None, verbose=True):
"""Calculate the LL of the threshold model optimized for each user.
If `training` is True, then it computes the likelihood only for the first
`n - 1` entries instead of for all 'n' reviews.
"""
u_l_dict = defaultdict(lambda: {})
lexeme_difficulty = np.abs(lexeme_difficulty)
if n_procs is None:
n_procs = MP.cpu_count()
global _max_user_c_worker
def _max_user_c_worker(params):
u_id = params
best_LL = calc_user_ll_arr_thres('threshold',
user_data_dict=data_dict[u_id],
alpha=alpha, beta=beta,
success_prob=success_prob,
training=training,
all_thres_output=True,
verbose=verbose)
return u_id, best_LL
with MP.Pool() as pool:
u_best_alpha = dict(pool.map(_max_user_c_worker, data_dict.keys()))
global _max_user_threshold_worker
def _max_user_threshold_worker(params):
u_id, l_id = params
alpha_max_user = u_best_alpha[u_id]['alpha_max_user']
w_range = [u_best_alpha[u_id]['w']]
p_range = [u_best_alpha[u_id]['p']]
arr = data_dict[u_id][l_id]
if training:
assert len(arr) >= 3, "Are you using `training_pairs` instead of" \
" all pairs in the call?"
test = arr[-1]
# Append the test before truncating arr
arr = arr[:-1]
op = calc_ll_arr_thres('threshold', arr, alpha=alpha, beta=beta,
success_prob=success_prob,
all_thres_output=True, verbose=verbose,
alpha_fixed=alpha_max_user, w_range=w_range,
p_range=p_range)
res = {'threshold_op': op, 'threshold_LL': op['LL']}
if training:
res['test'] = test
return u_id, l_id, res
with MP.Pool() as pool:
for u_id, l_id, res in pool.map(_max_user_threshold_worker, pairs):
u_l_dict[u_id][l_id] = res
return u_l_dict
def merge_with_thres_LL(other_LL, thres_LL, pairs):
"""Merge the dictionaries containing the threshold-LL and other thresholds.
other_LL is modified in place.
"""
for u_id, l_id in pairs:
for key in thres_LL[u_id][l_id]:
other_LL[u_id][l_id][key] = thres_LL[u_id][l_id][key]
return None
def get_all_durations(data_dict, pairs):
"""Generates all durations from the LL_dict or the data_dict."""
def _get_duration(user_id, item_id):
"""Generates test/train/total duration for the given user_id, item_id pair."""
session = data_dict[user_id][item_id]
session_length = len(session)
if session_length > 2:
train_duration = session[-2]['timestamp'] - session[0]['timestamp']
test_duration = session[-1]['timestamp'] - session[-2]['timestamp']
else:
train_duration = None
test_duration = None
if session_length > 1:
total_duration = session[-1]['timestamp'] - session[0]['timestamp']
else:
total_duration = None
return {
'train_duration': train_duration,
'test_duration': test_duration,
'total_duration': total_duration,
'session_length': session_length,
}
dur_dict = defaultdict(lambda: {})
for u_id, i_id in pairs:
dur_dict[u_id][i_id] = _get_duration(u_id, i_id)
return dur_dict
def filter_by_duration(durations_dict, pairs, T, alpha, verbose=False):
"""Filter the (u_id, l_id) by selecting those which have the duration in
[(1 - alpha) * T, (1 + alpha) * T]."""
filtered_pairs = []
for u_id, l_id in pairs:
train_duration = durations_dict[u_id][l_id]['train_duration'] / TIME_SCALE
if (1 - alpha) * T <= train_duration <= (1 + alpha) * T:
filtered_pairs.append((u_id, l_id))
count = len(filtered_pairs)
total = len(pairs)
if verbose:
print('{} / {} = {:.2f}% sequences selected.'
.format(count, total, count / total * 100.))
return filtered_pairs
def filter_by_users(pairs, users_, verbose=False):
"""Filter the (u_id, l_id) by selecting those which have u_id \in users_."""
filtered_pairs = []
for u_id, l_id in pairs:
if u_id in users_:
filtered_pairs.append((u_id, l_id))
count = len(filtered_pairs)
total = len(pairs)
if verbose:
print('{} / {} = {:.2f}% sequences selected.'
.format(count, total, count / total * 100.))
return filtered_pairs
def calc_empirical_forgetting_rate(data_dict, pairs, return_base=False, no_norm=False):
u_l_dict = defaultdict(lambda: defaultdict(lambda: None))
base = {}
base_count = {}
for u_id, l_id in pairs:
first_session = data_dict[u_id][l_id][0]
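# Empirical forgetting rate ~ -ln(p_recall) / delta, with p_recall clipped to [0.01, 0.99] and a small offset added to delta for numerical stability.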
res = (- np.log(max(0.01, min(0.99, first_session['p_recall'] + 1e-10))) / (first_session['delta_scaled'] + 0.1))
if l_id not in base:
base[l_id] = res
base_count[l_id] = 1
else:
base[l_id] += res
base_count[l_id] += 1
if return_base:
return dict([(l_id, base[l_id] / base_count[l_id]) for l_id in base.keys()])
for u_id, l_id in pairs:
last_session = data_dict[u_id][l_id][-1]
u_l_dict[u_id][l_id] = - np.log(max(0.01, min(0.99, last_session['p_recall'] + 1e-10))) / (last_session['delta_scaled'] + 0.1)
if not no_norm:
u_l_dict[u_id][l_id] = (u_l_dict[u_id][l_id]) / (base[l_id] / base_count[l_id])
else:
u_l_dict[u_id][l_id] = u_l_dict[u_id][l_id]
return u_l_dict
def calc_top_k_perf(LL_dict, perf, pairs, quantile=0.25, min_reps=0,
max_reps=None, with_overall=False, with_threshold=False,
only_finite=True, with_uniform=True, whis=1.5):
"""Calculates the average and median performance of people in the
top quantile by log-likelihood of following either strategy."""
def check_u_l(u_id, l_id):
return (not only_finite or np.isfinite(perf[u_id][l_id])) and \
(min_reps <= 1 or len(LL_dict[u_id][l_id]['mem_op']['n_ts']) >= min_reps) and \
(max_reps is None or len(LL_dict[u_id][l_id]['mem_op']['n_ts']) < max_reps)
# global _get_top_k
def _get_top_k(key):
sorted_by_ll = sorted((LL_dict[u_id][l_id][key], u_id, l_id)
for u_id, l_id in pairs
if check_u_l(u_id, l_id))
# print("counts {}".format(len(sorted_by_ll)))
# Taking the quantile after limiting to only valid pairs.
K = int(quantile * len(sorted_by_ll))
# print("K: {}".format(K), quantile, len(sorted_by_ll[-K:]), len(sorted_by_ll[-K:]))
return sorted_by_ll[-K:], sorted_by_ll[:K]
top_memorize_LL, bottom_memorize_LL = _get_top_k('memorize_LL')
top_common_u_l = set()
bottom_common_u_l = set()
if with_uniform:
top_uniform_LL, bottom_uniform_LL = _get_top_k('uniform_LL')
top_common_u_l = set([(u_id, l_id) for _, u_id, l_id in top_uniform_LL]).intersection(
[(u_id, l_id) for _, u_id, l_id in top_memorize_LL])
bottom_common_u_l = set([(u_id, l_id) for _, u_id, l_id in bottom_uniform_LL]).intersection(
[(u_id, l_id) for _, u_id, l_id in bottom_memorize_LL])
if with_threshold:
top_threshold_LL, bottom_threshold_LL = _get_top_k('threshold_LL')
# If we have already calculated a common set, then calculate
# intersection with that set. Otherwise, take the intersection with
# memorize directly.
if not with_uniform:
top_common_u_l = set([(u_id, l_id) for _, u_id, l_id in top_threshold_LL]).intersection(
[(u_id, l_id) for _, u_id, l_id in top_memorize_LL])
bottom_common_u_l = set([(u_id, l_id) for _, u_id, l_id in bottom_threshold_LL]).intersection(
[(u_id, l_id) for _, u_id, l_id in bottom_memorize_LL])
else:
top_common_u_l = top_common_u_l.intersection(
[(u_id, l_id) for _, u_id, l_id in top_threshold_LL])
bottom_common_u_l = bottom_common_u_l.intersection(
[(u_id, l_id) for _, u_id, l_id in bottom_threshold_LL])
# global _perf_top_k
def _perf_top_k(top_ll):
return [perf[u_id][l_id]
for _, u_id, l_id in top_ll
if (u_id, l_id) not in top_common_u_l]
def _perf_top_k_elem(top_ll):
return [(u_id, l_id)
for _, u_id, l_id in top_ll
if (u_id, l_id) not in top_common_u_l]
def _perf_bottom_k(bottom_ll):
return [perf[u_id][l_id]
for _, u_id, l_id in bottom_ll
if (u_id, l_id) not in bottom_common_u_l]
# Keep only the (u_id, l_id) pairs that are not common to the top 25% of all
# strategies, because the same user-lexeme pair often has a likelihood in the
# top 25% of more than one strategy.
# print("common {}".format(len(top_common_u_l)))
perf_top_mem = _perf_top_k(top_memorize_LL)
perf_top_mem_elem = _perf_top_k_elem(top_memorize_LL)
perf_bottom_mem = _perf_bottom_k(bottom_memorize_LL)
perc = [0.1, 0.2, 0.25, 0.30, 0.40, 0.5, 0.6, 0.7, 0.75, 0.8, 0.9]
res = {
'mem_top': pd.Series(perf_top_mem).describe(percentiles=perc),
'mem_top_elem': perf_top_mem_elem,
'mem_bottom': pd.Series(perf_bottom_mem).describe(percentiles=perc),
'top_memorize_LL': top_memorize_LL,
'bottom_memorize_LL': bottom_memorize_LL,
'perf_top_mem': perf_top_mem,
'top_common_u_l': top_common_u_l,
'bottom_common_u_l': bottom_common_u_l
}
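# Box-plot style whiskers: the most extreme observed values still within whis * IQR of the quartiles.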
mem_min_whis = res['mem_top']['25%'] - (res['mem_top']['75%'] - res['mem_top']['25%']) * whis
ind = np.asarray(perf_top_mem) < mem_min_whis
if ind.sum() == 0:
res['mem_top'].loc['min_whis'] = np.asarray(perf_top_mem).min()
else:
res['mem_top'].loc['min_whis'] = np.asarray(perf_top_mem)[ind].max()
mem_max_whis = res['mem_top']['75%'] + (res['mem_top']['75%'] - res['mem_top']['25%']) * whis
ind = np.asarray(perf_top_mem) > mem_max_whis
if ind.sum() == 0:
res['mem_top'].loc['max_whis'] = np.asarray(perf_top_mem).max()
else:
res['mem_top'].loc['max_whis'] = np.asarray(perf_top_mem)[ind].min()
if with_uniform:
perf_top_unif = _perf_top_k(top_uniform_LL)
perf_top_unif_elem = _perf_top_k_elem(top_uniform_LL)
perf_bottom_unif = _perf_bottom_k(bottom_uniform_LL)
res.update({
'unif_top': pd.Series(perf_top_unif).describe(percentiles=perc),
'unif_top_elem': perf_top_unif_elem,
'unif_bottom': pd.Series(perf_bottom_unif).describe(percentiles=perc),
'top_uniform_LL': top_uniform_LL,
'bottom_uniform_LL': bottom_uniform_LL,
'perf_top_unif': perf_top_unif,
})
uni_min_whis = res['unif_top']['25%'] - (res['unif_top']['75%'] - res['unif_top']['25%']) * whis
ind = np.asarray(perf_top_unif) < uni_min_whis
if ind.sum() == 0:
res['unif_top'].loc['min_whis'] = np.asarray(perf_top_unif).min()
else:
res['unif_top'].loc['min_whis'] = np.asarray(perf_top_unif)[ind].max()
uni_max_whis = res['unif_top']['75%'] + (res['unif_top']['75%'] - res['unif_top']['25%']) * whis
ind = np.asarray(perf_top_unif) > uni_max_whis
if ind.sum() == 0:
res['unif_top'].loc['max_whis'] = np.asarray(perf_top_unif).max()
else:
res['unif_top'].loc['max_whis'] = np.asarray(perf_top_unif)[ind].min()
if with_threshold:
perf_top_threshold = _perf_top_k(top_threshold_LL)
perf_top_threshold_elem = _perf_top_k_elem(top_threshold_LL)
perf_bottom_threshold = _perf_bottom_k(bottom_threshold_LL)
res.update({
'threshold_top': pd.Series(perf_top_threshold).describe(percentiles=perc),
'threshold_top_elem': perf_top_threshold_elem,
'threshold_bottom': pd.Series(perf_bottom_threshold).describe(percentiles=perc),
'top_threshold_LL': top_threshold_LL,
'bottom_threshold_LL': bottom_threshold_LL,
'perf_top_threshold': perf_top_threshold,
})
thr_min_whis = res['threshold_top']['25%'] - (res['threshold_top']['75%'] - res['threshold_top']['25%']) * whis
ind = np.asarray(perf_top_threshold) < thr_min_whis
if ind.sum() == 0:
res['threshold_top'].loc['min_whis'] = np.asarray(perf_top_threshold).min()
else:
res['threshold_top'].loc['min_whis'] = np.asarray(perf_top_threshold)[ind].max()
thr_max_whis = res['threshold_top']['75%'] + (res['threshold_top']['75%'] - res['threshold_top']['25%']) * whis
ind = np.asarray(perf_top_threshold) > thr_max_whis
if ind.sum() == 0:
res['threshold_top'].loc['max_whis'] = np.asarray(perf_top_threshold).max()
else:
res['threshold_top'].loc['max_whis'] = np.asarray(perf_top_threshold)[ind].min()
if with_overall:
res['unif_top_overall'] = pd.Series(perf[u_id][l_id] for _, u_id, l_id in top_uniform_LL).describe()
res['mem_top_overall'] = pd.Series(perf[u_id][l_id] for _, u_id, l_id in top_memorize_LL).describe()
res['unif_bottom_overall'] = pd.Series(perf[u_id][l_id] for _, u_id, l_id in bottom_uniform_LL).describe()
res['mem_bottom_overall'] = pd.Series(perf[u_id][l_id] for _, u_id, l_id in bottom_memorize_LL).describe()
"""
This script saves the direct/indirect effects for each neuron, averaged across different groups depending
on negation type and correctness category.
Usage:
python compute_and_save_neuron_agg_effect.py $result_file_path $model_name $negation_test_set_file
"""
import os
import sys
import json
import pandas as pd
def get_correctness_category(results_df):
orig_label = results_df['orig_label'].all()
neg_label = results_df['neg_label'].all()
orig_assigned = False if float(results_df['candidate1_orig_prob'].unique()) > 0.5 else True
neg_assigned = False if float(results_df['candidate1_neg_prob'].unique()) > 0.5 else True
orig_correctness = "c" if orig_label == orig_assigned else "i"
neg_correctness = "c" if neg_label == neg_assigned else "i"
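# e.g. "c_i" means the original sentence was classified correctly and the negated one incorrectly.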
return "_".join([orig_correctness, neg_correctness])
def analyze_effect_results(results_df, effect):
# Calculate response variable under the null condition and with the neuron intervention
if results_df["orig_label"].all() == True:
odds_base = (
results_df["candidate1_orig_prob"] / results_df["candidate2_orig_prob"]
)
odds_intervention = (
results_df["candidate1_prob"] / results_df["candidate2_prob"]
)
else:
odds_base = (
results_df["candidate2_orig_prob"] / results_df["candidate1_orig_prob"]
)
odds_intervention = (
results_df["candidate2_prob"] / results_df["candidate1_prob"]
)
odds_ratio = odds_intervention / odds_base
results_df["odds_ratio"] = odds_ratio
# Add correctness category to dataframe
results_df["correctness_cat"] = get_correctness_category(results_df=results_df)
# Get the odds ratio for each neuron in each layer
results_df = results_df.pivot("neuron", "layer", "odds_ratio")
def get_all_effects(fname):
"""
Compute all effects given the fname of a direct effect file.
"""
# Step 1: Load results for current file
print(fname)
indirect_result_df = pd.read_csv(fname)
# -*- coding: utf-8 -*-
# edited from https://github.com/carpenterlab/unet4nuclei/blob/master/unet4nuclei/utils/evaluation.py and
# stardist's matching.py
import numpy as np
import pandas as pd
from scipy.optimize import linear_sum_assignment
def intersection_over_union(ground_truth, prediction):
# Count objects
true_objects = len(np.unique(ground_truth))
pred_objects = len(np.unique(prediction))
# Compute intersection
h = np.histogram2d(ground_truth.flatten(), prediction.flatten(), bins=(true_objects,pred_objects))
intersection = h[0]
# Area of objects
area_true = np.histogram(ground_truth, bins=true_objects)[0]
area_pred = np.histogram(prediction, bins=pred_objects)[0]
# Calculate union
area_true = np.expand_dims(area_true, -1)
area_pred = np.expand_dims(area_pred, 0)
union = area_true + area_pred - intersection
# Exclude background from the analysis
intersection = intersection[1:,1:]
union = union[1:,1:]
# Compute Intersection over Union
union[union == 0] = 1e-9
IOU = intersection/union
return IOU
def metrics(IOU_matrix, threshold):
n_true, n_pred = IOU_matrix.shape
n_matched = min(n_true, n_pred)
if IOU_matrix.shape[0] > 0:
jaccard = np.max(IOU_matrix, axis=0).mean()
else:
jaccard = 0.0
# compute optimal matching with scores as tie-breaker
costs = -(IOU_matrix >= threshold).astype(float) - IOU_matrix / (2*n_matched)
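# Hungarian assignment on the IoU matrix yields a one-to-one matching of ground-truth and predicted objects that maximizes total IoU.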
true_ind, pred_ind = linear_sum_assignment(-IOU_matrix)
assert n_matched == len(true_ind) == len(pred_ind)
matches = IOU_matrix[true_ind,pred_ind] >= threshold
true_positives = np.count_nonzero(matches) # Correct objects
false_positives = n_pred - true_positives # Extra objects
false_negatives = n_true - true_positives # Missed objects
#Precision
precision = true_positives/(true_positives + false_positives + 1e-9) if true_positives > 0 else 0
#Recall
recall = true_positives/(true_positives + false_negatives + 1e-9) if true_positives > 0 else 0
#Accuracy also known as "average precision"
Accuracy = true_positives/(true_positives + false_positives + false_negatives + 1e-9) if true_positives > 0 else 0
#F1 also known as "dice coefficient"
f1 = (2*true_positives)/(2*true_positives + false_positives + false_negatives + 1e-9) if true_positives > 0 else 0
# obtain the sum of iou values for all matched objects
sum_matched_score = np.sum(IOU_matrix[true_ind,pred_ind][matches])
# the score average over all matched objects (tp)
mean_matched_score = sum_matched_score / (true_positives + 1e-9)
# the score average over all gt/true objects
mean_true_score = sum_matched_score / (n_true + 1e-9)
#panoptic_quality defined as in Eq. 1 of Kirillov et al. "Panoptic Segmentation", CVPR 2019
panoptic_quality = sum_matched_score / ( true_positives + false_positives/2 + false_negatives/2 + 1e-9)
res = pd.DataFrame({"Threshold": threshold,
"Jaccard": jaccard,
"TP": true_positives,
"FP": false_positives,
"FN": false_negatives,
"Precision": precision,
"Recall": recall,
"Accuracy": Accuracy,
"F1": f1,
"sum_matched_score":sum_matched_score,
"mean_matched_score":mean_matched_score,
"mean_true_score":mean_true_score,
"panoptic_quality":panoptic_quality}, index=[0])
del jaccard, true_positives, false_positives, false_negatives, precision, recall, Accuracy, f1, mean_matched_score, mean_true_score, panoptic_quality
return res
def evaluate_segementation_per_image(ground_truth, prediction, thresholds_list, identifier):
# Compute IoU
IOU = intersection_over_union(ground_truth, prediction)
# Compute metrics accross all thresholds
df = pd.DataFrame()
# %%
'''
'''
## Import the required libraries
import pandas as pd
import numpy as np
import datetime as dt
from datetime import timedelta
pd.options.display.max_columns = None
pd.options.display.max_rows = None
import glob as glob
import datetime
import re
import jenkspy
import tkinter as tk
root= tk.Tk()
canvas1 = tk.Canvas(root, width = 300, height = 300)
canvas1.pack()
# %%
def profiling():
#### Read Databases
datas=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/data_con_drop.csv',sep=';',encoding='utf-8',dtype='str')
salida=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/salida_limpia.csv',sep=';',encoding='utf-8',dtype='str')
seguimiento= pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/seguimiento.csv',sep=';',encoding='utf-8',dtype='str')
import pandas as pd
import sys,os,io,re
import numpy as np
path=sys.argv[1]
outName=sys.argv[2]
thresh=int(sys.argv[3])
anno_file=sys.argv[4]
anno_table=pd.read_csv(anno_file)
anno_col=["event_cat","group_increased_alt","aa_change_type","effect_cat"]
anno_col=list(np.intersect1d(anno_col,anno_table.columns))
filelist=os.listdir(path)
out_files=[]
for file in filelist:
if file.startswith(outName) and file.endswith('.bisbeeOutlier.csv'):
out_files.append(file)
columns=pd.read_csv(path + "/" + out_files[0],nrows=1).columns
data_col=columns[np.where(columns=='event_jid')[0][0]+1:]
total_counts=anno_table.pivot_table(index=anno_col,values='event_jid',aggfunc=len)
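# Flatten the MultiIndex over the annotation columns into single underscore-joined row labels.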
total_counts.index = ["_".join(v) for v in total_counts.index.values]
out_counts= pd.DataFrame(index=total_counts.index,columns=data_col,data=0)
# With this script, previously omitted rows are added again (my bad).
# post_id is a 1 to 1 connection because they are unique.
import pandas as pd
df_a = pd.read_csv("filtered_messages_subforum_and_keyword_with_spellcheck_all.csv", encoding="utf-8", sep=';')
df_b = pd.read_csv("crawling_results/posts_and_threads_all.csv", encoding="utf-8", sep=';')
# -*- coding: utf-8 -*-
# Copyright (c) 2019 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import codecs
import datetime
import logging
import logging.config
import os
import re
import time
import numpy as np
import sharkpylib
from sharkpylib import mappinglib
from sharkpylib.file import txt_reader
from sharkpylib.file.file_handlers import Directory
from sharkpylib.file.file_handlers import ListDirectory
from sharkpylib.file.file_handlers import MappingDirectory
from sharkpylib.qc.mask_areas import MaskAreasDirectory
try:
import pandas as pd
except:
pass
import sys
parent_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if parent_directory not in sys.path:
sys.path.append(parent_directory)
from sharkpylib import gismo
class TavastlandException(Exception):
"""
Blueprint for error message.
code is for external mapping of exceptions. For example if a GUI wants to
handle the error text for different languages.
"""
code = None
message = ''
def __init__(self, message='', code=''):
self.message = '{}: {}'.format(self.message, message)
if code:
self.code = code
class TavastlandExceptionCorrupedFile(TavastlandException):
"""
"""
code = ''
message = 'Corrupted file'
class TavastlandExceptionNoCO2data(TavastlandException):
"""
"""
code = ''
message = ''
class TavastlandExceptionNoMatchWhenMerging(TavastlandException):
"""
"""
code = ''
message = ''
class File(object):
def __init__(self, file_path='', **kwargs):
self._set_logger(kwargs.get('logger'))
self.file_path = file_path
self.file_directory = os.path.dirname(self.file_path)
self.file_name = os.path.basename(self.file_path)
self.file_id = self.file_name
self.df = pd.DataFrame()
self.time_start = None
self.time_end = None
self.data_loaded = None
self.time_in_file_name_formats = ['TP_%Y%m%d%H%M%S.mit']
self._add_file_path_time()
self.time_frozen_between = []
if kwargs.get('load_file'):
self.load_file()
def _set_logger(self, logger):
if logger:
self.logger = logger
else:
logging.config.fileConfig('logging.conf')
self.logger = logging.getLogger('timedrotating')
def _len_header_equals_len_data(self, file_path):
with open(file_path) as fid:
for r, line in enumerate(fid):
split_line = line.split('\t')
if r==0:
header = split_line
else:
if len(header) == len(split_line):
return True
return False
def _add_file_path_time(self):
self.file_path_time = None
self.file_path_year = None
self.file_path_possible_years = []
for time_format in self.time_in_file_name_formats:
try:
time_object = datetime.datetime.strptime(self.file_name, time_format)
self.file_path_time = time_object
break
except ValueError:
# logger.debug('No time in file path for file: {}'.format(self.file_path))
pass
# Find year
result = re.findall('\d{4}', self.file_name)
if result:
self.file_path_year = int(result[0])
self.file_path_possible_years = [self.file_path_year-1, self.file_path_year, self.file_path_year+1]
def _delete_columns(self):
if 'Date' in self.df.columns:
self.df.drop(['Date'], axis=1, inplace=True)
# Time is removed in method _add_columns
elif 'PC Date' in self.df.columns:
self.df.drop(['PC Date', 'PC Time'], axis=1, inplace=True)
if 'Lat' in self.df.columns:
self.df.drop(['Lat', 'Lon'], axis=1, inplace=True)
elif 'latitude' in self.df.columns:
self.df.drop(['latitude', 'longitude'], axis=1, inplace=True)
def _add_columns(self):
# Time
if 'Date' in self.df.columns:
time_str = self.df['Date'] + ' ' + self.df['Time'].copy()
self.df.drop('Time', axis=1, inplace=True)
self.df['time'] = pd.to_datetime(time_str, format='%d.%m.%Y %H:%M:%S')
elif 'PC Date' in self.df.columns:
time_str = self.df['PC Date'] + ' ' + self.df['PC Time']
self.df['time'] = pd.to_datetime(time_str, format='%d/%m/%y %H:%M:%S')
# Position
if 'Lat' in self.df.columns:
self.df['lat'] = self.df['Lat'].apply(as_float)
self.df['lon'] = self.df['Lon'].apply(as_float)
elif 'latitude' in self.df.columns:
self.df['lat'] = self.df['latitude'].apply(as_float)
self.df['lon'] = self.df['longitude'].apply(as_float)
else:
self.df['lat'] = np.nan
self.df['lon'] = np.nan
self.df['source_file'] = self.file_name
def _remove_duplicates(self):
# print('REMOVE DUPLICATES', self.file_id)
# First save missing periodes
dub_boolean = self.df.duplicated('time', keep=False)
between = []
missing_period = []
for i, t0, b0, t1, b1 in zip(self.df.index[:-1], self.df['time'].values[:-1], dub_boolean.values[:-1],
self.df['time'].values[1:], dub_boolean.values[1:]):
if i == 0 and b0:
missing_period.append('?')
if b1 and not b0:
# t0s = pd.to_datetime(t0).strftime('%Y%m%d%H%M%S')
# missing_period.append(t0s)
missing_period.append(t0)
elif b0 and not b1:
# t1s = pd.to_datetime(t1).strftime('%Y%m%d%H%M%S')
# missing_period.append(t1s)
missing_period.append(t1)
# print(missing_period)
if len(missing_period) == 2:
between.append(missing_period)
# between.append('-'.join(missing_period))
missing_period = []
if missing_period:
missing_period.append('?')
between.append(missing_period)
# between.append('-'.join(missing_period))
# print('between:', len(between))
self.time_frozen_between = between
# Now drop all duplicates
self.df.drop_duplicates('time', keep=False, inplace=True)
def valid_data_line(self, line):
if 'DD.MM.YYYY' in line:
# print('DD.MM.YYYY', self.file_path)
return False
if not line.strip():
# print('BLANK', self.file_path)
return False
return True
def load_file(self, **kwargs):
if not os.path.exists(self.file_path):
raise FileNotFoundError
header = []
data = []
with codecs.open(self.file_path, encoding=kwargs.get('encoding', 'cp1252')) as fid:
for row, line in enumerate(fid):
split_line = line.strip('\n\r').split(kwargs.get('sep', '\t'))
split_line = [item.strip() for item in split_line]
if row == 1 and header:
if len(header) != len(split_line):
header = header[:len(split_line)]
if not header:
header = split_line
else:
if len(header) != len(split_line):
raise TavastlandExceptionCorrupedFile
self.logger.warning('Invalid row {} in file: {}'.format(row, self.file_path))
self.data_loaded = False
return False
if not self.valid_data_line(line):
self.logger.warning('Removing invalid line {} from file: {}'.format(row, self.file_path))
continue
data.append(split_line)
self.original_columns = header[:]
self.df = pd.DataFrame(data, columns=header)
self._add_columns()
self._remove_duplicates()
self.filter_data()
self._delete_columns()
self.data_loaded = True
return True
def filter_data(self):
"""
Filters the data from unwanted lines etc.
:return:
"""
combined_keep_boolean = pd.Series([True]*len(self.df))
keep_boolean = ~self.df[self.original_columns[0]].str.contains('DD.MM.YYYY')
combined_keep_boolean = combined_keep_boolean & keep_boolean
keep_boolean = ~self.df[self.original_columns[0]].str.contains('.1904')
combined_keep_boolean = combined_keep_boolean & keep_boolean
keep_boolean = self.df['time'] <= datetime.datetime.now()
combined_keep_boolean = combined_keep_boolean & keep_boolean
removed = self.df.loc[~combined_keep_boolean]
if len(removed):
self.logger.warning('{} lines removed from file {}'.format(len(removed), self.file_path))
self.df = self.df.loc[combined_keep_boolean, :]
def clean_file(self, export_directory):
"""
Loads file (including filter data) and saves to the export directory.
:return:
"""
# print(export_directory)
if export_directory == self.file_directory:
raise TavastlandException('Cannot export to the same directory!')
if not os.path.exists(export_directory):
os.makedirs(export_directory)
if self.data_loaded is None:
self.load_file()
export_file_path = os.path.join(export_directory, self.file_name)
self.df[self.original_columns].to_csv(export_file_path, index=False, sep='\t')
def get_df(self):
if self.data_loaded is None:
self.load_file()
return self.df
def get_time_range(self):
def get_time(line):
date = re.findall('\d{2}\.\d{2}\.\d{4}', line)
time = re.findall('\d{2}:\d{2}:\d{2}', line)
if date and time:
return datetime.datetime.strptime(date[0] + time[0], '%d.%m.%Y%H:%M:%S')
date = re.findall('\d{2}/\d{2}/\d{2}', line)
time = re.findall('\d{2}:\d{2}:\d{2}', line)
if date and time:
return datetime.datetime.strptime(date[0] + time[0], '%d/%m/%y%H:%M:%S')
self.time_start = None
self.time_end = None
if self.data_loaded:
self.time_start = self.df.time.values[0]
self.time_end = self.df.time.values[-1]
return self.time_start, self.time_end
else:
with codecs.open(self.file_path) as fid:
for r, line in enumerate(fid):
if self.valid_data_line(line):
if r == 0:
continue
elif not self.time_start:
time = get_time(line)
self.time_start = time
self.time_end = get_time(line)
return self.time_start, self.time_end
def in_time_range(self, datetime_object):
if not self.time_start:
self.get_time_range()
return (datetime_object >= self.time_start) & (datetime_object <= self.time_end)
def check_if_valid_file_name(self):
"""
External method.
Returns True if file_name follows the structure(s) described in method.
:param file_name:
:return:
"""
raise NotImplementedError
def warnings(self):
"""
Returns a list of strange things found in the file. Strange things can be handled.
:return: list with description of the warnings.
"""
raise NotImplementedError
def get_file_errors(self):
"""
Returns a list of errors in file if any. Errors are obvious faults that can not be handled.
:return list with description of the errors.
"""
raise NotImplementedError
def _get_file_errors(self):
error_list = []
if not self._len_header_equals_len_data(self.file_path):
text = 'Header is not the same length as data in file: {}.'.format(self.file_name)
error_list.append(text)
return error_list
class MITfile(File):
def __init__(self, file_path='', **kwargs):
File.__init__(self, file_path, **kwargs)
def check_if_valid_file_name(self, file_name):
"""
External method.
Returns True if file_name follows the structure(s) described in method.
:param file_name:
:return:
"""
if not file_name.endswith('.mit'):
return False
return True
def warnings(self):
"""
Returns a list of strange things found in file. Strange things kan be handled.
:return: list with description of the warnings.
"""
raise NotImplementedError
def get_file_errors(self):
"""
Returns a list of errors in file if any. Errors are obvious faults that can not be handled.
:return list with description of the errors.
"""
error_list = self._get_file_errors()
# Check time
start, end = self.get_time_range()
d = datetime.datetime(1980, 1, 1)
this_year = datetime.datetime.now().year
if not all([start, end]):
text = 'Could not find time in file {}.'.format(self.file_name)
error_list.append(text)
else:
if start < d:
text = 'Start date is too early in file {}. Before {}'.format(self.file_name, d.strftime('%Y%m%d'))
error_list.append(text)
# continue
if start > end:
text = 'Start time > end time in file {}.'.format(self.file_name)
error_list.append(text)
# continue
if any([start.year > this_year, end.year > this_year]):
text = 'Start year or end year is later than current year in file {}.'.format(self.file_name)
error_list.append(text)
# continue
if any([start.year == 1904, end.year == 1904]):
text = 'Start year or end year is 1904 in file {}.'.format(self.file_name)
self.logger.info(text)
error_list.append(text)
if error_list:
self.logger.info('; '.join(error_list))
return error_list
class CO2file(File):
def __init__(self, file_path='', **kwargs):
File.__init__(self, file_path, **kwargs)
def check_if_valid_file_name(self, file_name):
"""
External method.
Returns True if file_name follows the structure(s) described in method.
:param file_name:
:return:
"""
if not file_name.endswith('dat.txt'):
return False
return True
def warnings(self):
"""
Returns a list of strange things found in file. Strange things kan be handled.
:return: list with description of the warnings.
"""
raise NotImplementedError
def get_file_errors(self):
"""
Returns a list of errors in file if any. Errors are obvious faults that can not be handled.
:return list with description of the errors.
"""
error_list = self._get_file_errors()
# Check time
start, end = self.get_time_range()
d = datetime.datetime(1980, 1, 1)
this_year = datetime.datetime.now().year
if not all([start, end]):
text = 'Could not find time in file {}.'.format(self.file_name)
error_list.append(text)
if error_list:
self.logger.info('; '.join(error_list))
return error_list
class FileHandler(object):
def __init__(self, **kwargs):
self._set_logger(kwargs.get('logger'))
self.logger.debug('Starting FileHandler for Tavastland')
self.directories = {}
self.directories['mit'] = kwargs.get('mit_directory', None)
self.directories['co2'] = kwargs.get('co2_directory', None)
self.export_directory = kwargs.get('export_directory', None)
self.save_directory = None
self.current_merge_data = pd.DataFrame()
self.df_header = ['file_id', 'file_path', 'time_start', 'time_end']
self.export_time_format_str = '%Y%m%d%H%M%S'
self.package_prefix = 'ferrybox-tavastland'
self.objects = dict()
self.dfs = dict()
self.files_with_errors = dict()
self.corruped_files = dict()
self.metadata = []
self.metadata_added = {}
self.time_frozen_between = {}
list_dir_object = ListDirectory()
self.exclude_co2_types = list_dir_object.get_file_object('list_tavastland_exclude_types.txt', comment='#').get()
self.reset_time_range()
self.reset_data()
self.set_time_delta(seconds=30)
for file_type, directory in self.directories.items():
if directory:
self.set_file_directory(file_type, directory)
def _set_logger(self, logger):
if logger:
self.logger = logger
print('SETTING LOGGER', self.logger.name)
else:
logging.config.fileConfig('logging.conf')
self.logger = logging.getLogger('timedrotating')
def set_export_directory(self, directory):
"""
Sets the export directory.
:param directory:
:return:
"""
self.export_directory = directory
def set_file_directory(self, file_type, directory):
"""
Saves paths to files in the given directory for the given file_type
:param file_type:
:return:
"""
this_year = datetime.datetime.now().year
if file_type == 'mit':
File_type_class = MITfile
file_type_object = MITfile(logger=self.logger)
elif file_type == 'co2':
File_type_class = CO2file
file_type_object = CO2file(logger=self.logger)
self.files_with_errors[file_type] = []
self.corruped_files[file_type] = []
self.objects[file_type] = dict()
data_lines = []
for root, dirs, files in os.walk(directory):
for name in files:
if not file_type_object.check_if_valid_file_name(name):
continue
file_path = os.path.join(root, name)
file_object = File_type_class(file_path, logger=self.logger)
start, end = file_object.get_time_range()
errors = file_object.get_file_errors()
if errors:
print('name', name)
print('errors', errors)
errors_dict = {name: errors}
self.files_with_errors[file_type].append(errors_dict)
data_lines.append([name, file_path, start, end])
self.objects[file_type][name] = file_object
if not data_lines:
raise TavastlandException('No valid {}-files found!'.format(file_type))
self.dfs[file_type] = pd.DataFrame(data_lines, columns=self.df_header)
self.dfs[file_type].sort_values('time_start', inplace=True)
def get_file_id(self, time=None, file_type='mit'):
"""
Returns the mit file matching the given input.
:param time: datetime_object
:return:
"""
if time:
result = self.dfs[file_type].loc[(self.dfs[file_type]['time_start'] <= time) &
(time <= self.dfs[file_type]['time_end']), 'file_id'].values
if len(result) > 1:
self.logger.debug('Several files matches time stamp: {}\n{}'.format(time, '\n'.join(list(result))))
raise TavastlandException('Several files matches time stamp {}: \n{}'.format(time, '\n'.join(list(result))))
elif len(result) == 0:
return None
else:
return result[0]
else:
raise AttributeError('Missing input parameter "time"')
def get_previous_file_id(self, file_id=None, time_stamp=None, file_type='mit'):
"""
Returns the previous file_id
:param file_id:
:return:
"""
df = self.dfs.get(file_type)
if file_id:
if file_id in df['file_id'].values:
index = df.index[df['file_id'] == file_id][0]
if index == 0:
return None
else:
return df.at[index-1, 'file_id']
else:
return None
elif time_stamp:
end_time_boolean = df['time_end'] < time_stamp
matching_file_id_list = df.loc[end_time_boolean]['file_id'].values
# print('='*20)
# print('matching_file_id_list')
# print(matching_file_id_list)
# print(type(matching_file_id_list))
if any(matching_file_id_list):
return matching_file_id_list[-1]
else:
return None
def set_time_range(self, time_start=None, time_end=None, time=None, file_id=None, file_type='mit'):
"""
Selects/sets the period to work with. You can select data by giving start and end time or by file_id.
There is also an option to find the file_id from a time stamp (looking at the mit file) given in time. All time objects are of type
datetime.datetime.
:param time_start:
:param time_end:
:param time:
:param file_id:
:return:
"""
if time:
file_id = self.get_file_id(time=time, file_type=file_type)
if file_id:
for file_type in self.objects:
if file_id in self.objects[file_type]:
time_start, time_end = self.objects[file_type][file_id].get_time_range()
break
else:
raise ValueError('Could not find file_id {}'.format(file_id))
self.reset_time_range()
self.current_time_start = time_start
self.current_time_end = time_end
def set_time_delta(self, **kwargs):
"""
Sets the timedelta allowed for matching data.
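kwargs are passed directly to pandas.Timedelta, e.g. set_time_delta(seconds=30) as used in __init__.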
:param kwargs:
:return:
"""
self.time_delta = pd.Timedelta(**kwargs)
def reset_time_range(self):
self.current_time_start = None
self.current_time_end = None
self.reset_data()
def load_data(self):
"""
Loads data in the time range. The time range is set in the method set_time_range.
:return:
"""
t0 = time.time()
if not all([self.current_time_start, self.current_time_end]):
raise Exception
self.reset_data()
# Load files within time range
self.current_data['mit'] = self.get_data_within_time_range('mit', self.current_time_start, self.current_time_end)
self.current_data['co2'] = self.get_data_within_time_range('co2', self.current_time_start, self.current_time_end)
# Reset index
self.current_data['mit'] = self.current_data['mit'].reset_index(drop=True)
self.current_data['co2'] = self.current_data['co2'].reset_index(drop=True)
# print('Load data')
# print('mit', len(self.current_data['mit']))
# print('co2', len(self.current_data['co2']))
# print('Loaded in: {}'.format(time.time()-t0))
def reset_data(self):
self.current_data = {}
self.current_merge_data = pd.DataFrame()
self.pCO2_constants = {}
self.std_val_list = []
self.std_co2_list = []
self.std_latest_time = None
self.time_frozen_between = {}
def clean_files(self, export_directory, file_list=False):
if not self.current_data:
raise TavastlandException
if not file_list:
file_list = []
for key, value in self.objects.items():
for file_name in value:
if self.objects[key][file_name].data_loaded:
file_list.append(file_name)
# Clean files and save in subdirectories
for key in self.objects:
directory = os.path.join(export_directory, 'cleaned_files', key)
for file_name in file_list:
if file_name in self.objects[key]:
self.objects[key][file_name].clean_file(directory)
def get_data_within_time_range(self, file_type, time_start, time_end):
"""
Extracts data within the time range from mit or co2 files. Expands time limits with self.time_delta first.
:param file_type: mit or co2
:param time_start:
:param time_end:
:return:
"""
# print('get_data_within_time_range')
self.time_frozen_between[file_type] = []
object_dict = self.objects.get(file_type)
file_id_list = self.get_file_ids_within_time_range(file_type, time_start, time_end)
ts = np.datetime64(time_start)
te = np.datetime64(time_end)
df = pd.DataFrame()
for file_id in file_id_list:
if file_id in self.files_with_errors:
self.logger.warning('Discarding file {}. File has errors!'.format(file_id))
continue
object = object_dict.get(file_id)
try:
object_df = object.get_df()
except TavastlandExceptionCorrupedFile:
self.corruped_files[file_type].append(file_id)
self.logger.warning('Discarding file {}. File has errors!'.format(file_id))
continue
df = df.append(object_df)
# print('file_id', file_id)
# print('object.time_frozen_between', object.time_frozen_between)
for t in object.time_frozen_between:
# print(t, time_start, time_end)
add = False
# print(t[0], time_start)
# print(type(t[0]), type(time_start))
if t[0] != '?' and t[0] >= ts:
add = True
elif t[1] != '?' and t[1] <= te:
add = True
if add:
self.time_frozen_between[file_type].append(t)
if not len(df):
raise TavastlandExceptionNoCO2data('No data in time range {} - {}'.format(time_start, time_end))
else:
df.sort_values('time', inplace=True)
# Add file type to header
df.columns = ['{}_{}'.format(file_type, item) for item in df.columns]
df['time'] = df['{}_time'.format(file_type)]
# Strip dates
if file_type == 'co2':
time_start = time_start - self.time_delta
time_end = time_end + self.time_delta
time_boolean = (df.time >= time_start) & (df.time <= time_end)
df = df.loc[time_boolean]
df.sort_values(by='time', inplace=True)
return df
def get_file_ids_within_time_range(self, file_type, time_start, time_end):
"""
Returns a list of the matching file_ids found in self.dfs
:param file_type:
:param time_start:
:param time_end:
:return:
"""
df = self.dfs.get(file_type)
ts = time_start - self.time_delta
te = time_end + self.time_delta
boolean = (df['time_end'] >= ts) & (df['time_end'] <= te)
# | (df['time_start'] <= ts) & (df['time_start'] <= te)
# if not any(boolean):
# boolean = (df['time_end'] >= ts) & (df['time_end'] <= te)
return sorted(df.loc[boolean, 'file_id'])
def get_files_with_errors(self, file_type):
"""
Returns a list with all files that have errors in them.
:param file_type:
:return:
"""
file_list = []
for file_name_dict in self.files_with_errors[file_type]:
file_list.append(list(file_name_dict.keys())[0])
return file_list
def merge_data(self):
"""
Merges the dataframes in self.current_data.
:return:
"""
missing_data = []
for file_type, df in self.current_data.items():
if not len(df):
missing_data.append(file_type)
if missing_data:
raise Exception('Missing data from the following sources: {}'.format(', '.join(missing_data)))
# We do not want same co2 merging to several lines in mit.
# Therefore we start by merging co2 and mit with the given tolerance.
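# merge_asof pairs each co2 row with the nearest-in-time mit row, but only when the two are within self.time_delta of each other.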
co2_merge = pd.merge_asof(self.current_data['co2'], self.current_data['mit'],
on='time',
tolerance=self.time_delta,
direction='nearest')
# In this df we only want to keep lines that has mit_time
co2_merge = co2_merge[~pd.isna(co2_merge['mit_time'])]
# co2_merge.sort_values('time', inplace=True)
# Now we merge (outer join) the original mit-dataframe with the one we just created.
# This will create a df that only has one match of co2 for each mit (if matching).
self.current_merge_data = pd.merge(self.current_data['mit'],
co2_merge,
left_on='mit_time',
right_on='mit_time',
suffixes=('', '_remove'),
how='outer')
remove_columns = [col for col in self.current_merge_data.columns if col.endswith('_remove')]
self.current_merge_data.drop(remove_columns, axis=1, inplace=True)
self.current_merge_data = self.current_merge_data.reset_index(drop=True)
# Add time par
self.current_merge_data['time'] = self.current_merge_data['mit_time']
# Add position par
self.current_merge_data['lat'] = self.current_merge_data['mit_lat']
self.current_merge_data['lon'] = self.current_merge_data['mit_lon']
self.mit_columns = [col for col in self.current_merge_data.columns if col.startswith('mit_')]
self.co2_columns = [col for col in self.current_merge_data.columns if col.startswith('co2_')]
# Add diffs
self.current_merge_data['diff_time'] = abs(self.current_merge_data['co2_time'] - \
self.current_merge_data['mit_time']).astype('timedelta64[s]')
self.current_merge_data['diff_lat'] = self.current_merge_data['co2_lat'] - \
self.current_merge_data['mit_lat']
self.current_merge_data['diff_lon'] = self.current_merge_data['co2_lon'] - \
self.current_merge_data['mit_lon']
self.diff_columns = [col for col in self.current_merge_data.columns if col.startswith('diff_')]
if self.current_merge_data['diff_time'].isnull().values.all():
raise TavastlandExceptionNoMatchWhenMerging('No match in data between {} and {} '
'with time tolerance {} seconds'.format(self.current_time_start,
self.current_time_end,
self.time_delta.seconds))
self._sort_merge_data_columns()
# Add merge comment
if not self.metadata_added.get('time_tolerance'):
self.metadata = [f'COMMENT_MERGE;{self._get_time_string()};Data merged with time tolerance '
f'{self.time_delta.seconds} seconds.']
self.metadata_added['time_tolerance'] = True
def _sort_merge_data_columns(self):
columns = sorted(self.current_merge_data.columns)
columns.pop(columns.index('time'))
columns.pop(columns.index('lat'))
columns.pop(columns.index('lon'))
new_columns = ['time', 'lat', 'lon'] + columns
self.current_merge_data = self.current_merge_data[new_columns]
self.current_merge_data.fillna('', inplace=True)
def _mapp_columns(self, df=None):
if df is None:
df = self.current_merge_data
mapping_dir_object = MappingDirectory()
mapping = mapping_dir_object.get_file_object('mapping_tavastland.txt', from_col='co2_merged_file', to_col='nodc')
df.columns = mapping.get_mapped_list(df.columns)
def _remove_types(self):
boolean = self.current_merge_data['co2_Type'].isin(self.exclude_co2_types)
self.current_merge_data.loc[boolean, self.co2_columns] = ''
def old_remove_areas(self, file_path):
"""
Remove areas listed in file_path. file_path should be of type gismo.qc.qc_trajectory.
Maybe this class should be located in a more general place.
:param file_path:
:return:
"""
area_object = gismo.qc.qc_trajectory.FlagAreasFile(file_path)
areas = area_object.get_areas()
df = self.current_merge_data
masked_areas = []
combined_boolean = df['time'] == ''
for name, area in areas.items():
lat_min = area.get('lat_min')
lat_max = area.get('lat_max')
lon_min = area.get('lon_min')
lon_max = area.get('lon_max')
boolean = (df['lat'].astype(float) >= lat_min) & \
(df['lat'].astype(float) <= lat_max) & \
(df['lon'].astype(float) >= lon_min) & \
(df['lon'].astype(float) <= lon_max)
if len(np.where(boolean)):
masked_areas.append(name)
combined_boolean = combined_boolean | boolean
# Remove areas
self.current_merge_data = self.current_merge_data.loc[~combined_boolean, :]
return masked_areas
def get_nr_rows(self, file_type):
return len(self.current_data[file_type])
def get_min_and_max_time(self):
"""
Returns the minimum and maximum time found looking in both time_start and time_end and all file_types.
:return:
"""
time_list = []
for df in self.dfs.values():
time_list.extend(list(df['time_start']))
time_list.extend(list(df['time_end']))
return min(time_list), max(time_list)
def get_merge_data(self):
"""
Returns merge data limited by time range
:return:
"""
boolean = (self.current_merge_data['time'] >= self.current_time_start) & \
(self.current_merge_data['time'] <= self.current_time_end)
return self.current_merge_data.loc[boolean, :].copy()
def old_map_header_like_iocftp(self):
"""
:return:
"""
mappings = mappinglib.MappingDirectory()
mapping_object = mappings.get_mapping_object('mapping_tavastland',
from_col='merged_file',
to_col='IOCFTP_tavastland')
new_header = []
for col in self.current_merge_data.columns:
new_header.append(mapping_object.get(col))
self.current_merge_data.columns = new_header
def old_map_header_like_internal(self):
"""
:return:
"""
mappings = mappinglib.MappingDirectory()
mapping_object = mappings.get_mapping_object('mapping_tavastland',
from_col='IOCFTP_tavastland',
to_col='internal')
new_header = []
for col in self.current_merge_data.columns:
new_header.append(mapping_object.get(col))
self.current_merge_data.columns = new_header
def calculate_pCO2(self):
"""
Calculates pCO2 on self.current_merge_data
:return:
"""
self.current_merge_data['calc_k'] = np.nan
self.current_merge_data['calc_m'] = np.nan
self.current_merge_data['calc_Pequ'] = np.nan
self.current_merge_data['calc_pCO2 dry air'] = np.nan
self.current_merge_data['calc_xCO2'] = np.nan
self.current_merge_data['calc_pCO2'] = np.nan
items = ['calc_k', 'calc_m', 'calc_xCO2', 'calc_Pequ', 'calc_pCO2 dry air', 'calc_time_since_latest_std']
for i in self.current_merge_data.index:
values = self._get_pCO2_data_from_row(self.current_merge_data.iloc[i])
for key in items:
self.current_merge_data.at[i, key] = values.get(key, np.nan)
# self.current_merge_data.at[i, 'calc_k'] = values.get('calc_k', np.nan)
# self.current_merge_data.at[i, 'calc_m'] = values.get('calc_m', np.nan)
# self.current_merge_data.at[i, 'calc_Pequ'] = values.get('calc_Pequ', np.nan)
# self.current_merge_data.at[i, 'calc_pCO2 dry air'] = values.get('calc_pCO2 dry air', np.nan)
# self.current_merge_data.at[i, 'calc_xCO2'] = values.get('calc_xCO2', np.nan)
self._calculate_pCO2()
self._sort_merge_data_columns()
self._remove_types()
# self._mapp_columns()
def _calculate_pCO2(self):
salinity_par = 'mit_Sosal'
temp_par = 'mit_Soxtemp'
equ_temp_par = 'co2_equ temp'
# Tequ = self.current_merge_data['co2_equ temp'].astype(float) + 273.15 # temp in Kelvin
try:
Tequ = np.array([as_float(item) for item in self.current_merge_data[equ_temp_par]]) + 273.15 # temp in Kelvin
except:
raise
self.current_merge_data['calc_Tequ'] = Tequ
Pequ = self.current_merge_data['calc_Pequ']
# Pequ = self.current_merge_data['co2_equ press'].astype(float) + self.current_merge_data['co2_licor press'].astype(float)
# Pequ is not in the same order as the previous calculated self.current_merge_data['calc_Pequ'] (has * 1e-3)
VP_H2O = np.exp(24.4543 - 67.4509 * 100 / Tequ -
4.8489 * np.log(Tequ / 100) -
0.000544 * self.current_merge_data[salinity_par].astype(float))
self.current_merge_data['calc_VP_H2O'] = VP_H2O
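# Convert dry-air xCO2 to pCO2 at sea-surface temperature: correct for water vapour pressure and for the equilibrator-to-SST temperature difference via exp(0.0423 * dT); the 0.0423 constant matches the commonly used Takahashi-type correction.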
pCO2 = self.current_merge_data['calc_xCO2'] * (Pequ / 1013.25 - VP_H2O) * np.exp(
0.0423 * (self.current_merge_data[temp_par].astype(float) + 273.15 - Tequ))
fCO2 = pCO2 * np.exp(((-1636.75 + 12.0408 * Tequ - 0.0327957 * Tequ ** 2 + 3.16528 * 1e-5 * Tequ ** 3)
+ 2 * (1 - self.current_merge_data['calc_xCO2'] * 1e-6) ** 2 * (
57.7 - 0.118 * Tequ)) * Pequ / 1013.25 / (82.0575 * Tequ))
self.current_merge_data['calc_pCO2'] = pCO2
self.current_merge_data['calc_fCO2 SST'] = fCO2
def _get_pCO2_data_from_row(self, series):
"""
Calculates xCO2 etc. for row or saves information needed to calculate pCO2.
:param series: pandas.Series (row in df)
:return:
"""
return_dict = {'calc_k': np.nan,
'calc_m': np.nan,
'calc_xCO2': np.nan,
'calc_Pequ': np.nan,
'calc_pCO2 dry air': np.nan,
'calc_time_since_latest_std': np.nan}
type_value = series['co2_Type']
if type(type_value) == float and np.isnan(type_value):
return return_dict
co2_time = series['co2_time']
co2_value = as_float(series['co2_CO2 um/m'])
std_value = as_float(series['co2_std val'])
# print('co2_equ press in series', 'co2_equ press' in series) False
# print('co2_licor press in series', 'co2_licor press' in series) True
equ_press_value = as_float(series['co2_equ press'])
# equ_press_value = as_float(series['calc_Pequ'])
licor_press_value = as_float(series['co2_licor press'])
# print('-'*30)
# print('SERIES')
# print(series['co2_time'])
# print(series['co2_source_file'])
# print(series['mit_time'])
# print(series['mit_source_file'])
if not type_value:
return dict()
# Added by Johannes 2020-04-29
if not hasattr(self, 'co2_time_list'):
self.co2_time_list = []
if not hasattr(self, 'std_val_list'):
self.std_val_list = []
if not hasattr(self, 'std_co2_list'):
self.std_co2_list = []
if 'STD' in type_value:
if is_std(type_value):
if co2_time in self.co2_time_list:
return dict()
# print('¤'*40)
# print('STD', type_value)
# print(self.std_val_list)
# This row should be saved for regression calculation
self.co2_time_list.append(co2_time)
self.std_val_list.append(std_value)
self.std_co2_list.append(co2_value)
self.std_latest_time = series['time']
# print('STD: self.std_latest_time', self.std_latest_time)
return dict()
else:
return dict()
else:
# Calculate/save constants if data is available
if self.std_val_list:
# print('self.std_latest_time', self.std_latest_time)
# print()
# print('¤'*40)
# for t, st, co in zip(self.co2_time_list, self.std_val_list, self.std_co2_list):
# print(t, st, co)
# print('-'*40)
self._set_constants(self.std_val_list, self.std_co2_list, file_id=self.get_file_id(time=series['time'],
file_type='co2'))
# # Reset lists
# self.std_val_list = []
# self.std_co2_list = []
if not self.pCO2_constants:
self._set_constants_for_timestamp(series['time'])
# return {'calc_pCO2 dry air': co2_value,
# 'calc_xCO2': co2_value}
# Reset lists
self.co2_time_list = []
self.std_val_list = []
self.std_co2_list = []
# Make calculations
k = self.pCO2_constants['calc_k'] # k in y = kx + m
m = self.pCO2_constants['calc_m'] # m in y = kx + m
x = (co2_value - m) / k # x in y = kx + m
xCO2 = co2_value + (1 - k) * x + m
# value = measured Value + correction (correction = diff between y = x and y = kx + m)
Pequ = (equ_press_value + licor_press_value)
# pressure due to EQU press and licor press
pCO2_dry_air = xCO2 * Pequ * 1e-3
# Check time since latest standard gas
time_since_latest_std = np.nan
if self.std_latest_time:
time_since_latest_std = int(abs((self.std_latest_time - series['time']).total_seconds()))
return_dict = {'calc_k': k,
'calc_m': m,
'calc_xCO2': xCO2,
'calc_Pequ': Pequ,
'calc_pCO2 dry air': pCO2_dry_air,
'calc_time_since_latest_std': time_since_latest_std}
return return_dict
def _set_constants(self, std_val_list=[], std_co2_list=[], file_id='', **kwargs):
"""
Sets the regression constants (slope k and intercept m) calculated from the standard gases.
:return:
"""
# if len(std_val_list) < 3:
# return
try:
# print('std_val_list', std_val_list, len(std_val_list)/3.)
# print('std_co2_list', std_co2_list, len(std_co2_list)/3.)
adapt = np.polyfit(np.array(std_val_list), np.array(std_co2_list), 1)
except:
# print('='*30)
# print(file_id)
# for val, co2 in zip(std_val_list, std_co2_list):
# print(val, co2, type(val), type(co2))
raise
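# adapt[0] is the slope k and adapt[1] the intercept m of the standard-gas regression y = k*x + m.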
self.pCO2_constants = dict(calc_k=adapt[0],
calc_m=adapt[1],
file_id=file_id)
def _set_constants_for_timestamp(self, time_stamp):
"""
Search in file or previous files to find closest STD rows. Sets constants and saves self.std_latest_time.
:return:
"""
data = self.get_std_basis_for_timestamp(time_stamp)
self._set_constants(**data)
self.std_latest_time = data.get('std_latest_time')
def get_std_basis_for_timestamp(self, time_object):
"""
Finds information about the most recent std gases
:param time_object:
:return:
"""
index_list = []
file_id = self.get_file_id(time=time_object, file_type='co2')
if not file_id:
# Cannot find file id for the given time stamp. Need to find the latest file id.
file_id = self.get_previous_file_id(time_stamp=time_object, file_type='co2')
if not file_id:
raise TavastlandExceptionNoCO2data('No CO2 file found for time {} or earlier!'.format(time_object))
while file_id and not index_list:
# print('=' * 40)
# print('looking for get_std_basis_for_timestamp for time: {}'.format(time_object))
# print('in file_id:', file_id)
obj = self.objects['co2'][file_id]
try:
df = obj.get_df()
except TavastlandExceptionCorrupedFile:
continue
df = df.loc[df['time'] <= time_object]
for i in list(df.index)[::-1]:
value = df.at[i, 'Type']
if 'STD' in value:
if is_std(value):
index_list.append(i)
elif index_list:
break
if not index_list:
# No STD values found
# print('-', file_id)
file_id = self.get_previous_file_id(file_id=file_id, file_type='co2')
# print('-', file_id)
index_list.reverse()
# print(index_list)
std_latest_time = df.at[index_list[-1], 'time']
std_df = df.iloc[index_list, :]
std_val_list = [as_float(item) for item in std_df['std val']]
std_co2_list = [as_float(item) for item in std_df['CO2 um/m']]
return_dict = dict(file_id=file_id,
std_latest_time=std_latest_time,
std_val_list=std_val_list,
std_co2_list=std_co2_list)
return return_dict
def get_types_in_merge_data(self):
"""
Returns a list of types in loaded merged data
:return:
"""
merge_data = self.get_merge_data()
all_types = sorted(set(merge_data['co2_Type']))
if '' in all_types:
all_types.pop(all_types.index(''))
return all_types
def save_data(self, directory=None, overwrite=False, **kwargs):
self.save_dir = self._get_export_directory(directory)
if os.path.exists(self.save_dir):
if not overwrite:
raise FileExistsError('One or more files already exist. Set overwrite=True to overwrite the package')
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
processed_file_path = self._save_merge_data(directory=self.save_dir, **kwargs)
raw_mit_file_path = self._save_mit_data(directory=self.save_dir, **kwargs)
raw_co2_file_path = self._save_co2_data(directory=self.save_dir, **kwargs)
# Add comment to metadata
if not self.metadata_added.get('merged_files'):
mit_file_name = os.path.basename(raw_mit_file_path)
co2_file_name = os.path.basename(raw_co2_file_path)
time_string = self._get_time_string()
self.metadata.append(';'.join(['COMMENT_MERGE', time_string, f'Data merged are in files: {mit_file_name} and {co2_file_name}']))
self.metadata_added['merged_files'] = True
# Add "time frozen" comment to metadata
if not self.metadata_added.get('frozen_time'):
self._add_frozen_time_comment()
self.metadata_added['frozen_time'] = True
# Write metadata file
merge_file_base = os.path.basename(processed_file_path).split('.')[0]
metadata_file_path = os.path.join(self.save_dir, f'metadata_{merge_file_base}.txt')
self._save_metadata(metadata_file_path)
return self.save_dir
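# For reference, the COMMENT_MERGE metadata line assembled in save_data()
# takes the form (file names and time string below are illustrative):
#   COMMENT_MERGE;<time string>;Data merged are in files: mit_raw.txt and co2_raw.txt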
def _add_frozen_time_comment(self):
for file_type, between in self.time_frozen_between.items():
if not between:
continue
time_string = self._get_time_string()
between_list = []
for (f, t) in between:
if f != '?':
f = pd.to_datetime(f)
"""
Copyright 2018 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal
from gs_quant.timeseries import *
def test_first():
dates = [
date(2019, 1, 1),
date(2019, 1, 2),
date(2019, 1, 3),
date(2019, 1, 4),
]
x = pd.Series([1.0, 2.0, 3.0, 4.0], index=dates)
result = first(x)
expected = pd.Series([1.0, 1.0, 1.0, 1.0], index=dates)
assert_series_equal(result, expected, obj="First")
def test_last():
dates = [
date(2019, 1, 1),
date(2019, 1, 2),
date(2019, 1, 3),
date(2019, 1, 4),
]
x = pd.Series([1.0, 2.0, 3.0, 4.0], index=dates)
result = last(x)
expected = pd.Series([4.0, 4.0, 4.0, 4.0], index=dates)
assert_series_equal(result, expected, obj="Last")
y = pd.Series([1.0, 2.0, 3.0, np.nan], index=dates)
result = last(y)
expected = pd.Series([3.0, 3.0, 3.0, 3.0], index=dates)
assert_series_equal(result, expected, obj="Last non-NA")
def test_last_value():
with pytest.raises(MqValueError):
last_value(pd.Series())
x = pd.Series([1.0, 2.0, 3.0, 4.0], index=(pd.date_range("2020-01-01", periods=4, freq="D")))
assert last_value(x) == 4.0
y = pd.Series([5])
assert last_value(y) == 5
y = pd.Series([1.0, 2.0, 3.0, np.nan], index=(pd.date_range("2020-01-01", periods=4, freq="D")))
assert last_value(y) == 3.0
def test_count():
dates = [
date(2019, 1, 1),
date(2019, 1, 2),
date(2019, 1, 3),
date(2019, 1, 4),
]
x = pd.Series([1.0, 2.0, 3.0, 4.0], index=dates)
result = count(x)
expected = pd.Series([1.0, 2.0, 3.0, 4.0], index=dates)
assert_series_equal(result, expected, obj="Count")
def test_compare():
dates1 = [
date(2019, 1, 1),
date(2019, 1, 2),
date(2019, 1, 3),
date(2019, 1, 4),
]
dates2 = [
date(2019, 1, 1),
date(2019, 1, 2),
date(2019, 1, 3),
]
x = pd.Series([1.0, 2.0, 2.0, 4.0], index=dates1)
y = pd.Series([2.0, 1.0, 2.0], index=dates2)
expected = pd.Series([-1.0, 1.0, 0.0], index=dates2)
result = compare(x, y, method=Interpolate.INTERSECT)
assert_series_equal(expected, result, obj="Compare series intersect")
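# With Interpolate.INTERSECT the comparison is restricted to the dates that x
# and y have in common, hence the three-element expected series; on those
# dates the expected values equal sign(x - y).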
# Question 07, Lab 07
# AB Satyaprakash, 180123062
# imports
import pandas as pd
import numpy as np
# functions
def f(t, y):
return y - t**2 + 1
def F(t):
return (t+1)**2 - 0.5*np.exp(t)
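# F(t) is the exact solution of the initial value problem y' = y - t**2 + 1,
# y(0) = 0.5 (substituting F into f confirms this); it is used further down
# to build the list of exact values yact for comparison.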
def RungeKutta4(t, y, h):
k1 = f(t, y)
k2 = f(t+h/2, y+h*k1/2)
k3 = f(t+h/2, y+h*k2/2)
k4 = f(t+h, y+h*k3)
return y + h*(k1 + 2*k2 + 2*k3 + k4)/6
def AdamsBashforth(t, y, h):
return y[-1] + h*(55*f(t[-1], y[-1]) - 59*f(t[-2], y[-2]) + 37*f(t[-3], y[-3]) - 9*f(t[-4], y[-4]))/24
def AdamsMoulton(t, y, h):
t1 = t[-1]+h
y1 = AdamsBashforth(t, y, h)
return y[-1] + h*(9*f(t1, y1) + 19*f(t[-1], y[-1]) - 5*f(t[-2], y[-2]) + f(t[-3], y[-3]))/24
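# For reference, the scheme implemented above is the standard fourth-order
# Adams-Bashforth-Moulton predictor-corrector, with f_i = f(t_i, y_i):
#   predictor (AB4): y*_(n+1) = y_n + (h/24)*(55 f_n - 59 f_(n-1) + 37 f_(n-2) - 9 f_(n-3))
#   corrector (AM4): y_(n+1)  = y_n + (h/24)*(9 f(t_(n+1), y*_(n+1)) + 19 f_n - 5 f_(n-1) + f_(n-2))
# RK4 only supplies the additional starting values the four-step predictor needs.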
# program body
t = [0]
y = [0.5]
h = 0.2
# Bootstrap the first three steps with RK4: advance y from the current
# (t, y) pair first, then append the new time point.
y.append(RungeKutta4(t[-1], y[-1], h))
t.append(round(t[-1]+h, 1))
y.append(RungeKutta4(t[-1], y[-1], h))
t.append(round(t[-1]+h, 1))
y.append(RungeKutta4(t[-1], y[-1], h))
t.append(round(t[-1]+h, 1))
yact = []
while t[-1] < 2:
y.append(AdamsMoulton(t, y, h))
t.append(round(t[-1]+h, 1))
for T in t:
yact.append(F(T))
df = pd.DataFrame()
df["Adam's Predictor-Corrector Method"] = | pd.Series(y) | pandas.Series |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
# unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertTrue(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
self.assertEqual(result['index'].dtype, np.dtype('M8[ns]'))
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start=datetime.datetime.now()
idx=DatetimeIndex(start=start,freq="1d",periods=10)
df=DataFrame(lrange(10),index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assertTrue((result == expected).all())
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02',
'2014-02', '2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
dti = DatetimeIndex(
freq='D', start=datetime(1998, 1, 1), periods=365)
self.assertEqual(dti.year[0], 1998)
self.assertEqual(dti.month[0], 1)
self.assertEqual(dti.day[0], 1)
self.assertEqual(dti.hour[0], 0)
self.assertEqual(dti.minute[0], 0)
self.assertEqual(dti.second[0], 0)
self.assertEqual(dti.microsecond[0], 0)
self.assertEqual(dti.dayofweek[0], 3)
self.assertEqual(dti.dayofyear[0], 1)
self.assertEqual(dti.dayofyear[120], 121)
self.assertEqual(dti.weekofyear[0], 1)
self.assertEqual(dti.weekofyear[120], 18)
self.assertEqual(dti.quarter[0], 1)
self.assertEqual(dti.quarter[120], 2)
self.assertEqual(dti.is_month_start[0], True)
self.assertEqual(dti.is_month_start[1], False)
self.assertEqual(dti.is_month_start[31], True)
self.assertEqual(dti.is_quarter_start[0], True)
self.assertEqual(dti.is_quarter_start[90], True)
self.assertEqual(dti.is_year_start[0], True)
self.assertEqual(dti.is_year_start[364], False)
self.assertEqual(dti.is_month_end[0], False)
self.assertEqual(dti.is_month_end[30], True)
self.assertEqual(dti.is_month_end[31], False)
self.assertEqual(dti.is_month_end[364], True)
self.assertEqual(dti.is_quarter_end[0], False)
self.assertEqual(dti.is_quarter_end[30], False)
self.assertEqual(dti.is_quarter_end[89], True)
self.assertEqual(dti.is_quarter_end[364], True)
self.assertEqual(dti.is_year_end[0], False)
self.assertEqual(dti.is_year_end[364], True)
self.assertEqual(len(dti.year), 365)
self.assertEqual(len(dti.month), 365)
self.assertEqual(len(dti.day), 365)
self.assertEqual(len(dti.hour), 365)
self.assertEqual(len(dti.minute), 365)
self.assertEqual(len(dti.second), 365)
self.assertEqual(len(dti.microsecond), 365)
self.assertEqual(len(dti.dayofweek), 365)
self.assertEqual(len(dti.dayofyear), 365)
self.assertEqual(len(dti.weekofyear), 365)
self.assertEqual(len(dti.quarter), 365)
self.assertEqual(len(dti.is_month_start), 365)
self.assertEqual(len(dti.is_month_end), 365)
self.assertEqual(len(dti.is_quarter_start), 365)
self.assertEqual(len(dti.is_quarter_end), 365)
self.assertEqual(len(dti.is_year_start), 365)
self.assertEqual(len(dti.is_year_end), 365)
dti = DatetimeIndex(
freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4)
self.assertEqual(sum(dti.is_quarter_start), 0)
self.assertEqual(sum(dti.is_quarter_end), 4)
self.assertEqual(sum(dti.is_year_start), 0)
self.assertEqual(sum(dti.is_year_end), 1)
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7
if not _np_version_under1p7:
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
self.assertEqual(dti.is_month_start[0], 1)
tests = [
(Timestamp('2013-06-01', offset='M').is_month_start, 1),
(Timestamp('2013-06-01', offset='BM').is_month_start, 0),
(Timestamp('2013-06-03', offset='M').is_month_start, 0),
(Timestamp('2013-06-03', offset='BM').is_month_start, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0),
(Timestamp('2013-03-31', offset='QS-FEB').is_year_end, 0),
(Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1),
(Timestamp('2013-06-30', offset='BQ').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQ').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_quarter_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQS-APR').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQS-APR').is_quarter_end, 1),
(Timestamp('2013-03-29', offset='BQS-APR').is_year_end, 1),
(Timestamp('2013-11-01', offset='AS-NOV').is_year_start, 1),
(Timestamp('2013-10-31', offset='AS-NOV').is_year_end, 1)]
for ts, value in tests:
self.assertEqual(ts, value)
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
self.assert_numpy_array_equal(dti.nanosecond, np.arange(10))
def test_datetimeindex_diff(self):
dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
self.assertEqual(len(dti1.diff(dti2)), 2)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(s[48], 48)
self.assertEqual(s['1/2/2009'], 48)
self.assertEqual(s['2009-1-2'], 48)
self.assertEqual(s[datetime(2009, 1, 2)], 48)
self.assertEqual(s[lib.Timestamp(datetime(2009, 1, 2))], 48)
self.assertRaises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
self.assertEqual(s[48], -1)
s['1/2/2009'] = -2
self.assertEqual(s[48], -2)
s['1/2/2009':'2009-06-05'] = -3
self.assertTrue((s[48:54] == -3).all())
def test_datetimeindex_constructor(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
self.assertRaises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(
['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
self.assertTrue(idx7.equals(idx8))
for other in [idx2, idx3, idx4, idx5, idx6]:
self.assertTrue((idx1.values == other.values).all())
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = DatetimeIndex(start=sdate, freq='1B', periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[0], sdate + 0 * dt.bday)
self.assertEqual(idx.freq, 'B')
idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[-1], edate)
self.assertEqual(idx.freq, '5D')
idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.Week(weekday=6))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.QuarterBegin(startingMonth=1))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.BQuarterEnd(startingMonth=12))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
def test_dayfirst(self):
# GH 5917
arr = ['10/02/2014', '11/02/2014', '12/02/2014']
expected = DatetimeIndex([datetime(2014, 2, 10),
datetime(2014, 2, 11),
datetime(2014, 2, 12)])
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True)
idx4 = to_datetime(np.array(arr), dayfirst=True)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
self.assertTrue(expected.equals(idx1))
self.assertTrue(expected.equals(idx2))
self.assertTrue(expected.equals(idx3))
self.assertTrue(expected.equals(idx4))
self.assertTrue(expected.equals(idx5))
self.assertTrue(expected.equals(idx6))
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
self.assertTrue((res == exp).all())
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
self.assertTrue((res == exp).all())
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
self.assertEqual(d2.dtypes[0], np.dtype('M8[ns]'))
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
self.assertEqual(df.index[0], stamp)
self.assertEqual(df.reset_index()['Date'][0], stamp)
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.reindex(idx2)
self.assertTrue(df.index.equals(idx2))
def test_datetimeindex_union_join_empty(self):
dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D')
empty = Index([])
result = dti.union(empty)
tm.assert_isinstance(result, DatetimeIndex)
self.assertIs(result, result)
result = dti.join(empty)
tm.assert_isinstance(result, DatetimeIndex)
def test_series_set_value(self):
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
s = Series().set_value(dates[0], 1.)
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# self.assertEqual(s2.values.dtype, 'M8[ns]')
@slow
def test_slice_locs_indexerror(self):
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.ix[datetime(1900, 1, 1):datetime(2100, 1, 1)]
class TestSeriesDatetime64(tm.TestCase):
def setUp(self):
self.series = Series(date_range('1/1/2000', periods=10))
def test_auto_conversion(self):
series = Series(list( | date_range('1/1/2000', periods=10) | pandas.date_range |
import os
import tempfile
import glob
import tqdm
import pandas as pd
import geopandas as gpd
from maskrcnn.postprocess.polygonize import load_ann
# AOI index data w/ georeferencing info
AOI_IN_DIR = 'data/Siaya/Meta/aoi.csv'
# download log data
LOG_IN_DIR = 'data/Siaya/Meta/aoi_download_log.csv'
# satellite derived data
SAT_IN_ANN_DIR = 'data/Siaya/Pred/infer/'
SAT_IN_IMG_DIR = 'data/Siaya/Image/'
SAT_OUT_GEOM_DIR = 'data/Siaya/Merged/sat_raw.geojson'
SAT_OUT_CSV_DIR = 'data/Siaya/Merged/sat_raw.csv'
# boundary
BOUND_IN_DIR = 'data/External/GiveDirectly/figure2/SampleArea.shp'
# read boundary shapefile
bound, = gpd.read_file(BOUND_IN_DIR)['geometry']
# read image index data frame
df = pd.merge(pd.read_csv(AOI_IN_DIR),
| pd.read_csv(LOG_IN_DIR) | pandas.read_csv |
import pandas as pd
import re
import win32com.client
from graphviz import Digraph
def LoadExcelStructure(fileFolder,fileName):
"""
Return a dataframe containing information about your Excel file VB structure
fileFolder: Your Excel file folder
fileName: Your Excel file name including the extension
"""
fileFolder=fileFolder + "/"
xl = win32com.client.Dispatch('Excel.Application')
wb = xl.Workbooks.Open(fileFolder + fileName)
xl.Visible = 1
df_ModInfo= | pd.DataFrame() | pandas.DataFrame |
import pytest
from pandas import Series
from cellengine.utils.scale_utils import apply_scale
@pytest.fixture(scope="module")
def scale():
scale = {"minimum": 5, "maximum": 10, "type": "LinearScale"}
return scale
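# The tests below pin down the LinearScale contract: applying the scale
# unclamped leaves values untouched, while clamping restricts them to
# [minimum, maximum]. A minimal sketch of that behaviour, inferred from the
# assertions rather than taken from the cellengine implementation:
#
#     def linear_clamp(scale, value):
#         return min(max(value, scale["minimum"]), scale["maximum"])
#
#     linear_clamp({"minimum": 5, "maximum": 10, "type": "LinearScale"}, 40)  # -> 10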
def test_should_apply_scale(scale):
input = Series([10, 0, 1.2, 10, 40])
output = Series([], dtype="float64")
output = input.map(lambda a: apply_scale(scale, a, False))
assert all(output == Series([10, 0, 1.2, 10, 40]))
def test_should_apply_clamped(scale):
input = Series([10, 7, 1.2, 9, 40])
output = Series([], dtype="float64")
output = input.map(lambda a: apply_scale(scale, a, True))
assert all(output == ([10, 7, 5, 9, 10]))
def test_should_handle_0_length_arrays(scale):
input = Series([], dtype="float64")
output = Series([], dtype="float64")
output = input.map(lambda a: apply_scale(scale, a, True))
assert type(output) is Series
assert output.size == 0
def test_correctly_applies_scale_of_length_n(scale):
for n in range(1, 32):
input = Series([1] * n)
output = | Series([], dtype="float64") | pandas.Series |
#--------------------------------------------------------------- Imports
from dotenv import load_dotenv
import alpaca_trade_api as tradeapi
import os
from pathlib import Path
import string
import pandas as pd
import numpy as np
import seaborn as sns
import panel as pn
from panel.interact import interact, interactive, fixed, interact_manual
from panel import widgets
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.express as px
pn.extension('plotly')
from pytrends.request import TrendReq
#--------------------------------------------------------------- Environment
# Loads .env
load_dotenv()
# Sets Alpaca API key and secret
alpaca_key = os.getenv('ALPACA_API_KEY')
alpaca_secret = os.getenv('ALPACA_API_SECRET')
# Creates the Alpaca API object
alpaca = tradeapi.REST(alpaca_key, alpaca_secret, api_version = "v2")
timeframe = "1D"
start = pd.Timestamp('2016-05-26', tz = 'US/Pacific').isoformat()
end = pd.Timestamp('2021-06-6', tz = 'US/Pacific').isoformat()
#--------------------------------------------------------------- Global Variables
pytrend = TrendReq()
sectors = [
'Communications',
'Consumer Discretionary',
'Consumer Staples',
'Energy',
'Financial',
'Healthcare',
'Industrial',
'Information Technology',
'Materials',
'Real Estate',
'Utilities'
]
beta = ['Min', 'Max', 'Median', 'Mutual Fund']
z_field = ['Close', 'Volume']
sector_tickers = {
'Communications':
{'Min': 'VZ', 'Max': 'LYV', 'Median': 'TMUS', 'Mutual Fund': 'VOX'},
'Consumer Discretionary':
{'Min': 'NVR', 'Max': 'F', 'Median': 'HLT', 'Mutual Fund': 'VCR'},
'Consumer Staples':
{'Min': 'CLX', 'Max': 'SYY', 'Median': 'PM', 'Mutual Fund': 'VDC'},
'Energy':
{'Min': 'COG', 'Max': 'OXY', 'Median': 'SLB', 'Mutual Fund': 'VDE'},
'Financial':
{'Min': 'CBOE', 'Max': 'LNC', 'Median': 'BAC', 'Mutual Fund': 'VFH'},
'Healthcare':
{'Min': 'DGX', 'Max': 'ALGN', 'Median': 'CAH', 'Mutual Fund': 'VHT'},
'Industrial':
{'Min': 'DGX', 'Max': 'TDG', 'Median': 'DE', 'Mutual Fund': 'VIS'},
'Information Technology':
{'Min': 'ORCL', 'Max': 'ENPH', 'Median': 'NTAP', 'Mutual Fund': 'VGT'},
'Materials':
{'Min': 'NEM', 'Max': 'FCX', 'Median': 'AVY', 'Mutual Fund': 'VAW'},
'Real Estate':
{'Min': 'PSA', 'Max': 'SPG', 'Median': 'UDR', 'Mutual Fund': 'VNQ'},
'Utilities':
{'Min': 'ED', 'Max': 'AES', 'Median': 'SRE', 'Mutual Fund': 'VPU'}
}
member_picks = {
'Boomer': ['VDC', 'VNQ', 'VOX', 'VAW'],
'Stonks': ['GME', 'AMC', 'PSLV', 'BB'],
'Pro Gamer': ['AAPL', 'TSLA', 'AMC', 'WMT'],
'Real American': ['LMT', 'TAP', 'PM', 'HAL']
}
#--------------------------------------------------------------- Functions
# Generates Correlation Heatmap of Sector Mutual Funds & Index
def df_to_plotly(df):
return {'z': df.values.tolist(),
'x': df.columns.tolist(),
'y': df.index.tolist()}
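# df_to_plotly is a thin adapter: it unpacks a DataFrame (e.g. a correlation
# matrix) into the z/x/y arguments expected by plotly's go.Heatmap. A usage
# sketch (the Figure call below is illustrative, not part of this script):
#
#     fig = go.Figure(go.Heatmap(df_to_plotly(df.corr()), colorscale='RdBu'))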
@interact(Beta = beta)
def heatmap(Beta):
df = pd.DataFrame()
sp_file = Path('../Data/SP500.csv')
sp_df = pd.read_csv(sp_file, infer_datetime_format=True, parse_dates=True, index_col='Date')
df['SP500'] = sp_df['Close']
for k, v in sector_tickers.items():
ticker = sector_tickers[k][Beta]
file = Path('../Data/{}.csv'.format(ticker))
ticker_df = | pd.read_csv(file, infer_datetime_format=True, parse_dates=True, index_col='Date') | pandas.read_csv |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# %%
DATA_ROOT = '../../data/raw'
# %% [markdown]
# ## LOADING DATA
# %%
print('Loading raw datasets...', flush=True)
GIT_COMMITS_PATH = f"{DATA_ROOT}/GIT_COMMITS.csv"
GIT_COMMITS_CHANGES = f"{DATA_ROOT}/GIT_COMMITS_CHANGES.csv"
SONAR_MEASURES_PATH = f"{DATA_ROOT}/SONAR_MEASURES.csv"
SZZ_FAULT_INDUCING_COMMITS = f"{DATA_ROOT}/SZZ_FAULT_INDUCING_COMMITS.csv"
JIRA_ISSUES = f"{DATA_ROOT}/JIRA_ISSUES.csv"
# %%
git_commits = pd.read_csv(GIT_COMMITS_PATH)
git_commits_changes = pd.read_csv(GIT_COMMITS_CHANGES)
sonar_measures = pd.read_csv(SONAR_MEASURES_PATH)
szz_fault_inducing_commits = pd.read_csv(SZZ_FAULT_INDUCING_COMMITS)
jira_issues = pd.read_csv(JIRA_ISSUES)
# %%
git_commits_changes[git_commits_changes['linesAdded'].isna()]
# %%
len(git_commits_changes.commitHash.unique())
# %% [markdown]
# ## FILTERING COLUMNS
print('Filtering columns...', flush=True)
# %% [markdown]
# -------------------------------------------------------------------------------------------------------------------------------
# %%
git_dates = git_commits[['commitHash','committerDate']]
# %%
agg = {
'linesAdded': ['sum'],
'linesRemoved': ['sum'],
'projectID': ['count'],
}
gcg_by_commit = git_commits_changes.groupby(['projectID', 'commitHash']).agg(agg)
# %%
len(gcg_by_commit)
# %%
gcg_by_commit = gcg_by_commit.reset_index()
# %%
gcg_by_commit.columns = ['projectID', 'commitHash', 'lines_added', 'lines_removed', 'entropylike']
# %%
gcg_by_commit = pd.merge(gcg_by_commit, git_dates, on='commitHash', how='inner')
# %%
gcg_by_commit = gcg_by_commit.sort_values(by=['projectID', 'committerDate'])
# %%
print('Computing metrics...', flush=True)
total_lines = []
project = 'accumulo'
la_counter = 0
lr_counter = 0
for i, row in gcg_by_commit.iterrows():
if project!=row['projectID']:
project=row['projectID']
la_counter = 0
lr_counter = 0
la_counter+=row['lines_added']
lr_counter+=row['lines_removed']
total_lines.append(la_counter-lr_counter)
gcg_by_commit['total_lines'] = total_lines
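# Worked example of the running counters above (illustrative numbers): for a
# project whose commits have lines_added = [100, 50] and lines_removed = [0, 20],
# total_lines is [100, 130]; both counters reset to 0 whenever projectID changes.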
# %%
gcg_by_commit = gcg_by_commit[gcg_by_commit['total_lines']>=0] #to avoid 2 lines of wrong data in the commons-cli project
# %%
gcg_by_commit['added/total_lines'] = gcg_by_commit['lines_added']/gcg_by_commit['total_lines']
# %%
gcg_by_commit = gcg_by_commit[gcg_by_commit['added/total_lines']<=1] #to avoid 1 line of wrong data in commons-cli project
# %%
gcg_by_commit = gcg_by_commit[['commitHash', 'entropylike', 'added/total_lines']]
# %%
jira_bugs = jira_issues[jira_issues['type'] == 'Bug']
jira_bugs = jira_bugs[['key', 'priority']]
# %%
print('Merging datasets...', flush=True)
szz_fault_inducing_commits = szz_fault_inducing_commits[['faultInducingCommitHash', 'key']]
szz_fault_inducing_commits = szz_fault_inducing_commits.rename(columns={'faultInducingCommitHash':'commitHash'})
szz_fault_inducing_commits.head()
# %%
Y = | pd.merge(szz_fault_inducing_commits, jira_bugs, on='key') | pandas.merge |
#
# utility library that provides capabilities to interact with the SCM instances and provide
# the retrieved data in a format for presentation.
#
# this library provides functions for communicating directly with an SCM instance
# and it also provides functions with the _proxy naming where the data is retrieved
# from the proxy cache function instead of going directly to the SCM.
# this helps with scalability of the implementation because the number of transactions
# against the SCM rest interfaces is limited, and it also provides for a more responsive
# rendering of the application pages (at the expense of data age).
#
import os
import datetime
import pandas as pd
import requests as rq
import scm as scm
import gpslocation as gps
from math import sin, cos, atan2, sqrt, radians, degrees
import json
gpsdict = gps.gendict()
#
# Pandas dataframes are used in a global fashion to hold the data that is received from the SCM instances.
def init_sitedf():
return pd.DataFrame([], columns = ['site', 'lat', 'lon','leafs','region', 'fm_state'])
def init_sites_snmp():
return pd.DataFrame([], columns = ['site', 'v4ip', 'wan'])
def init_nodedf():
return pd.DataFrame([], columns = ['site','serial','router_id','region'])
def init_uplinkdf():
return pd.DataFrame([], columns = ['site', 'v4ip', 'wan','region'])
def init_eventdf():
return pd.DataFrame([], columns = ['Time','utc', 'Message', 'Severity','region'])
def init_sitelinksdf():
return pd.DataFrame([], columns = ['localcity','remotecity', 'local_site','local_node_serial', 'remote_node_serial','status', 'state','region'])
sitedf = init_sitedf()
sites_snmpdf = init_sites_snmp()
nodedf = init_nodedf()
uplinkdf = init_uplinkdf()
eventdf = init_eventdf()
sitelinksdf = init_sitelinksdf()
def get_sites(sitedf, realm, user, pw, region=0):
''' populate the provided pandas dataframe with the site information,
as this data is entered into the dataframe each row is annotated with the region that
    the site is associated with
'''
r = scm.get('sites', realm, user,pw)
if r.status_code == 200:
f = r.json()
for a in f['items']:
try:
p = gpsdict[a['city']]
except:
                # unknown cities will be stacked at (0,0) on the map as the gps data is not extensive
p = { 'lat':0, 'lon':0}
lat = p['lat']
lon = p['lon']
sitedf.loc[a['id']] = [a['city'].replace(" ","_"), lat, lon,a['sitelink_leafs'],region,{'size':10,'color': 'rgb(255, 0, 0)'}]
return
def get_sites_proxy(proxy,user="",pw=""):
''' get the sites data using the proxy instead of directly
returns a pandas data frame with the received data or empty on a problem
'''
df = init_sitedf()
r = rq.get(proxy + '/api/sites', auth=(user,pw))
if r.status_code == 200:
df = pd.read_json(r.content, orient='index')
return df
else:
return df
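# Usage sketch of the two access paths (realm, credentials and proxy URL are
# illustrative placeholders, not values from a real deployment):
#
#     get_sites(sitedf, 'myrealm.example.com', 'admin', 'secret', region=1)  # direct SCM query
#     cached_sites = get_sites_proxy('http://localhost:8080')                # cached copy via the proxy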
def get_nodes(nodedf, sitedf, realm, user, pw, region=0):
''' populate the provided pandas dataframe with the node information,
as this data is entered into the dataframe each row is annotated with the region that
    the node is associated with; defaults to region 0
'''
r = scm.get('nodes', realm, user,pw)
if r.status_code == 200:
f = r.json()
for a in f['items']:
city = sitedf.loc[a['site']]['site']
nodedf.loc[a['id']] = [city, a['serial'], a['router_id'],region]
return
def get_nodes_proxy(proxy,user="",pw=""):
''' get the nodes data using the proxy instead of directly
returns a pandas data frame with the received data or empty on a problem
'''
r = rq.get( proxy + '/api/nodes', auth=(user,pw))
if r.status_code == 200:
return pd.read_json(r.content, orient='index')
else:
        return init_nodedf()
def get_eventlogs(eventdf,realm, user, pw, region=0):
''' populate the provided pandas dataframe with the eventlog information,
as this data is entered into the dataframe each row is annotated with the region that
    the node is associated with; defaults to region 0
'''
r = scm.get('eventlogs', realm, user,pw)
if r.status_code == 200:
f = r.json()
for a in f['items']:
eventdf.loc[a['id']] = [datetime.datetime.fromtimestamp(a['utc']).strftime('%c'),
a['utc'],
a['msg'],
a['severity'],
region]
return
def get_eventlogs_proxy(proxy, user="",pw=""):
''' get the eventlog data using the proxy instead of directly
returns a pandas data frame with the received data or empty on a problem
'''
r = rq.get( proxy + '/api/eventlogs', auth=(user,pw))
if r.status_code == 200:
return pd.read_json(r.content, orient='index')
else:
return init_eventdf()
def get_uplinks(uplinkdf, sitedf, realm, user, pw, region=0):
''' populate the provided pandas dataframe with the uplink information,
as this data is entered into the dataframe each row is annotated with the region that
    the node is associated with; defaults to region 0
'''
r = scm.get('uplinks_r', realm, user,pw)
if r.status_code == 200:
f = r.json()
for a in f['items']:
city = sitedf.loc[a['site']]['site']
uplinkdf.loc[a['id']] = [city, a['v4ip'], a['wan'],region]
return
def get_uplinks_proxy(proxy,user="",pw=""):
''' get the uplinks data using the proxy instead of directly
returns a pandas data frame with the received data or empty on a problem
'''
r = rq.get( proxy + '/api/uplinks', auth=(user,pw))
if r.status_code == 200:
return pd.read_json(r.content, orient='index')
else:
        return init_uplinkdf()
def gen_sites_snmp(sites_snmpdf,uplinkdf):
''' using the uplinks data, massage and filter it to provide a
frame of the site/appliance name/ip that should be polled.
currently this information is just getting an ipv4 address off the
internet uplink -- which may not be the correct/desired action in a real
deployment
'''
a = uplinkdf[uplinkdf['wan'].str.contains('wan-Internet')].dropna()
for i, row in a.iterrows():
sites_snmpdf.loc[i] = row
return
def post_sites_snmp(proxy, sites_snmpdf):
''' post the snmp site detail information onto the proxy cache'''
r = rq.post(proxy+'/api/snmp_details', json=sites_snmpdf.to_json(orient='index'))
return
def get_sites_snmp_proxy(proxy,user="",pw=""):
''' get the snmp site information from the proxy cache'''
r = rq.get( proxy + '/api/snmp_details', auth=(user,pw))
if r.status_code == 200:
return | pd.read_json(r.content, orient='index') | pandas.read_json |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import geopandas as gpd
from shapely.geometry import Point
moz_zipfile = "zip:///home/leo/Desktop/ml_flood_prediction/data/floods_13-03-2020/moz_flood.zip"
moz_gdf = gpd.read_file(moz_zipfile)
moz_geo = gpd.GeoDataFrame(moz_gdf)
xmin,ymin,xmax,ymax = 33.50, -20.60, 35.50, -19.00
moz_geo = moz_gdf.cx[xmin:xmax, ymin:ymax]
mwi_zipfile = "zip:///home/leo/Desktop/ml_flood_prediction/data/floods_13-03-2020/mwi_flood.zip"
mwi_gdf = gpd.read_file(mwi_zipfile)
mwi_geo = gpd.GeoDataFrame(mwi_gdf)
xmin, ymin, xmax, ymax = 34.26, -16.64, 35.86, -15.21
mwi_geo = mwi_gdf.cx[xmin:xmax, ymin:ymax]
ken_zipfile = "zip:///home/leo/Desktop/ml_flood_prediction/data/floods_06-05-2020/ky_flood_vec.zip"
ken_gdf = gpd.read_file(ken_zipfile)
ken_geo = gpd.GeoDataFrame(ken_gdf)
xmin,ymin,xmax,ymax = 38.12, 0.77, 39.50, 2.66
ken_geo = ken_gdf.cx[xmin:xmax, ymin:ymax]
import ee
import datetime
ee.Initialize()
def rainfall_images(new_geo, flood_start, folder):
""" Export GEE images for rainfall.
Example: flood_start = '2019-03-13' for mozambique.
"""
trmm = ee.ImageCollection("TRMM/3B42")# new_shapefile = "mwi_3-18.shp"
trmm = trmm.select(['precipitation'])
# setting the Area of Interest (AOI)
mwi_aoi = ee.Geometry.Rectangle(list(new_geo.total_bounds))
# Get dates for rainfall a month before flood
base = pd.to_datetime(flood_start)
date_list = [(base - datetime.timedelta(days=x)).strftime('%Y-%m-%d') for x in range(31)]
date_list = list(reversed(date_list))
dates = [(date_list[num-1], date_list[num]) for num in range(1, 31)]
# Create a list of all images filtered by date
trmm_files = [trmm.filterDate(dates[i][0], dates[i][1]) for i in range(30)]
# Assign export tasks for GEE
task_list = []
count = -30
for trmm in trmm_files:
# Filter by are of interest and return the mean for each image
trmm_aoi = trmm.filterBounds(mwi_aoi)
total = trmm_aoi.reduce(ee.Reducer.sum())
task = ee.batch.Export.image.toDrive(image=total,
region=mwi_aoi.getInfo()['coordinates'],
description='13-mar',
folder=folder,
fileNamePrefix='total_precip_t' + str(count),
maxPixels=1e12,
scale=30,
crs='EPSG:4326')
task_list.append(task)
count += 1
for i in task_list:
i.start()
return i.status()
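# NOTE: the second rainfall_images definition below pulls from the CHIRPS daily
# collection and, because it reuses the same name, replaces this TRMM version
# once the module has finished loading.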
def rainfall_images(new_geo, flood_start, folder):
""" Export GEE images for rainfall.
    Example: flood_start = '2019-03-13' for Mozambique.
"""
chirps = ee.ImageCollection("UCSB-CHG/CHIRPS/DAILY")# new_shapefile = "mwi_3-18.shp"
chirps = chirps.select(['precipitation'])
# setting the Area of Interest (AOI)
mwi_aoi = ee.Geometry.Rectangle(list(new_geo.total_bounds))
# Get dates for rainfall a month before flood
base = | pd.to_datetime(flood_start) | pandas.to_datetime |
from copy import deepcopy
import networkx as nx
import numpy as np
import pandas as pd
from graspologic.utils import largest_connected_component
from ..utils import get_paired_inds, to_pandas_edgelist
class MaggotGraph:
def __init__(self, g, nodes=None, edges=None):
self.g = g
# TODO add checks for when nodes/edges are passed, do they actually match the
# graph?
if nodes is None:
# TODO
raise NotImplementedError()
self.nodes = nodes
if edges is None:
edges = to_pandas_edgelist(g)
self.edges = edges
self._node_columns = nodes.columns
def to_edge_type_graph(self, edge_type):
type_edges = self.edges[self.edges["edge_type"] == edge_type]
view = nx.edge_subgraph(self.g, type_edges.index).copy()
return MaggotGraph(view, self.nodes, type_edges)
@property
def edge_types(self):
return sorted(self.edges["edge_type"].unique())
@property
def is_single_type(self):
return len(self.edge_types) == 1
@property
def aa(self):
return self.to_edge_type_graph("aa")
@property
def ad(self):
return self.to_edge_type_graph("ad")
@property
def da(self):
return self.to_edge_type_graph("da")
@property
def dd(self):
return self.to_edge_type_graph("dd")
@property
def sum(self):
return self.to_edge_type_graph("sum")
@property
def adj(self):
if self.is_single_type:
adj = nx.to_numpy_array(self.g, nodelist=self.nodes.index)
return adj
else:
msg = "Current MaggotGraph has more than one edge type. "
msg += "Use .adjs() method instead to specify multple edge types."
raise ValueError(msg)
@property
def adjs(self):
adjs = []
for edge_type in self.edge_types:
adj = self.to_edge_type_graph(edge_type).adj
adjs.append(adj)
adjs = np.stack(adjs)
return adjs
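    # Usage sketch (shapes are illustrative): for a graph with edge types
    # ['aa', 'ad', 'da', 'dd', 'sum'] and n nodes, mg.adjs returns a
    # (5, n, n) array ordered like mg.edge_types, while mg.aa.adj returns the
    # single n x n adjacency matrix of the 'aa' subgraph.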
def node_subgraph(self, source_node_ids, target_node_ids=None):
# if target_node_ids is None: # induced subgraph on source nodes
# # TODO don't really need two cases here
# sub_g = self.g.subgraph(source_node_ids)
# sub_nodes = self.nodes.reindex(source_node_ids)
# sub_edges = to_pandas_edgelist(sub_g)
# return MaggotGraph(sub_g, sub_nodes, sub_edges)
# else: # subgraph defined on a set of nodes, but not necessarily induced
induced = False
if target_node_ids is None:
target_node_ids = source_node_ids
induced = True
edges = self.edges
nodes = self.nodes
source_edges = edges[edges.source.isin(source_node_ids)]
source_target_edges = source_edges[source_edges.target.isin(target_node_ids)]
sub_g = self.g.edge_subgraph(source_target_edges.index).copy()
sub_nodes = nodes[
nodes.index.isin(source_node_ids) | nodes.index.isin(target_node_ids)
]
if induced:
sub_nodes = sub_nodes.reindex(source_node_ids)
# TODO what ordering makes sense when the subgraph is not induced
return MaggotGraph(sub_g, sub_nodes, source_target_edges)
def copy(self):
return deepcopy(self)
def __len__(self):
return len(self.g)
def __repr__(self):
return self.summary_statistics.__repr__()
def _repr_html_(self):
return self.summary_statistics._repr_html_()
@property
def summary_statistics(self):
edge_types = self.edge_types
edges = self.edges
cols = []
for edge_type in edge_types:
type_edges = edges[edges["edge_type"] == edge_type]
# number of actual nodes being used (ignoring edgeless ones)
n_nodes = len(np.unique(type_edges[["source", "target"]].values.ravel()))
n_edges = len(type_edges)
edgesum = type_edges["weight"].sum()
data = [n_nodes, n_edges, edgesum]
index = ["n_nodes", "n_edges", "sum_edge_weights"]
cols.append(pd.Series(index=index, data=data, name=edge_type))
results = | pd.DataFrame(cols) | pandas.DataFrame |
# import libraries
import glob
import os
from collections import OrderedDict
from pathlib import Path
import cv2
import face_recognition
import numpy as np
import pandas as pd
from PIL import Image
from tqdm import tqdm
def wget_video(
name,
url,
cmd="youtube-dl --continue --write-auto-sub --get-thumbnail --write-all-thumbnails --get-description --all-subs -o {} {}",
dir_out=None,
):
"""
Fetch video from youtube
    :param dir_out: directory to save to; if the directory does not exist, the video is saved in the current directory
:param name: identifier to name video with
:param url: youtube URL to download as MP4
    :param cmd: command line call (note: str.format() assumes 2 placeholders)
:return: True if successfully downloaded; else, return False.
"""
if not Path(dir_out).is_dir():
dir_out = ""
try:
if not glob.glob(dir_out + name + "/*.mp4") + glob.glob(
dir_out + name + "/*.mkv"
):
os.system(cmd.format(dir_out + name, url))
return True
    finally:
        print(name)
    return False
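# Usage sketch (name and URL are illustrative placeholders):
#
#     wget_video("Jane_Doe", "https://www.youtube.com/watch?v=XXXXXXXXXXX", dir_out="videos/")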
def encode_face_files(imfiles):
images = OrderedDict({impath: cv2.imread(impath)[:, :, ::-1] for impath in imfiles})
encodings = {}
for impath, image in images.items():
try:
encodings[impath] = face_recognition.face_encodings(image)[0]
except Exception as e:
print(f"Error encoding {impath} {e.message}")
return encodings
def read_family_member_list(f_csv):
df = pd.read_csv(f_csv)
# df['last'] = df["surname"].apply(lambda x: x.split('.')[0])
df["ref"] = df["firstname"] + "_" + df["surname"]
df = df.loc[df.video.notna()]
df.reset_index(inplace=True)
del df["index"]
return df
def fetch_videos(df, dir_out=None):
df.apply(lambda x: wget_video(x["ref"], x["video"], dir_out=dir_out), axis=1)
def encode_mids(d_mid, f_encodings=None, save_pickle=False):
if f_encodings and Path(f_encodings).is_file():
encodings = pd.read_pickle(f_encodings)
else:
impaths = glob.glob(f"{d_mid}/*.jpg")
encodings = encode_face_files(impaths)
if save_pickle:
f_encodings = f"{d_mid}/encodings.pkl"
pd.to_pickle(encodings, f_encodings)
return encodings
def crop_detection(face, locations):
w, h = (
locations["bb"][2] - locations["bb"][0],
locations["bb"][3] - locations["bb"][1],
)
left, right, bottom, top = (
locations["bb"][0] - w * 0.1,
locations["bb"][2] + w * 0.1,
locations["bb"][3] + h * 0.1,
locations["bb"][1] - h * 0.1,
)
return face.crop([left, top, right, bottom])
def get_video_metadata(f_video):
cap = cv2.VideoCapture(f_video)
meta = {
"fps": cap.get(cv2.CAP_PROP_FPS),
"frame_count": int(cap.get(cv2.CAP_PROP_FRAME_COUNT)),
}
meta["duration"] = meta["frame_count"] / meta["fps"]
cap.release()
return meta
def print_video_metadata(f_video):
cap = cv2.VideoCapture(f_video)
fps = cap.get(cv2.CAP_PROP_FPS)
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
duration = frame_count / fps
print(f"{f_video} meta:\n=====")
print(f"fps = {fps}")
print(f"number of frames = {frame_count}")
print(f"duration (S) = {duration}")
print(f"duration (M:S) = {int(duration / 60)}:{duration % 60}")
cap.release()
def process_subject(encodings, f_video, dir_out):
print_video_metadata(f_video)
video_capture = cv2.VideoCapture(f_video)
# Check if camera opened successfully
if not video_capture.isOpened():
print("Error opening video file")
return
frame_id = 0
# Read until video is completed
while video_capture.isOpened():
print(Path(f"fr{frame_id}_face{0}.png"))
print(dir_out)
# Capture frame-by-frame
ret, frame = video_capture.read()
if Path(f"{dir_out}faces/fr{frame_id}_face{0}.png").is_file():
print("skipping")
frame_id += 1
continue
if ret:
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_frame = frame[:, :, ::-1]
# Find all the faces in the current frame of video
face_locations = face_recognition.face_locations(rgb_frame, model="cnn")
# Initialize variables
# face_locations_all = []
# frame_id = 0
# Display the results
for j, (top, right, bottom, left) in enumerate(face_locations):
# Draw a box around the face
face_locations_dict = {
"frame": frame_id,
"face": j,
"bb": (left, top, right, bottom),
"landmarks": face_locations[j],
}
face_image = crop_detection(
Image.fromarray(rgb_frame), face_locations_dict
)
# try:
unknown_encoding = face_recognition.face_encodings(np.array(face_image))
# cv2.cvtColor(np.array(face), cv2.COLOR_RGB2BGR)
# )
if not len(unknown_encoding):
continue
unknown_encoding = unknown_encoding[0]
results = face_recognition.compare_faces(encodings, unknown_encoding)
face_image.save(f"{dir_out}faces/fr{frame_id}_face{j}.png")
pd.to_pickle(
unknown_encoding,
f"{dir_out}encodings/fr{frame_id}_face{j}-encoding.csv",
)
| pd.DataFrame(results) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
@author: pattenh1
"""
import os
import cv2
import SimpleITK as sitk
import ijroi
import numpy as np
import pandas as pd
import lxml.etree
import lxml.builder
import matplotlib
from matplotlib import cm
class ROIhandler(object):
"""Container class for handling ROIs loaded from ImageJ or a binary mask image.
Parameters
----------
roi_image_fp : str
filepath to image that defines extents of box or a mask.
img_res : float
Pixel resolution of loaded image.
is_mask : bool
Whether the image filepath is a mask or only defines image extents
"""
def __init__(self, roi_image_fp, img_res, is_mask=False):
self.type = 'ROI Container'
self.roi_image_fp = roi_image_fp
target_image = sitk.ReadImage(roi_image_fp)
self.img_res = float(img_res)
self.zero_image = np.zeros(target_image.GetSize()[::-1])
self.roi_corners = []
if is_mask == True:
self.roi_mask = sitk.ReadImage(roi_image_fp)
self.roi_mask.SetSpacing((self.img_res, self.img_res))
##this function parses the ImageJ ROI file into all corners and far corners for rectangle ROIs
#it only keeps the corners necessary for cv2 drawing
def get_rectangles_ijroi(self, ij_rois_fp):
"""Short summary.
Parameters
----------
ij_rois_fp : str
Filepath to an ImageJ ROI file
Returns
-------
list
Python lists of rectangle corners and all 4 corner coords.
"""
rois = ijroi.read_roi_zip(ij_rois_fp)
allcoords = [poly[1] for poly in rois]
corners = [rect[[0, 2]] for rect in allcoords]
self.roi_corners = corners
self.allcoords = allcoords
###grabs polygonal ijrois
def get_polygons_ijroi(self, ij_rois_fp):
"""Short summary.
Parameters
----------
ij_rois_fp : str
Filepath to an ImageJ ROI file
Returns
-------
list
Python list of polygon verteces as numpy arrays
"""
fn, fe = os.path.splitext(ij_rois_fp)
print(fe)
if fe == '.zip':
rois = ijroi.read_roi_zip(ij_rois_fp)
if fe == '.roi':
rois = ijroi.read_roi(open(ij_rois_fp, "rb"))
polyallcoords = [poly[1] for poly in rois]
self.polygons = polyallcoords
##this function draws the mask needed for general FI rois
def draw_rect_mask(self):
"""Draws uint8 binary mask image based on rectangle coords.
Returns
-------
SimpleITK image
Binary mask of loaded rect coords.
"""
if len(self.roi_corners) == 0:
raise ValueError('Rois have not been generated')
for i in range(len(self.roi_corners)):
if i == 0:
filled = cv2.rectangle(
self.zero_image,
(self.roi_corners[i][0][1], self.roi_corners[i][0][0]),
(self.roi_corners[i][1][1], self.roi_corners[i][1][0]),
(255),
thickness=-1)
else:
filled = cv2.rectangle(
filled,
(self.roi_corners[i][0][1], self.roi_corners[i][0][0]),
(self.roi_corners[i][1][1], self.roi_corners[i][1][0]),
(255),
thickness=-1)
self.box_mask = sitk.GetImageFromArray(filled.astype(np.int8))
self.box_mask.SetSpacing((self.img_res, self.img_res))
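    # Usage sketch for the rectangle workflow (file names are illustrative):
    #
    #     handler = ROIhandler('slide.tif', img_res=0.5)
    #     handler.get_rectangles_ijroi('RoiSet.zip')
    #     handler.draw_rect_mask()
    #     sitk.WriteImage(handler.box_mask, 'rect_mask.tif')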
##this function slices all the rois into sitk images
def get_rect_rois_as_images(self, image_fp):
"""Slice images based on loaded rectangles.
Parameters
----------
image_fp : str
Filepath to image to be sliced by rectangles
Returns
-------
list
Python list of SimpleITK images sliced by rectangles
"""
if len(self.roi_corners) == 0:
raise ValueError('Rois have not been generated')
bg_image = sitk.ReadImage(image_fp)
roi_slices = []
for i in range(len(self.allcoords)):
roi_slices.append(bg_image[self.allcoords[i][0][1]:self.allcoords[
i][1][1], self.allcoords[i][0][0]:self.allcoords[i][3][0]])
self.roi_slices = []
self.roi_slices.append(roi_slices)
def get_index_and_overlap(self,
ims_index_map_fp,
ims_res,
img_res,
use_key=False,
key_filepath=None):
if self.polygons:
ims_idx_np = sitk.GetArrayFromImage(
sitk.ReadImage(ims_index_map_fp))
scale_factor = ims_res / img_res
zero_img = np.zeros(ims_idx_np.shape[::-1])
for i in range(len(self.polygons)):
fill = cv2.fillConvexPoly(zero_img, self.polygons[i].astype(
np.int32), i + 1)
fill = np.transpose(fill)
dfs = []
for i in range(len(self.polygons)):
whereresult = ims_idx_np[[
np.where(fill == i + 1)[0],
np.where(fill == i + 1)[1]
]]
uniques, counts = np.unique(whereresult, return_counts=True)
df_intermed = pd.DataFrame({
'roi_index':
i + 1,
'ims_index':
uniques,
'percentage':
counts / scale_factor**2
})
dfs.append(df_intermed)
df = pd.concat(dfs)
self.rois_ims_indexed = df
if use_key == True and key_filepath != None:
key = pd.read_csv(key_filepath, index_col=0)
self.rois_ims_indexed['x_original'] = key.loc[np.searchsorted(
key.index.values, self.rois_ims_indexed['ims_index']
.values), ['x']].values
self.rois_ims_indexed['y_original'] = key.loc[np.searchsorted(
key.index.values, self.rois_ims_indexed['ims_index']
.values), ['y']].values
self.rois_ims_indexed['x_minimized'] = key.loc[np.searchsorted(
key.index.values, self.rois_ims_indexed['ims_index']
.values), ['x_minimized']].values
self.rois_ims_indexed['y_minimized'] = key.loc[np.searchsorted(
key.index.values, self.rois_ims_indexed['ims_index']
.values), ['y_minimized']].values
else:
raise ValueError('polygon coordinates have not been loaded')
def draw_polygon_mask(self, binary_mask=True, flip_xy=True):
if self.polygons:
zero_img = self.zero_image.copy()
for i in range(len(self.polygons)):
draw_polygons = self.polygons[i].astype(np.int32)
if flip_xy == True:
draw_polygons[:, [0, 1]] = draw_polygons[:, [1, 0]]
if binary_mask == True:
cc = cv2.fillConvexPoly(
zero_img, draw_polygons, 255, lineType=4)
self.pg_mask = sitk.GetImageFromArray(cc.astype(np.uint8))
else:
cc = cv2.fillConvexPoly(
zero_img, draw_polygons, i + 1, lineType=4)
self.pg_mask = sitk.GetImageFromArray(cc.astype(np.uint32))
#cc = np.transpose(cc)
self.pg_mask.SetSpacing((self.img_res, self.img_res))
else:
raise ValueError('polygon coordinates have not been loaded')
def mask_contours_to_polygons(binary_mask, arcLenPercent=0.05):
ret, threshsrc = cv2.threshold(binary_mask, 1, 256, 0)
im2, contours, hierarchy = cv2.findContours(threshsrc, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)
approxPolygon = []
for i in range(len(contours)):
cnt = contours[i]
epsilon = arcLenPercent * cv2.arcLength(cnt, True)
polygon = cv2.approxPolyDP(cnt, epsilon, True)
approxPolygon.append(polygon[:, 0, :])
return (approxPolygon)
def mask_contours_to_boxes(binary_mask):
ret, threshsrc = cv2.threshold(binary_mask, 1, 256, 0)
im2, contours, hierarchy = cv2.findContours(threshsrc, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)
xs = []
ys = []
ws = []
hs = []
for i in range(len(contours)):
cnt = contours[i]
x, y, w, h = cv2.boundingRect(cnt)
xs.append(x)
ys.append(y)
ws.append(w)
hs.append(h)
boxes = | pd.DataFrame(xs, columns=['x1']) | pandas.DataFrame |
"""
Evaluate the fair model on a dataset;
Also evaluate benchmark algorithms: OLS, SEO, Logistic regression
Main function: evaluate_FairModel
Input:
- (x, a, y): evaluation set (can be training/test set)
- loss: loss function name
- result: returned by exp_grad
- Theta: the set of Threshold
Output:
- predictions over the data set
- weighted loss
- distribution over the predictions
- DP Disparity
TODO: decide the support when we compute disparity
"""
from __future__ import print_function
import functools
import numpy as np
import pandas as pd
import fairlearn.regression.data_parser as parser
import fairlearn.regression.data_augment as augment
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.metrics import log_loss
from scipy.stats import norm
print = functools.partial(print, flush=True)
_LOGISTIC_C = 5 # Constant for rescaled logisitic loss
_QEO_EVAL = False # For now not handling the QEO disparity
#given the result returned by exponentiaed gradient, calculate the demographic parity and loss
def evaluate_FairModel(x, a, y, loss, result, Theta):
"""
Evaluate the performance of the fair model on a dataset
Input:
- X, Y: augmented data
- loss: loss function name
- result returned by exp_grad #a named tuple
- Theta: list of thresholds
- y: original labels
"""
if loss == "square": # squared loss reweighting
X, A, Y, W = augment.augment_data_sq(x, a, y, Theta)
elif loss == "absolute": # absolute loss reweighting (uniform)
X, A, Y, W = augment.augment_data_ab(x, a, y, Theta)
elif loss == "logistic": # logisitic reweighting
X, A, Y, W = augment.augment_data_logistic(x, a, y, Theta)
else:
raise Exception('Loss not supported: ', str(loss))
    ## if we already have the result returned by exp_grad.py, why do we call the augment_data function again?
hs = result.hs
    weights = result.weights  ## the weight of each classifier in the randomized classifier
# first make sure the lengths of hs and weights are the same;
off_set = len(hs) - len(weights)
if (off_set > 0):
off_set_list = pd.Series(np.zeros(off_set), index=[i +
len(weights)
for i in
range(off_set)])
result_weights = weights.append(off_set_list)
        ## pad the weights with zeros so their length matches hs (assumes the extra hypotheses in hs carry zero weight)
else:
result_weights = weights
# second filter out hypotheses with zero weights
hs = hs[result_weights > 0]
result_weights = result_weights[result_weights > 0]
num_h = len(hs)
num_t = len(Theta)
n = int(len(X) / num_t)
#the number of original examples.
# predictions
pred_list = [pd.Series(extract_pred(X, h(X), Theta),
index=range(n)) for h in hs]
total_pred = pd.concat(pred_list, axis=1, keys=range(num_h))
#lists of predictions for different hs.
# predictions across different groups
pred_group = extract_group_pred(total_pred, a)
weighted_loss_vec = loss_vec(total_pred, y, result_weights, loss)
# Fit a normal distribution to the sq_loss vector
loss_mean, loss_std = norm.fit(weighted_loss_vec)
# DP disp
PMF_all = weighted_pmf(total_pred, result_weights, Theta)
## probably understood as the probability of each theta-threshold.
PMF_group = [weighted_pmf(pred_group[g], result_weights, Theta) for g in pred_group]
## probably understood as the probability of each theta-threshold inside each protected group
DP_disp = max([pmf2disp(PMF_g, PMF_all) for PMF_g in PMF_group])
## calculate the maximum gamma(a,z) as the statistical parity for this classifier.
# TODO: make sure at least one for each subgroup
evaluation = {}
evaluation['pred'] = total_pred
evaluation['classifier_weights'] = result_weights
evaluation['weighted_loss'] = loss_mean
evaluation['loss_std'] = loss_std / np.sqrt(n)
evaluation['disp_std'] = KS_confbdd(n, alpha=0.05)
evaluation['DP_disp'] = DP_disp
evaluation['n_oracle_calls'] = result.n_oracle_calls
return evaluation
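# Illustrative usage sketch (hypothetical variable names; assumes (x, a, y),
# a threshold grid Theta, and the named tuple returned by exp_grad already exist):
#     Theta = np.linspace(0, 1.0, 41)
#     ev = evaluate_FairModel(x, a, y, loss="square", result=result, Theta=Theta)
#     print(ev['weighted_loss'], ev['DP_disp'])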
def eval_BenchmarkModel(x, a, y, model, loss):
"""
Given a dataset (x, a, y) along with predictions,
loss function name
evaluate the following:
- average loss on the dataset
- DP disp
"""
pred = model(x) # apply model to get predictions
n = len(y)
if loss == "square":
err = mean_squared_error(y, pred) # mean square loss
elif loss == "absolute":
err = mean_absolute_error(y, pred) # mean absolute loss
## functions from sklearn.metrics library.
## The strange thing is that in the evaluate_FairModel function, the author uses his own function.
elif loss == "logistic": # assuming probabilistic predictions
# take the probability of the positive class
pred = pd.DataFrame(pred).iloc[:, 1]
err = log_loss(y, pred, eps=1e-15, normalize=True)
else:
raise Exception('Loss not supported: ', str(loss))
disp = pred2_disp(pred, a, y, loss)
## this function seems incomplete
    ## because I cannot find the definition of the quantization function it relies on.
loss_vec = loss_vec2(pred, y, loss)
## Isn't this equal to the error part???
loss_mean, loss_std = norm.fit(loss_vec)
evaluation = {}
evaluation['pred'] = pred
evaluation['average_loss'] = err
evaluation['DP_disp'] = disp['DP']
evaluation['disp_std'] = KS_confbdd(n, alpha=0.05)
evaluation['loss_std'] = loss_std / np.sqrt(n)
return evaluation
def loss_vec(tp, y, result_weights, loss='square'):
"""
Given a list of predictions and a set of weights, compute
(weighted average) loss for each point
"""
num_h = len(result_weights)
if loss == 'square':
loss_list = [(tp.iloc[:, i] - y)**2 for i in range(num_h)]
## y here is pandas.Series rather than pandas.DataFrame
elif loss == 'absolute':
loss_list = [abs(tp.iloc[:, i] - y) for i in range(num_h)]
elif loss == 'logistic':
logistic_prob_list = [1/(1 + np.exp(- _LOGISTIC_C * (2 * tp[i]
- 1))) for i in range(num_h)]
# logistic_prob_list = [tp[i] for i in range(num_h)]
loss_list = [log_loss_vec(y, prob_pred, eps=1e-15) for
prob_pred in logistic_prob_list]
else:
raise Exception('Loss not supported: ', str(loss))
df = pd.concat(loss_list, axis=1)
## a matrix of shape n * n_hs, each represents the loss of an example under a given classifier
weighted_loss_vec = pd.DataFrame(np.dot(df,
pd.DataFrame(result_weights)))
## averaged in terms of different classifiers.
return weighted_loss_vec.iloc[:, 0]
## make it into one dimension.
def loss_vec2(pred, y, loss='square'):
"""
    Given a vector of predictions, compute the (unweighted) loss
    for each point
"""
if loss == 'square':
loss_vec = (pred - y)**2
elif loss == 'absolute':
loss_vec = abs(pred - y)
elif loss == 'logistic':
loss_vec = log_loss_vec(y, pred)
else:
raise Exception('Loss not supported: ', str(loss))
return loss_vec
def extract_pred(X, pred_aug, Theta):
"""
Given a list of pred over the augmented dataset, produce
the real-valued predictions over the original dataset
"""
width = Theta[1] - Theta[0]
Theta_mid = Theta + (width / 2)
num_t = len(Theta)
n = int(len(X) / num_t) # TODO: check whether things divide
pred_list = [pred_aug[((j) * n):((j+1) * n)] for j in range(num_t)]
total_pred_list = []
for i in range(n):
theta_index = max(0, (sum([p_vec.iloc[i] for p_vec in pred_list]) - 1))
## this assumes that given an example, the prediction for lower z must be less than or equal to that for larger z.
## But how do we guarantee this?
total_pred_list.append(Theta_mid[theta_index])
        ## get the largest z + alpha/2 such that the prediction for this threshold is 1.
return total_pred_list
def extract_group_pred(total_pred, a):
"""
total_pred: predictions over the data
a: protected group attributes
extract the relevant predictions for each protected group
"""
groups = list(pd.Series.unique(a))
pred_per_group = {}
for g in groups:
pred_per_group[g] = total_pred[a == g]
#I guess here the index will still indicate the exact examples. The code verifies this guess.
return pred_per_group
def extract_group_quantile_pred(total_pred, a, y, loss):
"""
total_pred: a list of prediction Series
a: protected group attributes
y: the true label, which also gives us the quantile assignment
"""
if loss == "logistic":
y_quant = y # for binary prediction task, just use labels
else:
y_quant = augment.quantization(y)
groups = list(pd.Series.unique(a))
quants = list(pd.Series.unique(y_quant))
pred_group_quantile = {}
pred_quantile = {}
for q in quants:
pred_quantile[q] = total_pred[y_quant == q]
for g in groups:
pred_group_quantile[(g, q)] = total_pred[(a == g) & (y_quant == q)]
return pred_quantile, pred_group_quantile
def weighted_pmf(pred, classifier_weights, Theta):
"""
Given a list of predictions and a set of weights, compute pmf.
pred: a list of prediction vectors
result_weights: a vector of weights over the classifiers
"""
width = Theta[1] - Theta[0]
theta_indices = pd.Series(Theta + width/2)
weights = list(classifier_weights)
weighted_histograms = [(get_histogram(pred.iloc[:, i],
theta_indices)) * weights[i]
for i in range(pred.shape[1])]
## element-wise multiplication.
theta_counts = sum(weighted_histograms)
pmf = theta_counts / sum(theta_counts)
#probably understood as the probability of each theta-threshold.
return pmf
def get_histogram(pred, theta_indices):
"""
Given a list of discrete predictions and Theta, compute a histogram
pred: discrete prediction Series vector
Theta: the discrete range of predictions as a Series vector
"""
theta_counts = pd.Series(np.zeros(len(theta_indices)))
for theta in theta_indices:
theta_counts[theta_indices == theta] = len(pred[pred == theta])
#Given a classifier h, the number of examples whose classes are theta.
return theta_counts
def pmf2disp(pmf1, pmf2):
"""
Take two empirical PMF vectors with the same support and calculate
the K-S stats
"""
cdf_1 = pmf1.cumsum()
#cumsum means: Return cumulative sum over a DataFrame or Series axis.
#calculate the probability of the event (pred <= theta_threshold)
cdf_2 = pmf2.cumsum()
diff = cdf_1 - cdf_2
diff = abs(diff)
return max(diff)
#return the maximum gamma(a,z).
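# Small worked example (illustrative only):
#     pmf2disp(pd.Series([0.2, 0.3, 0.5]), pd.Series([0.4, 0.4, 0.2]))
# the CDFs are [0.2, 0.5, 1.0] and [0.4, 0.8, 1.0], so the K-S statistic is 0.3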
def pred2_disp(pred, a, y, loss):
"""
Input:
pred: real-valued predictions given by the benchmark method
a: protected group memberships
y: labels
loss: loss function names (for quantization)
Output: the DP disparity of the predictions
TODO: use the union of the predictions as the mesh
"""
Theta = sorted(set(pred)) # find the support among the predictions
## identify the existing thresholds, and sort them in order
theta_indices = pd.Series(Theta)
if loss == "logistic":
y_quant = y # for binary prediction task, just use labels
else:
y_quant = augment.quantization(y)
groups = list(pd.Series.unique(a))
quants = list( | pd.Series.unique(y_quant) | pandas.Series.unique |
from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from azure.cognitiveservices.vision.customvision.training.models import ImageFileCreateBatch, ImageFileCreateEntry, Region
from msrest.authentication import ApiKeyCredentials
import os, time, uuid
import pandas as pd
# Replace with valid values
ENDPOINT = "https://strokevision-prediction.cognitiveservices.azure.com/"
prediction_key = "d260ad67dc73424a83d249f50631f96c"
prediction_resource_id = "/subscriptions/59d64684-e7c9-4397-8982-6b775a473b74/resourceGroups/UCL_Jacob_MSc/providers/Microsoft.CognitiveServices/accounts/stroke-vision"
#project id
project = 'Stroke_class'
project_id = '6faeb988-9af6-4e9a-9b92-4ca37069ec8e'
#iteration name
iteration = "Iteration4"
def base_location(base):
base_image_location = os.path.join (os.path.dirname(__file__), base)
return base_image_location
# Now there is a trained endpoint that can be used to make a prediction
prediction_credentials = ApiKeyCredentials(in_headers={"Prediction-key": prediction_key})
predictor = CustomVisionPredictionClient(ENDPOINT, prediction_credentials)
def StrokeClassifier(name, base, folder):
# result = []
# label = []
for ind, filename in enumerate(os.listdir(f'pose/{folder}')):
n = 2
if ind % n == 0:
if filename.endswith('.jpg') or filename.endswith('.png') or filename.endswith('.jpeg'):
with open(os.path.join (base_location(base), (name + f'{ind}.jpg')), "rb") as image_contents:
results = predictor.classify_image(
project_id, iteration, image_contents.read())
# Display the results.
for prediction in results.predictions:
print( f'Stroke Frame # {ind}:'+ "\t" + prediction.tag_name +
": {0:.2f}%".format(prediction.probability * 100))
else:
print("NA")
def StrokeList(name,base, folder):
result = []
label = []
for ind, filename in enumerate(os.listdir(f'pose/User_test/{folder}')):
n = 5
if ind % n == 0 and ind != 0:
if filename.endswith('.jpg') or filename.endswith('.png') or filename.endswith('.jpeg'):
with open(os.path.join (base_location(f'User_test/{base}'), (name + f'{ind}.jpg')), "rb") as image_contents:
results = predictor.classify_image(
project_id, iteration, image_contents.read())
# Display the results.
first = results.predictions[0]
firstresult = first.probability
firstlabel = first.tag_name
if firstlabel == 'start':
final_label = 0
elif firstlabel == 'take_load':
final_label = 1
elif firstlabel == 'extend':
final_label = 2
elif firstlabel == 'finish':
final_label = 3
else:
final_label = 'NA'
result.append(firstresult)
label.append(final_label)
#print(name,f'{ind}.jpg')
print(firstlabel)
print(firstresult)
print(ind)
else:
print("NA")
d = {'label': label, 'probability':result}
df = | pd.DataFrame(d) | pandas.DataFrame |
import requests
import pandas as pd
import numpy as np
import time
class FMP_CONNECTION(object):
def __init__(self,api_key:str):
self._api_key = api_key
def set_apikey(self,new_apikey):
self._api_key = new_apikey
def get_apikey(self) -> str:
return self._api_key
    @staticmethod
    def _merge_dfs(first_df: pd.DataFrame, second_df: pd.DataFrame, how: str = 'left'):
cols_to_use = second_df.columns.difference(first_df.columns)
new_df = pd.merge(first_df, second_df[cols_to_use], left_index=True, right_index=True, how=how)
return new_df
def _get_df(self,url:str,is_historical:bool = False) -> pd.DataFrame:
response = requests.get(url)
if response.status_code == 200:
if response.json() == {}:
print('Requested instrument is empty when retrieving data')
return None
if is_historical == False:
response_df = pd.DataFrame.from_dict(response.json())
return response_df
else:
symbol = response.json()['symbol']
df = pd.DataFrame.from_dict(response.json()['historical'])
df.insert(0,'symbol',symbol)
df['date'] = pd.to_datetime(df['date'],infer_datetime_format=True)
df.sort_values(by='date',ascending=True,inplace=True)
df.set_index('date',inplace=True)
                df.index = pd.to_datetime(df.index, infer_datetime_format=True)
return df
else:
raise ConnectionError('Could not connect to FMP Api, this was the response: \n',response.json())
def historical_price_by_interval(self,ticker:str,interval:str='1d') -> pd.DataFrame:
"""
Retrieve historical price data from various time granularities
Parameters
----------
ticker:str :
The ticker of the financial instrument to retrieve historical price data.
api_key:str :
your FMP API Key
interval: {1min,5min,15min,30min,1hour,4hour,1d,1w,1m,1q,1y} :
The granularity of how often the price historical data must be retrieved
(Default value = '1d')
Returns
-------
pd.DataFrame
"""
url = None
# Retrieve Historical info from 1 min to 4 hours
if interval in ['4hour','1hour','30min','15min','5min','1min']:
url = f'https://financialmodelingprep.com/api/v3/historical-chart/{interval}/{ticker}?apikey={self._api_key}'
historical_df = self._get_df(url)
historical_df.insert(0,'symbol',ticker)
            if 'close' in historical_df.columns and 'date' in historical_df.columns:
historical_df.sort_values(by='date',ascending=True,inplace=True)
historical_df.set_index('date',inplace=True)
historical_df.index = pd.to_datetime(historical_df.index, infer_datetime_format=True)
historical_df['change'] = historical_df['close'].pct_change()
historical_df['realOpen'] = historical_df['close'].shift(1)
return historical_df
# Retrieve Daily Info
elif interval == '1d':
url = f'https://financialmodelingprep.com/api/v3/historical-price-full/{ticker}?apikey={self._api_key}'
historical_df = self._get_df(url,True)
historical_df['change'] = historical_df['close'].pct_change()
historical_df['realOpen'] = historical_df['close'].shift(1)
return historical_df
url = f'https://financialmodelingprep.com/api/v3/historical-price-full/{ticker}?apikey={self._api_key}'
historical_df = self._get_df(url,True)
historical_df['daily'] = pd.to_datetime(historical_df.index, infer_datetime_format=True)
# Retrieve Weekly, Monthly, Quarterly and Yearly Price Data
if interval == '1w':
historical_df['week'] = historical_df['daily'].dt.to_period('w').apply(lambda r: r.start_time)
df = historical_df.drop_duplicates(subset=['week'],keep='first')
df['change'] = df['close'].pct_change()
df['realOpen'] = df['close'].shift(1)
return df
elif interval == '1m':
historical_df['monthly'] = historical_df['daily'].astype('datetime64[M]')
df = historical_df.drop_duplicates(subset=['monthly'],keep='first')
df['change'] = df['close'].pct_change()
df['realOpen'] = df['close'].shift(1)
return df
elif interval == '1q':
historical_df['quarter'] = historical_df['daily'].dt.to_period('q')
df = historical_df.drop_duplicates(subset=['quarter'], keep='first')
df['change'] = df['close'].pct_change()
df['realOpen'] = df['close'].shift(1)
return df
elif interval == '1y':
historical_df['year'] = historical_df['daily'].dt.year
df = historical_df.drop_duplicates(subset=['year'],keep='first')
df['change'] = df['close'].pct_change()
df['realOpen'] = df['close'].shift(1)
return df
else:
raise ValueError('unsupported interval for ',interval,'check your spelling')
def historical_closing_price(self,ticker:str,interval:str = '1d'):
url = f'https://financialmodelingprep.com/api/v3/historical-price-full/{ticker}?serietype=line&apikey={self._api_key}'
df = self._get_df(url,True)
if df is None:
return None
# df['date'] = pd.to_datetime(df.index, infer_datetime_format=True)
if interval == '1d':
return df
elif interval == '1w':
df['week'] = df['date'].dt.to_period('w').apply(lambda r: r.start_time)
df = df.drop_duplicates(subset=['week'], keep='first')
df = df.drop(columns=['week'])
elif interval == '1m':
df['monthly'] = df['date'].astype('datetime64[M]')
df = df.drop_duplicates(subset=['monthly'],keep='first')
df = df.drop(columns=['monthly'])
df['date'] = df['date'].astype('datetime64[M]')
elif interval == '1q':
df['quarter'] = df['date'].dt.to_period('q')
df = df.drop_duplicates(subset=['quarter'], keep='first')
df = df.drop(columns=['quarter'])
elif interval == '1y':
df['year'] = df['date'].dt.year
df = df.drop_duplicates(subset=['year'],keep='first')
df = df.drop(columns=['year'])
df = df.drop(columns=['date'])
return df
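    # Illustrative usage sketch (hypothetical API key and ticker):
    #     conn = FMP_CONNECTION(api_key="YOUR_FMP_KEY")
    #     weekly = conn.historical_price_by_interval("AAPL", interval="1w")
    #     monthly_closes = conn.historical_closing_price("AAPL", interval="1m")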
def get_closing_prices(self,tickers:[str], interval:str = '1d', from_date:str = None):
if isinstance(tickers,str):
df = self.historical_closing_price(tickers,interval)
closing_df = pd.pivot_table(data=df,index=df.index,columns='symbol',values='close',aggfunc='mean')
closing_df.index = pd.to_datetime(closing_df.index, infer_datetime_format=True)
from_d = from_date if from_date != None else closing_df.index.min()
return closing_df[from_d:]
else:
dfs = []
for ticker in tickers:
df = self.historical_closing_price(ticker,interval)
dfs.append(df)
x = pd.concat(dfs)
closing_df = pd.pivot_table(data=x, index=x.index, columns='symbol',values='close',aggfunc='mean')
closing_df.index = | pd.to_datetime(closing_df.index, infer_datetime_format=True) | pandas.to_datetime |
# Import Modulues
#==================================
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import numpy as np
from matplotlib import cm
from collections import OrderedDict
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.metrics import r2_score
from sklearn.preprocessing import StandardScaler, Normalizer
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.utils import shuffle
from sklearn import preprocessing
from sklearn import utils
import scipy.interpolate as interp
# %%===============================
# Functions
#==================================
# Split a dataset based on an attribute and an attribute value
# Sorts the Training and Testing Datasets based upon a radii size
def test_split(index, value, dataset, num):
train, test = list(), list()
for loca in index:
t=-1
for row in dataset.iloc[:,loca]:
t=t+1
if row == value:
test.append(num[t])
train = list(set(num)-set(test))
return test, train
def test_split_MF(index, value, dataset, num):
train, test = list(), list()
t=-1
for row in dataset.iloc[:,index]:
t=t+1
if value == num[t]:
test.append(t)
train = list(set(dataset.iloc[:,0])-set(test))
return test, train
def test_split_wt(index, value, dataset, num):
train, test = list(), list()
for loca in index:
t=-1
for row in dataset.iloc[:,loca]:
t=t+1
if row in value:
test.append(num[t])
train = list(set(num)-set(test))
test = list(set(num)-set(train))
return test, train
# Identifies the different unique values in a list
def searchValue(index, dataset):
seen, values = set(), list()
uniq = []
for x in dataset.iloc[:,index]:
if x not in seen:
uniq.append(x)
seen.add(x)
uniq.sort()
values = uniq
return values
## Split into regions of Mass Fration, Volume, Standard Deviations
#def search_MVS(dataset,Rcln,Rclp,Rcun,Rcup):
#
# %%===============================
# Obtains Data
#==================================
#df = pd.read_excel('MachineLearning_13280_Reduced.xlsx')
df = pd.read_excel('Generation 4.xlsx')
df_perm = df
# Separates data
#==================================
Run = df['Run ']
ID = df['ID ']
df = df.drop(['Run '],axis=1) #Remove .stat file number
X = df.drop(['Packing_Fraction '],axis=1) #Inputs
Xt = X.drop(['ID '],axis=1) #All features
y = df['Packing_Fraction '] #Packing fraction, Output
num = df['ID '] #Number in excel file read under
# %%
# =============================================================================
# MOST EXCEPTIONAL SETUP
# =============================================================================
df_seven = df.drop(['ID '],axis=1)
df_main = df
df_main.sort_values(by='Packing_Fraction ',ascending=False)
# Main Test Train Split
# =============================================================================
cutoff = 499 #number of exceptional values
split = 0.25 #percentage used for testing
exceptional = df_main.iloc[0:cutoff, :]
normal = df_main.iloc[cutoff+1 :, :]
df_extra1 = exceptional.sample(frac=split,replace=False)
df_extra2 = exceptional[~exceptional.isin(df_extra1)].dropna()
df_norm1 = normal.sample(frac=split,replace=False)
df_norm2 = normal[~normal.isin(df_norm1)].dropna()
df_test = pd.concat([df_extra1, df_norm1]) #TESTING DATA
df_train_intermediate = pd.concat([df_extra2, df_norm2])
df_Training_y = df_train_intermediate.iloc[:,-1]
df_Training_X = df_train_intermediate.drop(['Packing_Fraction '],axis=1)
# Training Data Split
# =============================================================================
df_train_intermediate.sort_values(by='Packing_Fraction ',ascending=False)
cutoff2 = int(cutoff*(1-split)) #Number of exceptional passed into training data
excep_train = df_train_intermediate.iloc[0:cutoff2, :] #remainder of exceptional
norm_train = df_train_intermediate.iloc[cutoff2+1 :, :] #remainder of normal
split2 = 0.5 #splits the data evenly
df_extra_val = excep_train.sample(frac=split2,replace=False)
df_extra_train = excep_train[~excep_train.isin(df_extra_val)].dropna()
df_norm_val = norm_train.sample(frac=split2,replace=False)
df_norm_train = norm_train[~norm_train.isin(df_norm_val)].dropna()
df_validate = pd.concat([df_extra_val, df_norm_val]) #VALIDATION DATA
#==============================================================================
df_training = pd.concat([df_extra_train, df_norm_train]) #TRAINING DATA
df_train_y = df_training.iloc[:,-1] #Train Packing Fraction
df_train = df_training.drop(['Packing_Fraction '],axis=1) #Train Inputs
df_validate_y = df_validate.iloc[:,-1] #Validate Packing Fraction
df_validate = df_validate.drop(['Packing_Fraction '],axis=1) #Validate Inputs
df_test_y = df_test.iloc[:,-1] #Test Packing Fraction
df_test = df_test.drop(['Packing_Fraction '],axis=1) #Test Inputs
# %%===============================
# Evaluate Algorithm
#==================================
index = [0, 1, 2] #Index for weight averaged radii
trainset = 0
tests = 0
stored = list()
train, test = list(), list()
#==================================
# Predictions
#==================================
predictions = []
real_value = []
real_value2 = []
predictions_train = []
real_value_train = []
#################################################################
#################### KAAI ADDED THIS ########################
#################################################################
def label_data(df_y, N):
# sort the values
df_y_sorted = df_y.sort_values()
# manually take the top N compounds and label them as 1 for extraordinary
df_y_sorted.iloc[-N:] = [1] * N
# manually label all compounds below the top N as 0 for ordinary
df_y_sorted.iloc[:-N] = [0] * (df_y.shape[0] - N)
# resort the data so that the index matches originial df_train_y
df_y_sorted = df_y_sorted.loc[df_y.index.values]
return df_y_sorted
#################################################################
#################################################################
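# Quick illustrative check of label_data (hypothetical values):
#     y = pd.Series([0.60, 0.75, 0.72, 0.80])
#     label_data(y, N=2)  # -> [0, 1, 0, 1]; the top-2 packing fractions are flagged as extraordinary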
# %% Training/Validation
#n = 100
#y_train = label_data(df_train_y, N=n) ###################################
#X_train = df_train
#
#y_test = label_data(df_validate_y, N=n) ###################################
#X_test = df_validate
#
## Training
#rf = RandomForestClassifier(n_estimators=100)
#rf.fit(X_train, y_train)
#
## Validation
#prediction = rf.predict_proba(X_test)
# %% Added From Regressor
# =============================================================================
# MOST EXCEPTIONAL SETUP
# =============================================================================
df = df.drop(['Wt_Avg_Pt#_1_Size '],axis=1) #Remove .stat file number
df = df.drop(['Wt_Avg_Pt#_2_Size '],axis=1) #Remove .stat file number
df = df.drop(['Wt_Avg_Pt#_3_Size '],axis=1) #Remove .stat file number
df = df.drop(['Wt_Avg_Pt#_1_Fraction '],axis=1) #Remove .stat file number
df = df.drop(['Wt_Avg_Pt#_2_Fraction '],axis=1) #Remove .stat file number
df = df.drop(['Wt_Avg_Pt#_3_Fraction '],axis=1) #Remove .stat file number
df_main = df
df_main.sort_values(by='Packing_Fraction ',ascending=False)
# Main Test Train Split
# =============================================================================
cutoff = 499 #number of exceptional values
split = 0.5 #percentage used for testing
exceptional = df_main.iloc[0:cutoff, :]
normal = df_main.iloc[cutoff+1 :, :]
df_extra1 = exceptional.sample(frac=split,replace=False)
df_extra2 = exceptional[~exceptional.isin(df_extra1)].dropna()
df_norm1 = normal.sample(frac=split,replace=False)
df_norm2 = normal[~normal.isin(df_norm1)].dropna()
df_test = pd.concat([df_extra1, df_norm1]) #TESTING DATA
df_test_y = df_test.iloc[:,-1] #Validate Packing Fraction
df_test = df_test.drop(['Packing_Fraction '],axis=1) #Validate Inputs
df_test = df_test.drop(['ID '],axis=1)
df_train_intermediate = pd.concat([df_extra2, df_norm2])
df_train_intermediate_y = df_train_intermediate.iloc[:,-1] #Y data
df_train_intermediate = df_train_intermediate.drop(['Packing_Fraction '],axis=1) #Validate Inputs
df_train_intermediate = df_train_intermediate.drop(['ID '],axis=1)
# %% Training/Validation
n = 100
y_train = label_data(df_train_intermediate_y, N=n) ###################################
X_train = df_train_intermediate
y_test = label_data(df_test_y, N=n) ###################################
X_test = df_test
# Training
rf = RandomForestClassifier(n_estimators=100)
rf.fit(X_train, y_train)
# Validation
prediction = rf.predict_proba(X_test)
# %%##########################################################
# probabilities are returned for label 0, and 1. Grab prob for label
prob = [pred[1] for pred in prediction]  # "list comprehension" to get prob
##############################################################
xtest_prob = X_test.copy()
xtest_prob['Probisgreat'] = prob #Prob is great controls the hue
real_value += list(df_test_y)
#xtest_prob_samp = xtest_prob.sample(100)
#import seaborn
#g = seaborn.pairplot(xtest_prob_samp, hue="Probisgreat")
rv = pd.DataFrame({'rv':real_value.copy()})
prb = pd.DataFrame({'prb':prob.copy()})
f_matrix = pd.concat([rv, prb], axis=1, join='inner')
tp = int()
tn = int()
fn = int()
fp = int()
prob_lim = 0.333
extra_lim = 0.785
for index, row in f_matrix.iterrows():
if row['rv'] > extra_lim:
if row['prb'] > prob_lim:
tp += 1
else:
fn += 1
else:
if row['prb'] > prob_lim:
fp += 1
else:
tn += 1
#Data that has a probability of being extraordinary
#=======================================================
xtest_prob_reduced = xtest_prob[xtest_prob["Probisgreat"] > 0.3]
#interp.griddata(xtest_prob)
# ===============================
## Plotting
##==================================
fig = plt.figure(1, figsize=(12, 12))
ax = fig.add_axes([0,0,1,1])
## updated it to plot against the probility of label 1
data = plt.plot(real_value, prob, 'ro', markersize=12, alpha=0.3)
ax.tick_params(direction='out', labelsize = 25, length=10, width=3, grid_color ='k')
#plt.title('Random Forrest Classifer',
# fontsize = 25, weight = 'bold')
plt.xlabel('Actual Value', fontsize = 25, weight = 'bold')
plt.ylabel('Probability of Being Extraordinary', fontsize =25, weight = 'bold')
plt.grid(True)
#==============Legend Details==================
FN = 'False Negative: '
FN += str(fn)
rect_fn = plt.Rectangle((extra_lim,prob_lim), 0.11, (-prob_lim),color='b',
alpha = 0.3,ec='k',label=str(FN))
ax.add_patch(rect_fn)
FP = 'False Positive: '
FP += str(fp)
rect_fp = plt.Rectangle((0.5,prob_lim), (extra_lim-0.5), (1-prob_lim),color='r',
alpha = 0.3,ec='k',label=str(FP))
ax.add_patch(rect_fp)
TP = 'True Positive: '
TP += str(tp)
rect_tp = plt.Rectangle((extra_lim,prob_lim), 0.11, (1-prob_lim),color='g',
alpha = 0.3,ec='k',label=str(TP))
ax.add_patch(rect_tp)
TN = 'True Negative: '
TN += str(tn)
rect_tn = plt.Rectangle((0.5,0), (extra_lim-0.5), (prob_lim),color='w',
alpha = 1,ec='k',label=str(TN))
ax.add_patch(rect_tn)
accuracy = (tp+tn)/(tp+fp+tn+fn)
acc = float('%.4g' %accuracy)
Acc = 'Accuracy: '
Acc += str(acc)
plt.plot([], [], ' ', label = str(Acc))
precision = tp/(tp+fp)
recall = tp/(fn+tp)
F1 = 2 * (precision*recall)/(precision+recall)
F1 = float('%.4g' %F1)
F1score = 'F1 Score: '
F1score += str(F1)
plt.plot([], [], ' ', label = str(F1score))
plt.legend(fontsize = 25)
#plt.legend(fontsize = 'xx-large')
plt.xlim(0.5, 0.85)
plt.ylim(0, 1)
plt.show()
# %% Predictions
#df2 = pd.read_excel('Files for Predictions.xlsx')
df2 = pd.read_excel('Partial 2-model.xlsx')
#run = df2['Run ']
#run = run.to_frame()
#df2 = df2.drop(['Run '],axis=1)
#df2 = df2.drop(['ID '],axis=1)
y_predicted = rf.predict(df2)
y_predicted = | pd.DataFrame({'Packing Fraction':y_predicted}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
@authors: <NAME> & <NAME>
"""
#!/usr/bin/python
import matplotlib.pylab as plt
import csv
from datetime import datetime, timezone
import pandas as pd
import seaborn as sns
def reddit_plot():
reddit_x = []
reddit_y = []
reddit_y_num = []
reddit_x_filtered = []
dateList = []
testList = []
# open csv file generated using
with open('reddit_dataset.csv','r') as reddit_csv:
reddit_data = csv.reader(reddit_csv, delimiter = ',')
next(reddit_csv)
for row in reddit_data:
reddit_x.append(row[1])
reddit_y.append(row[6])
# convert all values in list y from str to int
for i in reddit_y:
j=int(float(i))
reddit_y_num.append(j)
for i in reddit_y_num:
date_time_raw = datetime.fromtimestamp(i)
post_date = datetime.date(date_time_raw)
dateList.append(post_date)
for i in range(len(dateList)):
testList.append(i)
plt.title("Reddit posts versus date/time")
plt.ylabel('posts')
plt.xlabel('date')
plt.plot(dateList,testList)
plt.show()
# Twitter plot
def twitter_plot():
Tweet_data = | pd.read_csv("Twitter_dataset.csv") | pandas.read_csv |
"""
The io module provides support for reading and writing diffusion profile data
and diffusion coefficients data to csv files.
"""
import numpy as np
import pandas as pd
from scipy.interpolate import splev
from pydiffusion.core import DiffProfile, DiffSystem
import matplotlib.pyplot as plt
import threading
# To solve the problem when matplotlib figure freezes when input used
# https://stackoverflow.com/questions/34938593/matplotlib-freezes-when-input-used-in-spyder
prompt = False
promptText = ""
done = False
waiting = False
response = ""
regular_input = input
def threadfunc():
global prompt
global done
global waiting
global response
while not done:
if prompt:
prompt = False
response = regular_input(promptText)
waiting = True
def ask_input(text):
global waiting
global prompt
global promptText
promptText = text
prompt = True
while not waiting:
plt.pause(1.0)
waiting = False
return response
def ita_start():
global done
done = False
thread = threading.Thread(target=threadfunc)
thread.start()
def ita_finish():
global done
done = True
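# Typical (illustrative) usage of the input-thread workaround:
#     ita_start()
#     answer = ask_input('Continue the fit? [y/n] ')
#     ita_finish()
# ita_start spawns the listener thread, ask_input polls for the response while
# plt.pause keeps the matplotlib event loop alive, and ita_finish stops the thread.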
def save_csv(name=None, profile=None, diffsys=None):
"""
Save diffusion data as csv file.
Parameters
----------
name : str
csv file name, default name is the profile name of diffsys name.
profile : DiffProfile
DiffProfile to save.
diffsys : DiffSystem
DiffSystem to save. diffsys can be saved by itself or with profile.
Examples
--------
>>> save_csv('data.csv', profile, dsys)
"""
if profile is None and diffsys is None:
raise ValueError('No data entered')
    if name is not None and not name.endswith('.csv'):
        name += '.csv'
    if profile is None:
Xr, fD = diffsys.Xr, diffsys.Dfunc
X, DC = np.array([]), np.array([])
for i in range(diffsys.Np):
Xnew = np.linspace(Xr[i, 0], Xr[i, 1], 30)
Dnew = np.exp(splev(Xnew, fD[i]))
X = np.append(X, Xnew)
DC = np.append(DC, Dnew)
data = pd.DataFrame({'X': X, 'DC': DC})
if name is None:
name = diffsys.name+'.csv'
data.to_csv(name, index=False)
elif diffsys is None:
data = pd.DataFrame({'dis': profile.dis, 'X': profile.X})
if name is None:
name = profile.name+'.csv'
data.to_csv(name, index=False)
else:
dis, X, Xr, fD = profile.dis, profile.X, diffsys.Xr, diffsys.Dfunc
DC = np.zeros(len(dis))
for i in range(diffsys.Np):
pid = np.where((X >= Xr[i, 0]) & (X <= Xr[i, 1]))[0]
DC[pid] = np.exp(splev(X[pid], fD[i]))
data = | pd.DataFrame({'dis': dis, 'X': X, 'DC': DC}) | pandas.DataFrame |
"""Permutation test function as described in CellPhoneDB 2.0."""
from abc import ABC
from types import MappingProxyType
from typing import (
Any,
List,
Tuple,
Union,
Mapping,
Iterable,
Optional,
Sequence,
TYPE_CHECKING,
)
from functools import partial
from itertools import product
from collections import namedtuple
from scanpy import logging as logg
from anndata import AnnData
from numba import njit, prange # noqa: F401
from scipy.sparse import csc_matrix
import numpy as np
import pandas as pd
from squidpy._docs import d, inject_docs
from squidpy._utils import Signal, SigQueue, parallelize, _get_n_cores
from squidpy.gr._utils import (
_save_data,
_assert_positive,
_create_sparse_df,
_check_tuple_needles,
_assert_categorical_obs,
)
from squidpy._constants._constants import CorrAxis, ComplexPolicy
from squidpy._constants._pkg_constants import Key
__all__ = ["ligrec", "PermutationTest"]
StrSeq = Sequence[str]
SeqTuple = Sequence[Tuple[str, str]]
Interaction_t = Union[pd.DataFrame, Mapping[str, StrSeq], StrSeq, Tuple[StrSeq, StrSeq], SeqTuple]
Cluster_t = Union[StrSeq, Tuple[StrSeq, StrSeq], SeqTuple]
SOURCE = "source"
TARGET = "target"
TempResult = namedtuple("TempResult", ["means", "pvalues"])
_template = """
@njit(parallel={parallel}, cache=False, fastmath=False)
def _test_{n_cls}_{ret_means}_{parallel}(
interactions: np.ndarray, # [np.uint32],
interaction_clusters: np.ndarray, # [np.uint32],
data: np.ndarray, # [np.float64],
clustering: np.ndarray, # [np.uint32],
mean: np.ndarray, # [np.float64],
mask: np.ndarray, # [np.bool_],
res: np.ndarray, # [np.float64],
{args}
) -> None:
{init}
{loop}
{finalize}
for i in prange(len(interactions)):
rec, lig = interactions[i]
for j in prange(len(interaction_clusters)):
c1, c2 = interaction_clusters[j]
m1, m2 = mean[rec, c1], mean[lig, c2]
if np.isnan(res[i, j]):
continue
if m1 > 0 and m2 > 0:
{set_means}
if mask[rec, c1] and mask[lig, c2]:
# both rec, lig are sufficiently expressed in c1, c2
res[i, j] += (groups[c1, rec] + groups[c2, lig]) > (m1 + m2)
else:
res[i, j] = np.nan
else:
# res_means is initialized with 0s
res[i, j] = np.nan
"""
def _create_template(n_cls: int, return_means: bool = False, parallel: bool = True) -> str:
if n_cls <= 0:
raise ValueError(f"Expected number of clusters to be positive, found `{n_cls}`.")
rng = range(n_cls)
init = "".join(
f"""
g{i} = np.zeros((data.shape[1],), dtype=np.float64); s{i} = 0"""
for i in rng
)
loop_body = """
if cl == 0:
g0 += data[row]
s0 += 1"""
loop_body = loop_body + "".join(
f"""
elif cl == {i}:
g{i} += data[row]
s{i} += 1"""
for i in range(1, n_cls)
)
loop = f"""
for row in prange(data.shape[0]):
cl = clustering[row]
{loop_body}
else:
assert False, "Unhandled case."
"""
finalize = ", ".join(f"g{i} / s{i}" for i in rng)
finalize = f"groups = np.stack(({finalize}))"
if return_means:
args = "res_means: np.ndarray, # [np.float64]"
set_means = "res_means[i, j] = (m1 + m2) / 2.0"
else:
args = set_means = ""
return _template.format(
n_cls=n_cls,
parallel=bool(parallel),
ret_means=int(return_means),
args=args,
init=init,
loop=loop,
finalize=finalize,
set_means=set_means,
)
def _fdr_correct(
pvals: pd.DataFrame, corr_method: str, corr_axis: Union[str, CorrAxis], alpha: float = 0.05
) -> pd.DataFrame:
"""Correct p-values for FDR along specific axis in ``pvals``."""
from pandas.core.arrays.sparse import SparseArray
from statsmodels.stats.multitest import multipletests
def fdr(pvals: pd.Series) -> SparseArray:
_, qvals, _, _ = multipletests(
np.nan_to_num(pvals.values, copy=True, nan=1.0),
method=corr_method,
alpha=alpha,
is_sorted=False,
returnsorted=False,
)
qvals[np.isnan(pvals.values)] = np.nan
return SparseArray(qvals, dtype=qvals.dtype, fill_value=np.nan)
corr_axis = CorrAxis(corr_axis)
if corr_axis == CorrAxis.CLUSTERS:
# clusters are in columns
pvals = pvals.apply(fdr)
elif corr_axis == CorrAxis.INTERACTIONS:
pvals = pvals.T.apply(fdr).T
else:
raise NotImplementedError(f"FDR correction for `{corr_axis}` is not implemented.")
return pvals
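# Illustrative note (hypothetical values): with corr_axis='clusters' each column
# (i.e. each cluster pair) of `pvals` is corrected independently, e.g.
#     pvals = pd.DataFrame({("A", "B"): [0.01, 0.04, 0.30]})
#     _fdr_correct(pvals, corr_method="fdr_bh", corr_axis="clusters")
# applies statsmodels' multipletests down that single column.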
@d.get_full_description(base="PT")
@d.get_sections(base="PT", sections=["Parameters"])
@d.dedent
class PermutationTestABC(ABC):
"""
Class for receptor-ligand interaction testing.
The expected workflow is::
pt = PermutationTest(adata).prepare()
res = pt.test("clusters")
Parameters
----------
%(adata)s
use_raw
Whether to access :attr:`anndata.AnnData.raw`.
"""
def __init__(self, adata: AnnData, use_raw: bool = True):
if not isinstance(adata, AnnData):
raise TypeError(f"Expected `adata` to be of type `anndata.AnnData`, found `{type(adata).__name__}`.")
if not adata.n_obs:
raise ValueError("No cells are in `adata.obs_names`.")
if not adata.n_vars:
raise ValueError("No genes are in `adata.var_names`.")
self._adata = adata
if use_raw:
if adata.raw is None:
raise AttributeError("No `.raw` attribute found. Try specifying `use_raw=False`.")
if adata.raw.n_obs != adata.n_obs:
raise ValueError(f"Expected `{adata.n_obs}` cells in `.raw` object, found `{adata.raw.n_obs}`.")
adata = adata.raw
self._data = pd.DataFrame.sparse.from_spmatrix(
csc_matrix(adata.X), index=adata.obs_names, columns=adata.var_names
)
self._interactions: Optional[pd.DataFrame] = None
self._filtered_data: Optional[pd.DataFrame] = None
@d.get_full_description(base="PT_prepare")
@d.get_sections(base="PT_prepare", sections=["Parameters", "Returns"])
@inject_docs(src=SOURCE, tgt=TARGET, cp=ComplexPolicy)
def prepare(
self, interactions: Interaction_t, complex_policy: Union[str, ComplexPolicy] = ComplexPolicy.MIN.v
) -> "PermutationTestABC":
"""
Prepare self for running the permutation test.
Parameters
----------
interactions
Interaction to test. The type can be one of:
- :class:`pandas.DataFrame` - must contain at least 2 columns named `{src!r}` and `{tgt!r}`.
- :class:`dict` - dictionary with at least 2 keys named `{src!r}` and `{tgt!r}`.
- :class:`typing.Sequence` - Either a sequence of :class:`str`, in which case all combinations are
produced, or a sequence of :class:`tuple` of 2 :class:`str` or a :class:`tuple` of 2 sequences.
If `None`, the interactions are extracted from :mod:`omnipath`. Protein complexes can be specified by
delimiting the components with `'_'`, such as `'alpha_beta_gamma'`.
complex_policy
Policy on how to handle complexes. Valid options are:
- `{cp.MIN.s!r}` - select gene with the minimum average expression. This is the same as in
:cite:`cellphonedb`.
- `{cp.ALL.s!r}` - select all possible combinations between `{src!r}` and `{tgt!r}` complexes.
Returns
-------
Sets the following attributes and returns :attr:`self`:
- :attr:`interactions` - filtered interactions whose `{src!r}` and `{tgt!r}` are both in the data.
"""
complex_policy = ComplexPolicy(complex_policy)
if isinstance(interactions, Mapping):
interactions = pd.DataFrame(interactions)
if isinstance(interactions, pd.DataFrame):
if SOURCE not in interactions.columns:
raise KeyError(f"Column `{SOURCE!r}` is not in `interactions`.")
if TARGET not in interactions.columns:
raise KeyError(f"Column `{TARGET!r}` is not in `interactions`.")
self._interactions = interactions.copy()
elif isinstance(interactions, Iterable):
interactions = tuple(interactions)
if not len(interactions):
raise ValueError("No interactions were specified.")
if isinstance(interactions[0], str):
interactions = list(product(interactions, repeat=2))
elif len(interactions) == 2:
interactions = tuple(zip(*interactions))
if not all(len(i) == 2 for i in interactions):
raise ValueError("Not all interactions are of length `2`.")
self._interactions = pd.DataFrame(interactions, columns=[SOURCE, TARGET])
else:
raise TypeError(
f"Expected either a `pandas.DataFrame`, `dict` or `iterable`, found `{type(interactions).__name__}`"
)
if TYPE_CHECKING:
assert isinstance(self.interactions, pd.DataFrame)
if self.interactions.empty:
raise ValueError("The interactions are empty")
        # first uppercase, then drop duplicates
self._data.columns = self._data.columns.str.upper()
self.interactions[SOURCE] = self.interactions[SOURCE].str.upper()
self.interactions[TARGET] = self.interactions[TARGET].str.upper()
logg.debug("DEBUG: Removing duplicate interactions")
self.interactions.drop_duplicates(subset=(SOURCE, TARGET), inplace=True, keep="first")
logg.debug("DEBUG: Removing duplicate genes in the data")
n_genes_prior = self._data.shape[1]
self._data = self._data.loc[:, ~self._data.columns.duplicated()]
if self._data.shape[1] != n_genes_prior:
logg.warning(f"Removed `{n_genes_prior - self._data.shape[1]}` duplicate gene(s)")
self._filter_interactions_complexes(complex_policy)
self._filter_interactions_by_genes()
self._trim_data()
# this is necessary because of complexes
self.interactions.drop_duplicates(subset=(SOURCE, TARGET), inplace=True, keep="first")
return self
@d.get_full_description(base="PT_test")
@d.get_sections(base="PT_test", sections=["Parameters"])
@d.dedent
@inject_docs(src=SOURCE, tgt=TARGET, fa=CorrAxis)
def test(
self,
cluster_key: str,
clusters: Optional[Cluster_t] = None,
n_perms: int = 1000,
threshold: float = 0.01,
seed: Optional[int] = None,
corr_method: Optional[str] = None,
corr_axis: Union[str, CorrAxis] = CorrAxis.INTERACTIONS.v,
alpha: float = 0.05,
copy: bool = False,
key_added: Optional[str] = None,
numba_parallel: Optional[bool] = None,
**kwargs: Any,
) -> Optional[Mapping[str, pd.DataFrame]]:
"""
Perform the permutation test as described in :cite:`cellphonedb`.
Parameters
----------
%(cluster_key)s
clusters
Clusters from :attr:`anndata.AnnData.obs` ``['{{cluster_key}}']``. Can be specified either as a sequence
of :class:`tuple` or just a sequence of cluster names, in which case all combinations considered.
%(n_perms)s
threshold
Do not perform permutation test if any of the interacting components is being expressed
in less than ``threshold`` percent of cells within a given cluster.
%(seed)s
%(corr_method)s
corr_axis
Axis over which to perform the FDR correction. Only used when ``corr_method != None``. Valid options are:
- `{fa.INTERACTIONS.s!r}` - correct interactions by performing FDR correction across the clusters.
- `{fa.CLUSTERS.s!r}` - correct clusters by performing FDR correction across the interactions.
alpha
Significance level for FDR correction. Only used when ``corr_method != None``.
%(copy)s
key_added
Key in :attr:`anndata.AnnData.uns` where the result is stored if ``copy = False``.
If `None`, ``'{{cluster_key}}_ligrec'`` will be used.
%(numba_parallel)s
%(parallelize)s
Returns
-------
%(ligrec_test_returns)s
"""
_assert_positive(n_perms, name="n_perms")
_assert_categorical_obs(self._adata, key=cluster_key)
if corr_method is not None:
corr_axis = CorrAxis(corr_axis)
if TYPE_CHECKING:
assert isinstance(corr_axis, CorrAxis)
if len(self._adata.obs[cluster_key].cat.categories) <= 1:
raise ValueError(
f"Expected at least `2` clusters, found `{len(self._adata.obs[cluster_key].cat.categories)}`."
)
if TYPE_CHECKING:
assert isinstance(self.interactions, pd.DataFrame)
assert isinstance(self._filtered_data, pd.DataFrame)
interactions = self.interactions[[SOURCE, TARGET]]
self._filtered_data["clusters"] = self._adata.obs[cluster_key].astype("string").astype("category").values
if clusters is None:
clusters = list(map(str, self._adata.obs[cluster_key].cat.categories))
if all(isinstance(c, str) for c in clusters):
clusters = list(product(clusters, repeat=2)) # type: ignore[no-redef,assignment]
clusters = sorted(
_check_tuple_needles(
clusters, # type: ignore[arg-type]
self._filtered_data["clusters"].cat.categories,
msg="Invalid cluster `{0!r}`.",
reraise=True,
)
)
clusters_flat = list({c for cs in clusters for c in cs})
data = self._filtered_data.loc[np.isin(self._filtered_data["clusters"], clusters_flat), :]
data["clusters"] = data["clusters"].cat.remove_unused_categories()
cat = data["clusters"].cat
cluster_mapper = dict(zip(cat.categories, range(len(cat.categories))))
gene_mapper = dict(zip(data.columns[:-1], range(len(data.columns) - 1))) # -1 for 'clusters'
data.columns = [gene_mapper[c] if c != "clusters" else c for c in data.columns]
clusters_ = np.array([[cluster_mapper[c1], cluster_mapper[c2]] for c1, c2 in clusters], dtype=np.uint32)
cat.rename_categories(cluster_mapper, inplace=True)
# much faster than applymap (tested on 1M interactions)
interactions_ = np.vectorize(lambda g: gene_mapper[g])(interactions.values)
n_jobs = _get_n_cores(kwargs.pop("n_jobs", None))
start = logg.info(
f"Running `{n_perms}` permutations on `{len(interactions)}` interactions "
f"and `{len(clusters)}` cluster combinations using `{n_jobs}` core(s)"
)
res = _analysis(
data,
interactions_,
clusters_,
threshold=threshold,
n_perms=n_perms,
seed=seed,
n_jobs=n_jobs,
numba_parallel=numba_parallel,
**kwargs,
)
res = {
"means": _create_sparse_df(
res.means,
index=pd.MultiIndex.from_frame(interactions, names=[SOURCE, TARGET]),
columns=pd.MultiIndex.from_tuples(clusters, names=["cluster_1", "cluster_2"]),
fill_value=0,
),
"pvalues": _create_sparse_df(
res.pvalues,
index= | pd.MultiIndex.from_frame(interactions, names=[SOURCE, TARGET]) | pandas.MultiIndex.from_frame |
from bittrex import Bittrex
import requests
import pandas as pd
import os
import bittrex_test as btt
import quandl_api_test as qat
from scrape_coinmarketcap import scrape_data
API_K = os.environ.get('bittrex_api')
API_S = os.environ.get('bittrex_sec')
if API_K is None:
API_K = os.environ.get('btx_key')
API_S = os.environ.get('btx_sec')
bt = Bittrex(API_K, API_S)
HOME_DIR = btt.get_home_dir()
MARKETS = btt.get_all_currency_pairs()
def get_balances():
bals = bt.get_balances()
if bals['success'] == True:
return pd.io.json.json_normalize(bals['result'])
else:
print('error!', bals['message'])
return None
def get_total_dollar_balance(bals):
btc_amts = []
dollar_amts = []
for i, r in bals.iterrows():
if 'BTC-' + r['Currency'] in MARKETS:
print('getting price for', r['Currency'])
t = btt.get_ticker('BTC-' + r['Currency'])
btc_amts.append(t['Last'] * r['Balance'])
else:
# have to find which market we have, the convert to BTC
if r['Currency'] == 'BTC':
btc_amts.append(r['Balance'])
else:
print('no BTC market for', r['Currency'])
bals_copy = bals.copy()
bals_copy['BTC_equivalent'] = btc_amts
usdt = btt.get_ticker('USDT-BTC')['Last']
bals_copy['USD_equivalent'] = bals_copy['BTC_equivalent'] * usdt
return bals_copy
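# Illustrative flow (hypothetical): value the whole account in USD
#     bals = get_balances()
#     if bals is not None:
#         bals_usd = get_total_dollar_balance(bals)
#         print(bals_usd[['Currency', 'BTC_equivalent', 'USD_equivalent']])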
def get_deposit_history():
dh = bt.get_deposit_history()
if dh['success'] == True:
df = pd.io.json.json_normalize(dh['result'])
df['LastUpdated'] = pd.to_datetime(df['LastUpdated'])
return df
else:
print('error!', dh['message'])
return None
def get_deposit_amts(df):
# market_data = qat.load_save_data()
# bt_df = qat.get_bitstamp_full_df(market_data)
eth = scrape_data()
btc = scrape_data('bitcoin')
aeon = scrape_data('aeon')
xmr = scrape_data('monero')
dep_dollars = []
for i, r in df.iterrows():
date = r['LastUpdated']
d = str(date.day).zfill(2)
m = str(date.month).zfill(2)
y = str(date.year)
if r['Currency'] == 'BTC':
price = btc.loc[y + m + d, 'usd_price'][0]
dep_dollars.append(price * r['Amount'])
elif r['Currency'] == 'ETH':
price = eth.loc[y + m + d, 'usd_price'][0]
dep_dollars.append(price * r['Amount'])
elif r['Currency'] == 'AEON':
price = aeon.loc[y + m + d, 'usd_price'][0]
dep_dollars.append(price * r['Amount'])
elif r['Currency'] == 'XMR':
price = xmr.loc[y + m + d, 'usd_price'][0]
dep_dollars.append(price * r['Amount'])
df['usd'] = dep_dollars
return df
def get_order_history():
hist = bt.get_order_history()
if hist['success']:
df = pd.io.json.json_normalize(hist['result'])
df['TimeStamp'] = | pd.to_datetime(df['TimeStamp']) | pandas.to_datetime |
#############################################################
# Begin defining Dash app layout
# code sections
# 1 Environment setup
# 2 Setup Dataframes
# 3 Define Useful Functions
# 4 Heatmap UI controls
# 5 Curves plot UI controls
# 6 Navbar definition
# 7 Blank figure to display during initial app loading
# 8 Overall app layout
# 9 Dynamic UI callbacks
# 10 Callback for Updating Heat Map Figure
# 11 Callback for Adding Rows to curve_plot_df (dataframe define curves to plot)
# 12 Callback for Updating Curves Plot Figure
# 13 Callback for Updating the first Epidemiology Sandbox Figure
# 14 Callbacks to Update UI of the Second Epidemiology Sandbox
# 15 Callback for Updating the Second Epidemiology Sandbox Figure
import time
import os
import platform
import json
import pickle
import base64
from urllib.request import urlopen
import boto3
import pandas as pd
import numpy as np
from scipy.integrate import solve_ivp
import matplotlib, matplotlib.cm as cm
import datetime as dt
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_bootstrap_components as dbc
from plotly import subplots
from plotly import graph_objects as go
#################################################################
# 1 Environment Setup
# setup global variables
proj_path = ""
print("The execution environment is: ")
print(platform.release())
if os.name == "nt":
# running on my local Windows machine
ENV = "local"
os.chdir("C:/Users/adiad/Anaconda3/envs/CovidApp36/covidapp/")
elif "microsoft" in platform.release():
# example: "4.19.104-microsoft-standard"
# running on wsl in a docker container I made
ENV = "docker-wsl"
proj_path = "/docker_app/"
elif ("aws" in platform.release()) | ("amzn" in platform.release()):
# examples: "4.4.0-1074-aws", "4.14.158-129.185.amzn2.x86_64"
# running in a docker container I made
ENV = "docker-aws"
else:
# running on heroku server
ENV = "heroku"
if ENV in ["docker-aws", "heroku"]:
# download covid data from aws s3 bucket
os.environ["AWS_CONFIG_FILE"] = proj_path + "secret_credentials/config"
os.environ["AWS_SHARED_CREDENTIALS_FILE"] = proj_path + "secret_credentials/credentials"
bucket_name = "my-covid-data-7918"
local_file_path = proj_path + "data_clean/"
covid_filenames = ["Johns_Hopkins_Clean.pkl", "init_heatmap.pkl"]
s3 = boto3.client("s3")
for file_name in covid_filenames:
os.remove(local_file_path + file_name)
s3.download_file(bucket_name, file_name, local_file_path + file_name)
print("Finished downloading covid data from AWS.")
# set graphic elements & color palette
invis = "rgba(0,0,0,0)"
update_jh_data = True # controls whether Johns Hopkins data will be updated
data_path = proj_path + "data_clean/"
secrets_path = proj_path + "secret_credentials/"
# setting up images for rendering
image_path = proj_path + "images/"
cross_icon_image = base64.b64encode(open(image_path + "icon.png", "rb").read())
herd_immunity_image = base64.b64encode(open(image_path + "Herd_Immunity_Fig.png", "rb").read())
# get mapbox token
token = open(secrets_path + ".mapbox_token").read()
# read US county geojson file
# from: https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json
with open(data_path + "us_county_geo.json") as f:
us_counties_json = json.load(f)
# read US county geojson file
# from: https://eric.clst.org/tech/usgeojson/ (States with 5m resolution)#
with open(data_path + "us_states_geo.json") as f:
us_states_json = json.load(f)
# read China province geojson file
# from: https://github.com/secsilm/plotly-choropleth-mapbox-demo/blob/master/china_province.geojson
with open(data_path + "china_province_geo2.json") as f:
china_json = json.load(f)
# read Australia state geojson file
# from: https://github.com/rowanhogan/australian-states/blob/master/states.geojson
with open(data_path + "australia_state_geo2.json") as f:
australia_json = json.load(f)
# read Canadian geojson file
# from: https://download2.exploratory.io/maps/canada_provinces.zip
with open(data_path + "canada_provinces_geo.json") as f:
canada_json = json.load(f)
# read world geojson file
# from: https://github.com/datasets/geo-countries/blob/master/data/countries.geojson
with open(data_path + "all_countries_geo.json") as f:
world_json = json.load(f)
# read initial heatmap figure file
with open(data_path + "init_heatmap.pkl", "rb") as f:
init_heatmap = pickle.load(f)
#################################################################
# 2 Setup Dataframes
# read dataframes from pickle files
df = pd.read_pickle(data_path + "Johns_Hopkins_Clean.pkl")
# add Active variables
def add_active_col(var_suffix, df):
confirmed = df["Confirmed" + var_suffix].values
recovered = np.clip(df["Recovered" + var_suffix].values, 0, None)
deaths = np.clip(df["Deaths" + var_suffix].values, 0, None)
df["Active" + var_suffix] = confirmed - recovered - deaths
# correct occurrences where Recovered + Deaths > Confirmed
# (where negative value rolls back to an enormous positive value)
mask = ((recovered + deaths) > confirmed)
df.loc[mask, "Active" + var_suffix] = 0
return df
df = add_active_col("", df)
df = add_active_col("PerDate", df)
df = add_active_col("PerCapita", df)
df = add_active_col("PerDatePerCapita", df)
# define a dataframe that defines which geographic areas to plot infection curves
curve_plot_data = [[0, "United States of America", "New York", "nan"],
[1, "United States of America", "Massachusetts", "nan"],
[2, "United States of America", "Indiana", "nan"]]
curve_plot_cols = ["Row ID", "Country/Region", "Province/State", "County"]
curve_plot_df = pd.DataFrame(curve_plot_data, columns=curve_plot_cols)
# define a dataframe that defines the dynamic parameter values for the simulation
# in sandbox 2
sandbox2_df = pd.DataFrame([[0, 14, 3.0, True, True], \
[50, 14, 1.5, False, True]], \
columns=["t", "d", "r", "In Base", "In Alt"])
#################################################################
# 3 Define Useful Functions
# converts numpy's datetime64 dtype (used by pandas) to datetime.datetime()
def numpy_dt64_to_dt(dt64):
day_timestamp_dt = (dt64 - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')
day_dt = dt.datetime.utcfromtimestamp(day_timestamp_dt)
return day_dt
# converts numpy's datetime64 dtype (used by pandas) to a string
def numpy_dt64_to_str(dt64):
day_dt = numpy_dt64_to_dt(dt64)
return day_dt.strftime("%b %d")
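# e.g. (illustrative): numpy_dt64_to_str(np.datetime64('2020-05-01')) returns 'May 01'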
# Define function for predicting epidemic, used in sandboxes
# assuming 1 person is infected in the whole population of size N
# and the params d & r are provided in a nested list arranged as:
# [[t0, d0, r0], [t1, d1, r1], ...]
# where t1, t2, etc. represent the beginning of new values for d & r
# dur defines the time point to terminate the simulation
def predict_sir(N, params_t, dur):
# define a function which
def SIR(t, y):
S = y[0]
I = y[1]
R = y[2]
return [-beta*S*I/N, beta*S*I/N-gamma*I, gamma*I]
    # define a function which extracts individual parameters given the time index
def get_params(t_ind):
# get basic parameters
t = params_t[t_ind][0]
d = params_t[t_ind][1]
r = params_t[t_ind][2]
# derive exponential function parameters
gamma = 1 / d
beta = r * gamma
return t, gamma, beta
    # simulated population sub-group sizes
sir_init_pop = [N - 1, 1, 0] # [S, I, R]
# set initial values for loop variables
epidemic_stopped = False
n_days = 0
continue_calc = True
removed = 0
n_periods = len(params_t)
period_ind = 0
t_period_loop = params_t[0][1] # sim will pause to check termination criterion
t_start, gamma, beta = get_params(period_ind)
if n_periods == 1:
t_period_end = t_period_loop
else:
period_ind_max = n_periods - 1
t_end, ignore1, ignore2 = get_params(period_ind + 1)
t_period_end = t_end
while continue_calc:
# predict SIR for loop period days
predict_period_sir = solve_ivp(SIR, [0, t_period_end], sir_init_pop, \
t_eval=np.arange(0, t_period_end, 1))
# append loop results to previous results
if removed == 0:
t = predict_period_sir["t"]
s = predict_period_sir["y"][0]
i = predict_period_sir["y"][1]
r = predict_period_sir["y"][2]
else:
# segmenting the sim into periods causes the first day's prediction
# to be a repeat of the results from the last loop's last day, so
# drop the first day
t = np.concatenate((t, t_start - 1 + predict_period_sir["t"][1:]))
s = np.concatenate((s, predict_period_sir["y"][0][1:]))
i = np.concatenate((i, predict_period_sir["y"][1][1:]))
r = np.concatenate((r, predict_period_sir["y"][2][1:]))
# update loop variables with new period results
n_days = len(t)
removed = r[-1]
sir_init_pop = [s[-1], i[-1], r[-1]]
# look for epidemic burnout
period_i = predict_period_sir["y"][1]
if period_i[-1] < period_i[0]:
# infected population is shrinking
if (period_i[0] - period_i[-1]) < 1:
# change in the size of the infected population
# over the loop period is < 1
epidemic_stopped = True
if n_periods > 1:
if period_ind_max > period_ind + 1:
# simulate the next period until its end
period_ind += 1
t_start, gamma, beta = get_params(period_ind)
t_end, ignore1, ignore2 = get_params(period_ind + 1)
t_period_end = t_end - t_start + 1
elif period_ind_max > period_ind:
# simulate the last period until termination criteria are met
period_ind += 1
t_start, gamma, beta = get_params(period_ind)
t_period_end = params_t[period_ind][1]
else:
# continue simulating the last period until termination criteria are met
t_start = t[-1] + 1
else:
# continue simulating the only period until termination criteria are met
t_start = t[-1] + 1
# determine whether to continue looping
if np.isinf(dur):
continue_calc = not epidemic_stopped
else:
continue_calc = (dur > n_days)
# trim results to desired duration
if len(t) > dur:
t = t[:dur + 1]
s = s[:dur + 1]
i = i[:dur + 1]
r = r[:dur + 1]
return np.column_stack((t, s, i, r))
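# usage sketch (hypothetical values): simulate a population of 10,000 in which the
# infectious period is 14 days and r drops from 3.0 to 1.5 on day 50, running until
# the epidemic burns out; the returned array has columns [t, S, I, R]
#   sir_results = predict_sir(10000, [[0, 14, 3.0], [50, 14, 1.5]], np.inf)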
# find the most recent date in the initial heatmap
init_days = np.sort(df[df["MapScope"] == "US Counties"].Date.unique())
init_heatmap_date = numpy_dt64_to_dt(init_days[-1])
# Basic setup of Dash app
external_stylesheets = [dbc.themes.COSMO]
btn_color = "primary"
navbar_color = "primary"
navbar_is_dark = True
# dash instantiation
app = dash.Dash(__name__, external_stylesheets=external_stylesheets, assets_folder='assets')
server = app.server
# adding Google Analytics
app.index_string = """<!DOCTYPE html>
<html>
<head>
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-44205806-4"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-44205806-4');
</script>
{%metas%}
<title>{%title%}</title>
{%favicon%}
{%css%}
</head>
<body>
{%app_entry%}
<footer>
{%config%}
{%scripts%}
{%renderer%}
</footer>
</body>
</html>"""
#################################################################
# 4 Heatmap UI controls
heat_ctrls_row1 = \
dbc.Row([
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Map Scope", addon_type="prepend"),
dbc.Select(
id="map-scope",
options=[
{"label": "Australian States", "value": "Australia"},
{"label": "Canadian Provinces", "value": "Canada"},
{"label": "Chinese Provinces", "value": "China"},
{"label": "US Counties", "value": "UScounties"},
{"label": "US States", "value": "USstates"},
{"label": "Whole World", "value": "World"}
],
value="UScounties"
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Heat Variable", addon_type="prepend"),
dbc.Select(
id="map-var",
options=[
{"label": "Confirmed", "value": "Confirmed"},
{"label": "Active", "value": "Active"},
{"label": "Recovered", "value": "Recovered"},
{"label": "Deaths", "value": "Deaths"}
],
value="Confirmed"
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("New or Total Cases", addon_type="prepend"),
dbc.Select(
id="map-calc",
options=[
{"label": "Total Cases to Date", "value": "Total"},
{"label": "New Cases on Date", "value": "PerDate"}
],
value="Total"
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Heat & Bar Scales", addon_type="prepend"),
dbc.Select(
id="map-scale",
options=[
{"label": "Linear", "value": "Linear"},
{"label": "Logarithmic", "value": "Logarithmic"}
],
value="Logarithmic"
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Normalize Heat & Bar", addon_type="prepend"),
dbc.Select(
id="map-norm-type",
options=[
{"label": "None", "value": "None"},
{"label": "Per Capita", "value": "PerCapita"}
],
value="None"
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Normalize Per", addon_type="prepend"),
dbc.Select(
id="map-norm-val",
options=[
{"label": "1 Capita", "value": 1},
{"label": "10 Capita", "value": 10},
{"label": "100 Capita", "value": 100},
{"label": "1k Capita", "value": 1000},
{"label": "10k Capita", "value": 10000},
{"label": "100k Capita", "value": 100000},
{"label": "1M Capita", "value": 1000000}
],
value=100000
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
], style={'padding-left': 20, 'padding-right': 20, 'margin-top': 5})
heat_cntrls_accordion = html.Div([
dbc.Card([
dbc.CardHeader(
html.H1(
dbc.Button(
"Plot Controls",
color=btn_color,
id="heat-edit-toggle",
), style={"padding-bottom": 6}
), style={"padding-bottom": 0, "padding-top": 0}
),
dbc.Collapse([
heat_ctrls_row1],
id="collapse-heat-edit",
),
]),
], className="accordion")
#################################################################
# 5 Curves plot UI controls
curve_ctrls_row1 = \
dbc.Row([
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Country", addon_type="prepend"),
dbc.Select(
id="curve-country",
options=[{"label": country, "value": country} for country in \
np.sort(df["Country/Region"].unique())],
value="United States of America"
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("State", addon_type="prepend"),
dbc.Select(
id="curve-state",
options=[],
disabled=True,
value=""
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("County", addon_type="prepend"),
dbc.Select(
id="curve-county",
options=[],
disabled=True,
value=""
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
], style={'padding-left': 20, 'padding-right': 20, 'margin-top': 5})
curve_ctrls_row2 = \
dbc.Row([
dbc.Col(html.Div(""), md=3, xs=1, style={'textAlign': 'right', 'margin-top': 0}),
dbc.Col(html.Div([
dbc.Button("Add", id="curve-add", n_clicks=0, color=btn_color)
]), md=1, xs=2, style={"textAlign": "center", "margin-top": 0, "padding-left": 0}),
# Hidden div inside the app that tracks how many times the Add button has been clicked
    # This makes it possible to determine whether the Add button triggered the edit_plotted_curves callback
html.Div(0, id='curve-add-click-count', style={'display': 'none'}),
dbc.Col(html.Div([
dbc.Button("Clear All", id="curve-clear", n_clicks=0, color=btn_color)
]), md=2, xs=2, style={"textAlign": "center", "margin-top": 0, "padding-left": 0}),
# Hidden div inside the app that tracks how many times the Clear All button has been clicked
    # This makes it possible to determine whether the Clear All button triggered the edit_plotted_curves callback
html.Div(0, id='curve-clear-click-count', style={'display': 'none'}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Drop Row by ID", addon_type="prepend"),
dbc.Select(
id="curve-drop",
options=[{"label": val, "value": val} for val in curve_plot_df["Row ID"].values],
value=""
)
])
]), md=3, xs=6, style={"padding": "5px 10px"}),
dbc.Col(html.Div(""), md=3, xs=1, style={'textAlign': 'right', 'margin-top': 0})
], style={'padding-left': 20, 'padding-right': 20, 'margin-top': 5})
curve_ctrls_row3 = \
dbc.Row([
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("New or Total Case", addon_type="prepend"),
dbc.Select(
id="curve-calc",
options=[
{"label": "Total Cases to Date", "value": "Total"},
{"label": "New Cases on Date", "value": "PerDate"}
],
value="PerDate"
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Normalize", addon_type="prepend"),
dbc.Select(
id="curve-norm-type",
options=[
{"label": "None", "value": "None"},
{"label": "Per Capita", "value": "PerCapita"}
],
value="None"
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Normalize Per", addon_type="prepend"),
dbc.Select(
id="curve-norm-val",
options=[
{"label": "1 Capita", "value": 1},
{"label": "10 Capita", "value": 10},
{"label": "100 Capita", "value": 100},
{"label": "1k Capita", "value": 1000},
{"label": "10k Capita", "value": 10000},
{"label": "100k Capita", "value": 100000},
{"label": "1M Capita", "value": 1000000}
],
value=100000
)
])
]), md=4, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Zero Date", addon_type="prepend"),
dbc.Select(
id="curve-zero",
options=[
{"label": "None (just use dates)", "value": "None"},
{"label": "When 1 case is reported", "value": "Total"},
{"label": "When 1 case per 10k capita", "value": "PerCapita"},
],
value="None"
)
])
]), md=6, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Case Scale", addon_type="prepend"),
dbc.Select(
id="curve-scale",
options=[
{"label": "Linear", "value": "linear"},
{"label": "Logarithmic", "value": "log"},
],
value="log"
)
])
]), md=6, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Case Types", addon_type="prepend"),
dbc.Checklist(
id="curve-type",
options=[
{"label": "Confirmed", "value": "Confirmed"},
{"label": "Active", "value": "Active"},
{"label": "Recovered", "value": "Recovered"},
{"label": "Deaths", "value": "Deaths"}
],
value=["Confirmed", "Deaths"],
inline=True,
custom=True,
style={"display": "inline-block", "margin-left": 10, "margin-top": 8}
)
])
]), xl=6, lg=7, md=12, xs=12, style={"padding": "5px 10px"}),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon(
"Tune Curve Fit",
addon_type="prepend",
id="curve-avg-label",
style={"width": 140, "padding-right": 0}
),
html.Span([
html.Span([
dcc.Slider(
id="curve-avg-period",
marks={1: "1", 7: "7", 14: "14", 21: "21", 28: "28"},
min=1,
max=28,
step=1,
value=14,
included=False
),
], style={"width": "100%", "display": "inline-block"})
], style={"width": "60%", "text-align": "left", "padding": "10px 0 0 0"})
]),
dbc.Tooltip(
"Curve fitting is calculated with a moving average. This parameter " + \
"determines the max number of days to use in averaging each point.",
target="curve-avg-label",
)
]), xl=6, lg=5, md=8, xs=12, style={"padding": "5px 10px"}),
], style={'padding-left': 20, 'padding-right': 20, 'margin-top': 5, 'margin-bottom': 10})
data_tbl = \
dbc.Row([
dbc.Col(
html.Div(""), md=3, xs=1, style={'textAlign': 'right', 'margin-top': 0}
),
dbc.Col(html.Div([
dash_table.DataTable(
id="data-table",
data=curve_plot_df.to_dict('records'),
columns=[{"id": c, "name": c}
for c in curve_plot_df.columns],
editable=False,
style_cell={
'textAlign': 'center'
},
style_cell_conditional=[
{
'if': {'column_id': c},
'textAlign': 'center'
} for c in ['Date', 'Region']
],
style_data_conditional=[
{
'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(248, 248, 248)'
}
],
style_header={
'backgroundColor': 'rgb(230, 230, 230)',
'fontWeight': 'bold'
}
)]), md=6, xs=10, style={'textAlign': 'right', 'margin-top': 0}
),
dbc.Col(
html.Div(""), md=3, xs=1, style={'textAlign': 'right', 'margin-top': 0}
),
], style={'margin-bottom': 10, 'margin-top': 10})
curve_cntrls_accordion = html.Div([
dbc.Card([
dbc.CardHeader(
html.H1(
dbc.Button(
"Curve Picker",
color=btn_color,
id="curve-edit-toggle",
), style={"padding-bottom": 6}
), style={'padding-bottom': 0, 'padding-top': 0}
),
dbc.Collapse([
curve_ctrls_row1,
curve_ctrls_row2,
# visualize the curve_plot_df
data_tbl,
            # Hidden div inside the app that allows the curve_plot_df to be shared among callbacks
html.Div([curve_plot_df.to_json(date_format='iso', orient='split')],
id='curve-plot-df', style={'display': 'none'})],
id="collapse-curve-edit",
),
]),
dbc.Card([
dbc.CardHeader(
html.H1(
dbc.Button(
"Plot Settings",
color=btn_color,
id="curve-setting-toggle",
), style={"padding-bottom": 6}
), style={'padding-bottom': 0, 'padding-top': 0}
),
dbc.Collapse([
curve_ctrls_row3],
id="collapse-curve-setting",
),
]),
], className="accordion")
#################################################################
# 6 Navbar definition
dropdown_menu_items = dbc.DropdownMenu(
children=[
dbc.DropdownMenuItem("Discussion of this App", href="https://buckeye17.github.io/COVID-Dashboard/"),
dbc.DropdownMenuItem("About the Author", href="https://buckeye17.github.io/about/"),
dbc.DropdownMenuItem("LinkedIn Profile", href="https://www.linkedin.com/in/chris-raper/"),
dbc.DropdownMenuItem("Github Repo", href="https://github.com/buckeye17/seecovid"),
dbc.DropdownMenuItem("Powered by plotly|Dash", href="https://plotly.com/dash/")
],
nav=True,
in_navbar=True,
label="Menu",
)
#################################################################
# 7 Blank figure to display during initial app loading
axopts = dict(showticklabels=False)
blank_fig = go.Figure()
blank_fig.update_layout(
paper_bgcolor=invis,
plot_bgcolor=invis,
xaxis=axopts,
yaxis=axopts,
annotations=[dict(x=2.5,
y=4,
xref="x1",
yref="y1",
text="Please wait while the heatmap is initialized",
showarrow=False,
font=dict(size=16)
)]
)
# define sandbox2 dynamic table
sandbox2_tbl = \
html.Div([
dash_table.DataTable(
id="sandbox2-data-table",
data=sandbox2_df.to_dict('records'),
columns=[{"id": c, "name": c} for c in sandbox2_df.columns],
editable=False,
style_cell={
'fontSize': '14px',
'textAlign': 'center',
'width': '100px',
'minWidth': '100px',
'maxWidth': '100px'
},
style_data_conditional=[
{
'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(248, 248, 248)'
}
],
style_header={
'backgroundColor': 'rgb(230, 230, 230)',
'fontWeight': 'bold'
}
)
], style={"margin": 10, "width": "40%", "padding-left": 15})
#################################################################
# 8 Overall app layout
app.layout = html.Div([
# Banner/header block
dbc.Navbar(
dbc.Container([
# left side of navbar: logo & app name
html.A(
# Use row and col to control vertical alignment of logo / brand-
dbc.Row(
[
dbc.Col(html.Img(
src='data:image/png;base64,{}'.format(cross_icon_image.decode()),
height="40px"
)),
dbc.Col(dbc.NavbarBrand([
"COVID-19 Dashboard",
html.Br(),
html.Div("Developed by <NAME>", style={"fontSize": "small"})
], className="ml-2")),
],
align="center", no_gutters=True, className="ml-2",
),
href="https://seecovid.herokuapp.com/",
),
# right side of navbar: nav links & menu
dbc.NavbarToggler(id="navbar-toggler"),
dbc.Collapse(
dbc.Nav([
dbc.NavItem(dbc.NavLink("My Portfolio", href="https://buckeye17.github.io/")),
dropdown_menu_items
], className="ml-auto", navbar=True, style={"margin-right": 100}),
id="navbar-collapse", navbar=True,
),
]),
color=navbar_color,
dark=navbar_is_dark
),
# define tabs which provide different perspectives on data
dbc.Tabs([
# heatmap tab
dbc.Tab([
heat_cntrls_accordion,
dbc.Row(dbc.Spinner(type="grow", color="primary", fullscreen=True), id="initial-spinner"),
# Date Picker
dbc.Row([
dbc.Col(html.Div([""]), xs=5),
dbc.Col(html.Div([
dbc.InputGroup([
dbc.InputGroupAddon("Select Plotted Date:", addon_type="prepend"),
dcc.DatePickerSingle(
id="heat-date-picker",
date=init_heatmap_date,
display_format="MMM Do, YYYY"
)
])
]), xs=7),
], style={'padding-left': 20, 'padding-right': 20, 'margin-top': 15, 'margin-bottom': 0}),
dbc.Row(
dbc.Col(html.Div([dcc.Loading(dcc.Graph(id="heatmap", figure=blank_fig), type="cube")]))
),
], label="Heatmaps"),
# curves tab
dbc.Tab([
curve_cntrls_accordion,
dbc.Row(dbc.Col(html.Div([dcc.Loading(dcc.Graph(id="curves",
responsive=True,
style={"height": 400}),
type="cube")]))),
], label="The Curves"),
# epidemiology tab
dbc.Tab([
# this tab will consist of a single row, which contains a single column
# that is centered horizontally in the page
dbc.Row([
dbc.Col([
# section header for Sandbox #1
html.Div([dcc.Markdown('''
#### Sandbox #1: Varying Basic Parameters of Infectious Disease
''', style={"margin": 20, "textAlign": "center"}
)]),
# intro for Sandbox #1
html.Div([dcc.Markdown('''
The following sandbox allows you to simulate two scenarios of a generic epidemic,
assuming a population of 10,000 people and that 1 of them is infected
on day zero. The sandbox allows you to adjust the underlying parameters of
the epidemic. These parameters are:
* **d**: the average number of days someone is infectious
* **r**: AKA the basic reproduction number, the number of people an infectious
                    person would infect if everyone they contact is susceptible.
With these parameters, the sandbox will predict how the fixed population
will move through the 3 stages of infection: susceptible, infected,
removed. For further discussion of the underlying modeling method, it has
been provided further below.
''', style={"margin": 20, "textAlign": "justify"}
)]),
# Sandbox #1
html.Div([
# Sandbox #1 title
html.Div([dcc.Markdown('''
##### Epidemic Simulation Sandbox #1
''', style={"margin": 20, "textAlign": "center"}
)]),
                        # UI for Scenario #1 of Sandbox #1
dbc.Row([
dbc.Col(["Scenario #1"], md=2, sm=12, \
style={"text-align": "center", "margin": "10px 0"}),
dbc.Col([
html.B("d0"), ": ", html.Span("28", id="sandbox1-scenario1-d-text"),
], md=1, sm=2, style={"text-align": "right", "margin": "10px 0", \
"padding-right": 0}),
dbc.Col([
dcc.Slider(
id="sandbox1-scenario1-d",
marks={1: "1", 7: "7", 14: "14", 21: "21", 28: "28"},
min=1,
max=28,
step=1,
value=14,
included=False
)
], md=4, sm=10, style={"margin": "10px 0", "padding-left": 0}),
dbc.Col([
html.B("r0"), ": ", html.Span("8", id="sandbox1-scenario1-r-text"),
], md=1, sm=2, style={"text-align": "right", "margin": "10px 0", \
"padding-right": 0}),
dbc.Col([
dcc.Slider(
id="sandbox1-scenario1-r",
marks={0: "0", 1: "1", 2: "2", 4: "4", 6: "6", 8: "8"},
min=0,
max=8,
step=0.1,
value=1.5,
included=False
)
], md=4, sm=10, style={"margin": "10px 0", "padding-left": 0}),
]),
                        # UI for Scenario #2 of Sandbox #1
dbc.Row([
dbc.Col(["Scenario #2"], md=2, sm=12, \
style={"text-align": "center", "margin": "10px 0"}),
dbc.Col([
html.B("d0"), ": ", html.Span("28", id="sandbox1-scenario2-d-text"),
], md=1, sm=2, style={"text-align": "right", "margin": "10px 0", \
"padding-right": 0}),
dbc.Col([
dcc.Slider(
id="sandbox1-scenario2-d",
marks={1: "1", 7: "7", 14: "14", 21: "21", 28: "28"},
min=1,
max=28,
step=1,
value=14,
included=False
)
], md=4, sm=10, style={"margin": "10px 0", "padding-left": 0}),
dbc.Col([
html.B("r0"), ": ", html.Span("8", id="sandbox1-scenario2-r-text"),
], md=1, sm=2, style={"text-align": "right", "margin": "10px 0", \
"padding-right": 0}),
dbc.Col([
dcc.Slider(
id="sandbox1-scenario2-r",
marks={0: "0", 1: "1", 2: "2", 4: "4", 6: "6", 8: "8"},
min=0,
max=8,
step=0.1,
value=3,
included=False
)
], md=4, sm=10, style={"margin": "10px 0", "padding-left": 0}),
]),
# Area Plot for Sandbox #1
dcc.Loading(dcc.Graph(id="sandbox1-area-fig",
responsive=True,
style={"height": 400}), type="dot"),
# Lines Plot for Sandbox #1
dcc.Loading(dcc.Graph(id="sandbox1-line-fig",
responsive=True,
style={"height": 200}), type="dot"),
], style={"border-style": "solid", "border-color": "#aaaaaa", "padding": 10}),
# section header for Sandbox #2
html.Div([dcc.Markdown('''
#### Sandbox #2: Time-Dependence of Basic Parameters of Infectious Disease
''', style={"margin-top": 40, "margin-bottom": 20, "textAlign": "center"}
)]),
# intro for Sandbox #2
html.Div([dcc.Markdown('''
This second sandbox is similar to the first, but it allows you
to vary the parameters of the epidemic in time, whereas the first
                    sandbox simulated constant parameter values. With COVID-19,
social distancing was implemented to reduce the **r** parameter
of the disease (AKA "slowing the spread") and would reduce the
total number of infected if sustained.
                    In this sandbox you can choose the initial parameter values, then
add time points when the parameters will change values. You can add
as many time points as you want. The Baseline scenario will
                    consist of all the parameter value changes except for the final
change. The Alternate scenario will consist of all the parameter value
changes. The "In Base" and "In Alt" table columns should clarify
this point.
''', style={"margin": 20, "textAlign": "justify"}
)]),
# Sandbox #2
html.Div([
# Title for Sandbox #2
html.Div([dcc.Markdown('''
##### Epidemic Simulation Sandbox #2
''', style={"padding": "20px 0", "textAlign": "center", \
"border-bottom": "solid", "border-width": "thin"}
)]),
# UI for initial conditions of Sandbox #2
dbc.Row([
dbc.Col(["Initial (t0) Values"], md=2, sm=12, \
style={"text-align": "center", "margin": "10px 0"}),
dbc.Col([
html.B("d0"), ": ", html.Span("28", id="sandbox2-baseline-d-text"),
], md=1, sm=2, style={"text-align": "right", "margin": "10px 0", \
"padding-right": 0}),
dbc.Col([
dcc.Slider(
id="sandbox2-baseline-d",
marks={1: "1", 7: "7", 14: "14", 21: "21", 28: "28"},
min=1,
max=28,
step=1,
value=14,
included=False
)
], md=4, sm=10, style={"margin": "10px 0", "padding-left": 0}),
dbc.Col([
html.B("r0"), ": ", html.Span("8", id="sandbox2-baseline-r-text"),
], md=1, sm=2, style={"text-align": "right", "margin": "10px 0", \
"padding-right": 0}),
dbc.Col([
dcc.Slider(
id="sandbox2-baseline-r",
marks={0: "0", 1: "1", 2: "2", 4: "4", 6: "6", 8: "8"},
min=0,
max=8,
step=0.1,
value=3,
included=False
)
], md=4, sm=10, style={"margin": "10px 0", "padding-left": 0}),
]),
# UI for adding or editing dynamic values of Sandbox #2
                            # these UI elements have a light blue background to distinguish
# their function from the row above, which pertains to
                            # initial values, not dynamic values
html.Div([
# UI for defining new dynamic value of d & r in Sandbox #2
dbc.Row([
dbc.Col(["New Values at time t"], md=2, sm=12, \
style={"text-align": "center", "margin": "10px 0"}),
dbc.Col([
html.B("d"), html.Span(": "), html.Span("28", id="sandbox2-new-d-text"),
], md=1, sm=2, style={"text-align": "right", "margin": "10px 0", \
"padding-right": 0}),
dbc.Col([
dcc.Slider(
id="sandbox2-new-d",
marks={1: "1", 7: "7", 14: "14", 21: "21", 28: "28"},
min=1,
max=28,
step=1,
value=14,
included=False
)
], md=4, sm=10, style={"margin": "10px 0", "padding-left": 0}),
dbc.Col([
html.B("r"), html.Span(": "), html.Span("8", id="sandbox2-new-r-text"),
], md=1, sm=2, style={"text-align": "right", "margin": "10px 0", \
"padding-right": 0}),
dbc.Col([
dcc.Slider(
id="sandbox2-new-r",
marks={0: "0", 1: "1", 2: "2", 4: "4", 6: "6", 8: "8"},
min=0,
max=8,
step=0.1,
value=1.5,
included=False
)
], md=4, sm=10, style={"margin": "10px 0", "padding-left": 0}),
]),
# UI for defining the time point when the new dynamic values
                                # of d & r will take effect, as well as to add, clear & edit
# these dynamic values for Sandbox #2
dbc.Row([
dbc.Col([dbc.InputGroup([
dbc.InputGroupAddon("t=", addon_type="prepend"),
dbc.Input(id="sandbox2-new-t", placeholder="", type="number", min=0),
dbc.Tooltip(dcc.Markdown('''
                                            Enter the time (in days) when **d** & **r** values
should change. The value must be positive.
'''
), target="sandbox2-new-t"
),
])], md=3, sm=4, style={"margin": "10px 0"}),
dbc.Col([
dbc.Button("Add", id="sandbox2-add", n_clicks=0, color=btn_color)
], md=2, sm=4, style={"margin": "10px 0"}),
# Hidden span inside the app that tracks how many times the Add button has been clicked
                                    # This makes it possible to determine whether the Add button triggered the edit_plotted_curves callback
html.Span(0, id='sandbox2-add-click-count', style={'display': 'none'}),
dbc.Col([
dbc.Button("Clear All", id="sandbox2-clear", n_clicks=0, color=btn_color)
], md=2, sm=4, style={"margin": "10px 0"}),
# Hidden span inside the app that tracks how many times the Clear All button has been clicked
                                    # This makes it possible to determine whether the Clear All button triggered the edit_plotted_curves callback
html.Span(0, id='sandbox2-clear-click-count', style={'display': 'none'}),
dbc.Col([
dbc.InputGroup([
dbc.InputGroupAddon("Drop Row @ t=", addon_type="prepend"),
dbc.Select(
id="sandbox2-drop",
options=[{"label": val, "value": val} for val in sandbox2_df.t.values],
value=""
)
]),
], md=5, sm=12, style={"margin": "10px 0"})
]),
], style={"background-color": "#e8f6fc", "padding": 10}),
                            # UI to display the current dynamic values in table form for
# both the baseline and alternate scenarios for Sandbox #2
dbc.Row([
dbc.Col([
html.Div("All Dynamic Values", \
style={"padding-top": 10, "text-align": "center"}),
# visualize the sandbox2_df
sandbox2_tbl,
                                    # Hidden span inside the app that allows the sandbox2_df to be shared among callbacks
html.Span([
sandbox2_df.to_json(date_format='iso', orient='split')
], id='sandbox2-df', style={'display': 'none'})
], width=9),
], justify="center"),
dcc.Loading(dcc.Graph(id="sandbox2-area-fig",
responsive=True,
style={"height": 400}
), type="dot"),
dcc.Loading(dcc.Graph(id="sandbox2-lines-fig",
responsive=True,
style={"height": 200}
), type="dot"),
], style={"border-style": "solid", "border-color": "#aaaaaa"}),
# section header for discussing modeling methodology
html.Div([dcc.Markdown('''
#### Examining the Fundamentals of Epidemics
''', style={"margin": 20, "textAlign": "center"}
)]),
# body of section discussing modeling methodology
html.Div([dcc.Markdown('''
##### Introduction
The sandboxes above use a simple class of models for epidemics called
compartmental models. Specifically they use an
[SIR compartmental model](https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology),
which segments a population into 3 stages of infection. These stages are
named **Susceptible**, **Infected** and **Removed**. The meaning of these
segments should be self-explanatory, with the clarification that the Removed
segment includes those who have survived the infection (then becoming immune)
as well as those who have died from it.
SIR models have two parameters which govern how quickly infection spreads through
the population. The first is **d**, which represents the average time
someone is infectious. The second is **r0** (pronounced r naught), representing
the average number of people a single infected person would infect
if everyone they contact is susceptible. In some simulations, this value may
change with time, in which case **r0** is the initial value of **r**.
All that remains before simulating an epidemic is to make some assumptions
about the initial condition of the population. The sandboxes above assumed the
population has 10,000 people, with 1 infected person and the remainder are
susceptible.
##### Examining Simulation Results
An epidemic is technically defined as a disease which has **r0** > 1. If a
disease yields **r0** < 1, then each chain of infections will die out. But it
should be noted that **r0** is not solely dependent on the nature of the
                    disease. It also depends on the behavior of the disease's host. So the occurrence
of an epidemic depends on the combination of a disease's efficiency to infect
the susceptible as well as the host's social behavior.
The following figure depicts two scenarios. Scenario 1 assumes **d** = 10 days and
**r0** = 1.5 people while Scenario 2 assumes **d** = 10 and **r0** = 3. Notice
both of these epidemic scenarios end without the disease infecting the whole
population. If **r0** > 1, this occurs because the number of infected people
                    will peak when the susceptible people make up a fraction of 1/**r0** of the total
population. After this point, the number of infected will decline until it
reaches zero. The combination of the declining susceptible sub-population along
                    with the recovery or death of infected people ensures that the epidemic will die out
before it infects everyone. This phenomenon is called **herd immunity**.
''', style={"margin": 20, "textAlign": "justify"}
)]),
dbc.Row(dbc.Col(
html.Div(
html.Img(src="data:image/png;base64,{}".format(herd_immunity_image.decode()),
height=300,
style={"display": "inline-block"}
),
style={"text-align": "center"}
),
width=12,
)),
html.Div([dcc.Markdown('''
Also note that in the two scenarios above, herd immunity was reached with
different sizes of the population never being infected (i.e. those who are still
susceptible). Scenario #1 ends the epidemic with 4,175 never being infected,
while Scenario #2 ends with 595. This illustrates that herd immunity is not only
dependent on the parameters of the epidemic, but is also very sensitive to those
values. The difference was solely due to **r0** being 1.5 or 3.
##### Examining Weaknesses of SIR Models
One manner in which SIR models over-simplify reality is that they assume that
                    there is no variation in the model parameters. Even if the parameters
are allowed to change with time, they still assume that at each time point the
infected will be sick for X days on average and will infect Y people. But in
reality, some people will recover quicker than others, some will shed more
virus and some will be more social.
                    This leads to the concept of so-called "super-spreaders". These
                    people account for a disproportionate number of infections. One example is a South Korean
woman referred to as patient 31. Through contact tracing the government had
identified 3,700 COVID-19 patients who could be traced back to her, representing
60% of all known cases by March 1 in South Korea. This was reported by the
[Wall Street Journal](https://www.wsj.com/articles/why-a-south-korean-church-was-the-perfect-petri-dish-for-coronavirus-11583082110).
Compartmental models do not account for any variation in parameters as exhibited
with super-spreaders.
Another shortcoming of this modeling method is that the parameter **d** is a little
                    misleading. If **d** = 14, then the number of infected people removed each day
                    (by recovery or death) is calculated by dividing the number of currently infected
                    by 14. This implies that when the number of infected is peaking,
the rate of removal will be greatest at this time. Conversely, when the number of
infected is small, the rate of recovery will be much slower. In reality, the
number of infected should not affect the rate of recovery. This is why **d**
is referred to as an "average" number of infectious days, because this
characteristic actually varies with time in the simulation, even when **d**
is constant.
If you'd like more information on this subject, I would recommend the following
YouTube video.
''', style={"margin": 20, "textAlign": "justify"}
)]),
html.Div(
html.Iframe(width="560", height="315", src="https://www.youtube.com/embed/gxAaO2rsdIs"),
style={"padding": 20})
], sm=12, md=10, xl=8),
], justify="center")
], label="Epidemiology Sandbox"),
# links tab
dbc.Tab([
# this tab will consist of a single row, which contains a single column
# that is centered horizontally in the page
dbc.Row([
dbc.Col([
html.Div([dcc.Markdown('''
##### Useful Dashboards & Visualizations
* [Johns Hopkins COVID-19 Dashboard](https://www.arcgis.com/apps/opsdashboard/index.html#/bda7594740fd40299423467b48e9ecf6)
* [CDC's COVID-NET](https://gis.cdc.gov/grasp/COVIDNet/COVID19_5.html): Provides US Demographics for COVID-19
* [University of Washington IHME COVID-19 Predictions for US](https://covid19.healthdata.org/united-states-of-america)
* [University of Minnesota Center for Infectious Disease Research and Policy](https://www.cidrap.umn.edu/): Provides latest research news on many infectious diseases, including COVID-19
* [Covidly.com](https://covidly.com/): Another COVID-19 dashboard
* Many US state health agencies have produced great COVID-19 dashboards for their state. Just search for them.
##### References
SIR Model Help:
* [Wikipedia](https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology)
* [Oregon State University presentation](http://sites.science.oregonstate.edu/~gibsonn/Teaching/MTH323-010S18/Supplements/Infectious.pdf)
* [A blog post](https://www.lewuathe.com/covid-19-dynamics-with-sir-model.html)
All of the data sources used for this dashboard:
* [Johns Hopkins COVID data CSV files](https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_daily_reports)
* [Australian State Populations](https://en.wikipedia.org/wiki/States_and_territories_of_Australia)
* [Australian State Geo JSON File](https://github.com/rowanhogan/australian-states/blob/master/states.geojson)
* [Canadian Province Populations](https://en.wikipedia.org/wiki/Population_of_Canada_by_province_and_territory)
* [Canadian Province Geo JSON File](https://download2.exploratory.io/maps/canada_provinces.zip)
* [Chinese Province Populations](https://en.wikipedia.org/wiki/Provinces_of_China#List_of_province-level_divisions)
* [Chinese Province Geo JSON File](https://github.com/secsilm/plotly-choropleth-mapbox-demo/blob/master/china_province.geojson)
* [US County Populations (2019 Census Estimate)](https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/co-est2019-alldata.csv)
* [US County Geo JSON File](https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json)
* [US State Geo JSON File](https://eric.clst.org/tech/usgeojson/)
* [All other Country Populations](https://en.wikipedia.org/wiki/List_of_countries_by_population_%28United_Nations%29)
* [All Countries Geo JSON File](https://github.com/datasets/geo-countries/blob/master/data/countries.geojson)
''', style={"margin": 20}
)])
], sm=12, md=10, xl=8, style={"border": "solid", "border-width": "thin", "margin-top": 40}),
], justify="center")
], label="Links & References")
])
])
#################################################################
# 9 Dynamic UI callbacks
# add callback for toggling the right nav menu collapse on small screens
@app.callback(
Output("navbar-collapse", "is_open"),
[Input("navbar-toggler", "n_clicks")],
[State("navbar-collapse", "is_open")],
)
def toggle_navbar_collapse(n, is_open):
if n:
return not is_open
return is_open
# callback for toggling the accordion sections that hold the heatmap and curve plot controls
@app.callback(
[Output("collapse-heat-edit", "is_open"),
Output("collapse-curve-edit", "is_open"),
Output("collapse-curve-setting", "is_open")],
[Input("heat-edit-toggle", "n_clicks"),
Input("curve-edit-toggle", "n_clicks"),
Input("curve-setting-toggle", "n_clicks")],
[State("collapse-heat-edit", "is_open"),
State("collapse-curve-edit", "is_open"),
State("collapse-curve-setting", "is_open")])
def toggle_accordion(n_heat, n_curve_edit, n_curve_set, is_open_heat, \
is_open_curve_edit, is_open_curve_set):
ctx = dash.callback_context
if ctx.triggered:
button_id = ctx.triggered[0]["prop_id"].split(".")[0]
else:
return False, False, False
if button_id == "heat-edit-toggle" and n_heat:
return not is_open_heat, is_open_curve_edit, is_open_curve_set
elif button_id == "curve-edit-toggle" and n_curve_edit:
return is_open_heat, not is_open_curve_edit, is_open_curve_set
elif button_id == "curve-setting-toggle" and n_curve_set:
return is_open_heat, is_open_curve_edit, not is_open_curve_set
# define curves tab control callbacks
# add callback for defining the contents of the State dropdown
@app.callback(
[Output("curve-state", "options"),
Output("curve-state", "value"),
Output("curve-state", "disabled")],
[Input("curve-country", "value")]
)
def country_selected(country):
default_val = "All of " + country
options = [{"label": default_val, "value": "nan"}]
states_ls = np.sort(df.loc[df["Country/Region"] == country, "Province/State"].unique().tolist())
states_ls = states_ls[states_ls != "nan"]
state_options = [{"label": state, "value": state} for state in states_ls]
if len(states_ls) > 0:
options.extend(state_options)
return options, default_val, False
# add callback for defining the contents of the County dropdown
@app.callback(
[Output("curve-county", "options"),
Output("curve-county", "value"),
Output("curve-county", "disabled")],
[Input("curve-state", "value")]
)
def state_selected(state):
if state == "":
# no state has been selected, so don't give county options
options = []
default_value = ""
county_disabled = True
    elif state.startswith("All of ") or (state == "nan"):
# whole state has been selected, so don't give county options
options = []
default_value = ""
county_disabled = True
else:
# a state was selected, determine county options
county_disabled = False
default_value = "All of " + state
options = [{"label": default_value, "value": "nan"}]
county_ls = np.sort(df.loc[df["Province/State"] == state, "County"].unique().tolist())
county_ls = county_ls[county_ls != "nan"]
county_options = [{"label": county, "value": county} for county in county_ls]
if len(county_ls) > 0:
options.extend(county_options)
return options, default_value, county_disabled
#################################################################
# 10 Callback for Updating Heat Map Figure
@app.callback(
[Output("heatmap", "figure"),
Output("initial-spinner", "style"),
Output("heat-date-picker", "min_date_allowed"),
Output("heat-date-picker", "max_date_allowed")],
[Input("map-scope", "value"),
Input("map-var", "value"),
Input("map-calc", "value"),
Input("map-scale", "value"),
Input("map-norm-type", "value"),
Input("map-norm-val", "value"),
Input("heat-date-picker", "date")],
[State("initial-spinner", "style")]
)
def update_heatmap(map_scope, map_var, map_calc, map_scale, map_norm_type, map_norm_val, map_date,
init_spinner_style):
#tic = time.perf_counter()
#toc_a = tic
#toc_b = tic
    # dbc.Select passes its value back as a string, so cast it to an int
map_norm_val = int(map_norm_val)
# test if this is the initial execution of this callback
is_init = (init_spinner_style is None)
    # on initial load reuse the precomputed heatmap; only generate a new one when a control changes
if is_init:
fig = init_heatmap
# determine valid date range for the date picker
plot_df = df[df["MapScope"] == "US Counties"]
days = np.sort(plot_df.Date.unique())
picker_min_date = numpy_dt64_to_dt(days[0])
picker_max_date = numpy_dt64_to_dt(days[-1])
else:
# set null values of map parameters
if map_calc == "Total":
map_calc = ""
if map_norm_type == "None":
map_norm_type = ""
plot_var = map_var + map_calc + map_norm_type
# set variables conditioned on the map scope
if map_scope == "UScounties":
geo_json = us_counties_json
plot_df = df[df["MapScope"] == "US Counties"]
plot_df["AreaLabel"] = plot_df.County.astype(str) + ", " + plot_df["Province/State"].astype(str)
location_var = "FIPS"
geo_json_name_field = None
map_center = {"lat": 37.0902, "lon": -95.7129}
title = "US counties"
init_zoom = 3
elif map_scope == "USstates":
geo_json = us_states_json
plot_df = df[df["MapScope"] == "US States"]
plot_df["AreaLabel"] = plot_df["Province/State"].astype(str)
location_var = "Province/State"
geo_json_name_field = "properties.NAME"
map_center = {"lat": 37.0902, "lon": -95.7129}
title = "US states"
init_zoom = 3
elif map_scope == "China":
geo_json = china_json
plot_df = df[df["MapScope"] == "China Provinces"]
plot_df["AreaLabel"] = plot_df["Province/State"].astype(str)
location_var = "Province/State"
geo_json_name_field = "properties.NL_NAME_1"
map_center = {"lat": 37.110573, "lon": 106.493924}
title = "Chinese provinces"
init_zoom = 2
elif map_scope == "Australia":
geo_json = australia_json
plot_df = df[df["MapScope"] == "Australia States"]
plot_df["AreaLabel"] = plot_df["Province/State"].astype(str)
location_var = "Province/State"
geo_json_name_field = None
map_center = {"lat": -26, "lon": 133 + 25/60}
title = "Australian states"
init_zoom = 3
elif map_scope == "Canada":
geo_json = canada_json
plot_df = df[df["MapScope"] == "Canada Provinces"]
plot_df["AreaLabel"] = plot_df["Province/State"].astype(str)
location_var = "Province/State"
geo_json_name_field = "properties.PRENAME"
map_center = {"lat": 58, "lon": -96 - 48/60}
title = "Canadian Provinces"
init_zoom = 2
elif map_scope == "World":
geo_json = world_json
plot_df = df[df["MapScope"] == "Countries"]
plot_df["AreaLabel"] = plot_df["Country/Region"].astype(str)
location_var = "Country/Region"
geo_json_name_field = "properties.ADMIN"
map_center = {"lat": 0, "lon": 0}
title = "Countries"
init_zoom = 0
# set axis variables conditioned on scale settings
def get_min_max(x_arr):
var_finite = x_arr[(x_arr != 0) & (x_arr != -np.inf) & (x_arr != np.inf)]
if len(var_finite) > 0:
var_min = min(var_finite)
var_max = max(var_finite)
else:
var_min = 0
var_max = 0
return var_min, var_max
# determine valid date range for the date picker
days = np.sort(plot_df.Date.unique())
picker_min_date = numpy_dt64_to_dt(days[0])
picker_max_date = numpy_dt64_to_dt(days[-1])
# setup scales
log_txt = ["1e-6", "1e-5", "1e-4", ".001", ".01", ".1", \
"1", "10", "100", "1K", "10K", "100K", "1M"]
map_log_hvr_txt = "Cases per " + log_txt[int(np.log10(map_norm_val)) + 6] + " Capita: "
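        # e.g. with map_norm_val = 100000, int(np.log10(1e5)) + 6 == 11 and
        # log_txt[11] == "100K", so the hover label reads "Cases per 100K Capita: "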
if map_scale == "Logarithmic":
bar_scale_type = "log"
map_tick_mode = "array"
map_tick_vals = np.arange(-6, 7)
map_tick_txt = log_txt
if map_norm_type == "PerCapita":
plot_df["CaseVar"] = np.log10(plot_df[plot_var]*map_norm_val)
else:
plot_df["CaseVar"] = np.log10(plot_df[plot_var])
var_min, var_max = get_min_max(plot_df.CaseVar.values)
plot_range = np.array([var_min, var_max])
else:
bar_scale_type = "linear"
map_tick_mode = "auto"
map_tick_vals = []
map_tick_txt = []
if map_norm_type == "PerCapita":
plot_df["CaseVar"] = plot_df[plot_var]*map_norm_val
else:
plot_df["CaseVar"] = plot_df[plot_var]
var_min, var_max = get_min_max(plot_df.CaseVar.values)
plot_range = np.array([0, var_max])
if map_var == "Recovered":
heat_color_scale = "ylgn"
bar_color = "rgb(69, 161, 69)"
else:
heat_color_scale = "ylorrd"
bar_color = "rgb(236, 62, 19)"
# limit remaining calcs to data pertaining to picked date
plot_day_df = plot_df[plot_df.Date == map_date]
# define custom hover data
cust_data = np.dstack((plot_day_df.loc[:, map_var + map_calc].values, \
plot_day_df.loc[:, map_var + map_calc + "PerCapita"]. \
values*map_norm_val))[0]
location_series = plot_day_df[location_var]
if map_norm_type == "PerCapita":
bar_txt_format = "{:.2e}"
else:
bar_txt_format = "{:,.0f}"
# define the left bar plot
bar_df = plot_day_df.nlargest(10, "CaseVar", keep="all").reset_index()
bar_df = bar_df.head(10) # nlargest may return more than 10 rows if there are duplicate values
bar_df = bar_df[bar_df.CaseVar > -np.inf]
nrows = bar_df.shape[0]
bar_df = bar_df.iloc[np.arange(nrows - 1, -1, -1),:] # reverse order of top 10 rows
# plotly does not tolerate changing the number of bars in
        # a bar graph during animation, so define a function to pad
# data arrays with blank elements so the bar graph always
# has 10 elements
def pad_10_arr(x, pad_val, unique_fill_bool):
xlen = len(x)
if xlen == 10:
result = x
else:
npad = 10 - xlen
fill_arr = np.array([pad_val for i in range(npad)])
# shorten each string fill element in array to make the elements unique
if unique_fill_bool:
fill_arr = [item[i:] for i, item in enumerate(fill_arr)]
result = np.append(fill_arr, x)
return result
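        # e.g. pad_10_arr(np.array([3, 1]), 0, False) prepends eight zeros so the
        # bar graph always receives exactly 10 values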
# only build the bar plot if there is data to plot
if plot_day_df[plot_var].max() > 0:
no_data = False
max_width_label = 25
if map_scope == "UScounties":
# some of the county, state labels are too long, taking up too much space
# in the figure. Long labels will have the county label trimmed with an ellipsis appended.
labels_to_trim = bar_df["AreaLabel"].astype(str).str.len() > max_width_label
county_len_arr = max_width_label - 5 - bar_df.loc[labels_to_trim, "Province/State"].astype(str).str.len().values
county_abbr = [bar_df.loc[labels_to_trim, "County"].astype(str).values[i][:county_len_arr[i]] \
for i in range(len(county_len_arr))]
state_abbr = bar_df.loc[labels_to_trim, "Province/State"].astype(str).values.tolist()
county_state_abbr = [county_abbr[i] + "..., " + state_abbr[i] for i in range(len(county_abbr))]
bar_df.loc[labels_to_trim, "AreaLabel"] = county_state_abbr
elif map_scope == "Australia":
# only one label needs to be trimmed
long_label = "Australian Capital Territory"
labels_to_trim = bar_df["AreaLabel"].astype(str) == long_label
bar_df.loc[labels_to_trim, "AreaLabel"] = long_label[:(max_width_label - 3)] + "..."
# bar labels must be padded so all labels have the same length
# as some labels disappear and others are introduced,
            # varied-length labels cause bad animation behavior
area_labels = [label.rjust(max_width_label) for label in bar_df.AreaLabel.values]
if map_scale == "Logarithmic":
bar_df["CaseVarPlot"] = np.power(np.ones(10)*10, bar_df.CaseVar.values)
else:
bar_df["CaseVarPlot"] = bar_df.CaseVar
bar_df["ValLabels"] = bar_df.CaseVarPlot.astype("float")
bar_fig_data = go.Bar(x=pad_10_arr(bar_df.CaseVarPlot.values, 0, False),
y=pad_10_arr(area_labels, " " * max_width_label, True),
text=pad_10_arr(bar_df.ValLabels.map(bar_txt_format.format).values, "", False),
textposition="auto",
hoverinfo="none",
orientation="h",
marker_color=bar_color,
name="")
else:
no_data = True
bar_fig_data = go.Bar(x=[],
y=[],
orientation="h",
name="")
# build the heatmap
        heat_fig_data = go.Choroplethmapbox(geojson=geo_json,
locations=location_series,
featureidkey=geo_json_name_field,
z=plot_day_df.CaseVar,
zmin=plot_range[0], # min([plot_df.CaseVar.min(), 0]),
zmax=plot_range[1], # plot_df.CaseVar.max(),
customdata=cust_data,
name="",
text=plot_day_df.AreaLabel,
hovertemplate="<b>%{text}</b><br>" + \
"<b>Cases</b>: %{customdata[0]:,}<br>" + \
"<b>" + map_log_hvr_txt + "</b>: %{customdata[1]:.2e}",
colorbar=dict(outlinewidth=1,
outlinecolor="#333333",
len=0.9,
lenmode="fraction",
xpad=30,
xanchor="right",
bgcolor=None,
title=dict(text="Cases",
font=dict(size=14)),
tickmode=map_tick_mode,
tickvals=map_tick_vals,
ticktext=map_tick_txt,
tickcolor="#333333",
tickwidth=2,
tickfont=dict(color="#333333",
size=12)),
colorscale=heat_color_scale,
marker_opacity=0.7,
marker_line_width=0)
###########################################################
# The following code block was used in the original app to
# build an animation of the heatmap and bar charts. But
# this function has become too slow as months of data have
# accumulated.
#
## define animation controls
#frame_dur = 1000 # milliseconds, controls animation speed
#fig_ctrls = []
#sliders_dict = dict()
#
## only define the animation controls of there is data to plot
#if plot_df[plot_var].max() > 0:
# fig_ctrls = [dict(type="buttons",
# buttons=[dict(label="Play",
# method="animate",
# args=[None,
# dict(frame=dict(duration=frame_dur,
# redraw=True),
# fromcurrent=True)]),
# dict(label="Pause",
# method="animate",
# args=[[None],
# dict(frame=dict(duration=0,
# redraw=True),
# mode="immediate")])],
# direction="left",
# pad={"r": 10, "t": 35},
# showactive=False,
# x=0.1,
# xanchor="right",
# y=0,
# yanchor="top")]
#
# if (not is_init):
# sliders_dict = dict(active=init_date_ind,
# visible=True,
# yanchor="top",
# xanchor="left",
# currentvalue=dict(font=dict(size=14),
# prefix="Plotted Date: ",
# visible=True,
# xanchor="center"),
# pad=dict(b=10,
# t=10),
# len=0.875,
# x=0.125,
# y=0,
# steps=[])
#
##toc_a = time.perf_counter()
#
## define the animation frames
#fig_frames = []
#
#for day in days:
#
# # this code repeating what was done to build the initial bar plot above
# # .query() method provides faster filtering
# plot_day_df = plot_df.query("Date == @day") #plot_day_df = plot_df[plot_df.Date == day]
# bar_df = plot_day_df.nlargest(10, "CaseVar", keep="all").reset_index()
# bar_df = bar_df.head(10) # nlargest may return more than 10 rows if there are duplicate values
# INF = np.inf
# bar_df = bar_df.query("CaseVar > - @INF") #bar_df = bar_df[bar_df.CaseVar > -np.inf]
# nrows = bar_df.shape[0]
# bar_df = bar_df.iloc[np.arange(nrows - 1, -1, -1),:] # reverse order of top 10 rows
# if map_scope == "UScounties":
# labels_to_trim = bar_df["AreaLabel"].astype(str).str.len() > max_width_label
# county_len_arr = max_width_label - 5 - bar_df.loc[labels_to_trim, "Province/State"].astype(str).str.len().values
# county_abbr = [bar_df.loc[labels_to_trim, "County"].astype(str).values[i][:county_len_arr[i]] \
# for i in range(len(county_len_arr))]
# state_abbr = bar_df.loc[labels_to_trim, "Province/State"].astype(str).values.tolist()
# county_state_abbr = [county_abbr[i] + "..., " + state_abbr[i] for i in range(len(county_abbr))]
# bar_df.loc[labels_to_trim, "AreaLabel"] = county_state_abbr
# elif map_scope == "Australia":
# long_label = "Australian Capital Territory"
# labels_to_trim = bar_df["AreaLabel"].astype(str) == long_label
# bar_df.loc[labels_to_trim, "AreaLabel"] = long_label[:(max_width_label - 3)] + "..."
# area_labels = [label.rjust(max_width_label) for label in bar_df.AreaLabel.values]
# bar_df["ValLabels"] = bar_df.CaseVar.astype("float")
#
# # this code repeats what was done to build the initial heatmap above
# cust_data = np.dstack((plot_day_df.loc[:, map_var + map_calc].values, \
# plot_day_df.loc[:, map_var + map_calc + "PerCapita"]. \
# values*map_norm_val))[0]
# location_series = plot_day_df[location_var]
#
# # define the frame, repeating what was done for the initial plots above
# frame = go.Frame(data=[go.Bar(x=pad_10_arr(bar_df[plot_var].values, 0, False),
# y=pad_10_arr(area_labels, " " * max_width_label, True),
# text=pad_10_arr(bar_df.ValLabels.map(bar_txt_format.format). \
# values, "", False),
# textposition="auto",
# hoverinfo="none",
# name=""),
# go.Choroplethmapbox(locations=location_series,
# featureidkey=geo_json_name_field,
# z=plot_day_df.CaseVar,
# customdata=cust_data,
# name="",
# text=plot_day_df.AreaLabel,
# hovertemplate="<b>%{text}</b><br>" + \
# "<b>Cases</b>: %{customdata[0]:,}<br>" + \
# "<b>" + map_log_hvr_txt + "</b>: %{customdata[1]:.2e}")],
# name=numpy_dt64_to_str(day))
# fig_frames.append(frame)
#
# # define the slider step
# slider_step = dict(args=[[numpy_dt64_to_str(day)],
# dict(mode="immediate",
# frame=dict(duration=300,
# redraw=True))],
# method="animate",
# label=numpy_dt64_to_str(day))
# sliders_dict["steps"].append(slider_step)
#
##toc_b = time.perf_counter()
#
# End of code block for building an animation
###############################################################
# Assemble the entire figure based on the components defined above
fig = subplots.make_subplots(rows=1, cols=2, column_widths=[0.2, 0.8],
subplot_titles=("Top 10 " + title, ""),
horizontal_spacing=0.05,
specs=[[{"type": "bar"},
{"type": "choroplethmapbox"}]])
fig.add_trace(bar_fig_data, row=1, col=1)
fig.add_trace(heat_fig_data, row=1, col=2)
fig.update_layout(mapbox_style="light",
mapbox_zoom=init_zoom,
mapbox_accesstoken=token,
mapbox_center=map_center,
margin={"r": 10, "t": 20, "l": 10, "b": 10},
plot_bgcolor="white")
#sliders=[sliders_dict],
#updatemenus=fig_ctrls)
#fig["frames"] = fig_frames
# update the bar plot axes
if no_data:
fig.update_xaxes(showticklabels=False)
fig.update_yaxes(showticklabels=False)
else:
fig.update_xaxes(type=bar_scale_type,
ticks="outside",
range=plot_range,
showgrid=True,
gridwidth=0.5,
gridcolor="#CCCCCC")
fig.update_yaxes(tickfont=dict(family="Courier New, monospace",
size=13))
if no_data:
        # add an annotation explaining that there is no data to plot
fig["layout"]["annotations"] = [dict(x=0,
y=0,
xref="x1",
yref="y1",
text="All<br>" + title + "<br>have reported<br>zero " + \
map_var + "<br>cases to date",
showarrow=False,
font=dict(size=16))]
else:
# modify the bar plot title font properties
fig["layout"]["annotations"][0]["font"] = dict(size=16)
#toc = time.perf_counter()
#print(toc - tic)
#print(toc_a - tic)
#print(toc_b - toc_a)
#print(toc - toc_b)
# return the figure and hide the dbc.Spinner which is shown during initial app loading
return fig, {"display": "none"}, picker_min_date, picker_max_date
#################################################################
# 11 Callback for Adding Rows to curve_plot_df (dataframe define curves to plot)
@app.callback(
[Output("data-table", "data"),
Output("curve-plot-df", "children"),
Output("curve-drop", "options"),
Output("curve-add-click-count", "children"),
Output("curve-clear-click-count", "children")],
[Input("curve-add", "n_clicks"),
Input("curve-clear", "n_clicks"),
Input("curve-drop", "value")],
[State("curve-country", "value"),
State("curve-state", "value"),
State("curve-county", "value"),
State("curve-plot-df", "children"),
State("curve-add-click-count", "children"),
State("curve-clear-click-count", "children")],
)
def edit_plotted_curves(add_click, clear_click, drop_row_id, country, state, \
county, df_as_json, add_click_last, clear_click_last):
# read the df from the hidden div json data
curve_plot_df = pd.read_json(df_as_json[0], orient='split')
# determine whether this callback was triggered by the Add button, the Clear All button
# or Drop Row dropdown
if add_click > add_click_last:
if state.startswith("All of "):
state = "nan"
county = "nan"
elif county.startswith("All of "):
county = "nan"
nrows = curve_plot_df.shape[0]
curve_plot_df.loc[nrows] = [nrows, country, state, county]
elif clear_click > clear_click_last:
curve_plot_df = curve_plot_df.loc[curve_plot_df["Row ID"] == -999]
elif drop_row_id != "":
curve_plot_df = curve_plot_df.loc[curve_plot_df["Row ID"] != int(drop_row_id)]
curve_plot_df = curve_plot_df.reset_index(drop=True)
curve_plot_df["Row ID"] = curve_plot_df.index
# write the new df to the ui data table and to the hidden div
return curve_plot_df.replace("nan", "").to_dict("records"), \
[curve_plot_df.to_json(date_format='iso', orient='split')], \
[{"label": val, "value": val} for val in [""] + curve_plot_df["Row ID"].tolist()], \
add_click, clear_click
#################################################################
# 12 Callback for Updating Curves Plot Figure
@app.callback(
Output("curves", "figure"),
[Input("curve-plot-df", "children"),
Input("curve-calc", "value"),
Input("curve-norm-type", "value"),
Input("curve-norm-val", "value"),
Input("curve-zero", "value"),
Input("curve-type", "value"),
Input("curve-scale", "value"),
Input("curve-avg-period", "value")]
)
def update_curves_plot(curve_plot_df_as_json, calc, norm_type, norm_val, zero_opt, \
types_ls, y_axis_type, avg_period):
    # dbc.Select passes its value back as a string, so cast it to an int
norm_val = int(norm_val)
# define function which gives a string label for order of magnitude (log10)
def logtxt(val):
log_txt_opts = ["1e-6", "1e-5", "1e-4", ".001", ".01", ".1", \
"1", "10", "100", "1K", "10K", "100K", "1M"]
log_txt = log_txt_opts[int(np.log10(val)) + 6]
return log_txt
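    # e.g. logtxt(100000) returns "100K", so hover text reads "Cases per 100K Capita"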
# define a function which will scatter points and a fit curve line corresponding to
# place and type of variable
def add_cust_traces(fig, var, place_name, df, color):
# determine the basic variable type to be plotted
if var[:1] == "A":
var_type = "Active"
elif var[:1] == "C":
var_type = "Confirmed"
elif var[:1] == "R":
var_type = "Recovered"
elif var[:1] == "D":
var_type = "Deaths"
# assign marker and line styles depending on how many basic types
        # of variables are to be plotted
var_id = np.where(np.array(types_ls) == var_type)[0][0]
dash_ls = ["solid", "dash", "dot", "longdash"]
symbol_ls = ["circle", "square", "diamond", "triangle-up"]
if zero_opt == "None":
x_axis_var = "Date"
else:
x_axis_var = "Zero_Day"
# define hover text for scatter points
per_cap = " Cases per " + logtxt(norm_val) + " Capita"
base_hover_txt = "<b>" + place_name + "</b><br>" + \
"<b>Date</b>: %{text}" + \
"<br><b>Days Elapsed</b>: %{customdata[0]}"
if calc == "":
hover_txt = base_hover_txt + \
"<br><b>Total " + var_type + " To Date</b>: %{customdata[1]:,.0f}" + \
"<br><b>Total " + var_type + " To Date</b>:<br>" + \
"%{customdata[2]:.2e}" + per_cap
elif calc == "PerDate":
hover_txt = base_hover_txt + \
"<br><b>New " + var_type + " On Date</b>: %{customdata[1]:,.0f}" + \
"<br><b>New " + var_type + " On Date</b>:<br>" + \
"%{customdata[2]:.2e}" + per_cap
# plot scatter data points
fig.add_trace(go.Scatter(x=df[x_axis_var],
y=df[var],
mode='markers',
name="",
marker=dict(symbol=symbol_ls[var_id],
size=8,
color=color,
opacity=0.4),
customdata=np.dstack((df.loc[:, "Zero_Day"].values, \
df.loc[:, var_type + calc].values, \
df.loc[:, var_type + calc + "PerCapita"].values))[0],
text=df.Date.dt.strftime('%B %d, %Y'),
hovertemplate=hover_txt,
showlegend=False))
# define the hover text for the fit curve line
fit_hover_txt = "<b>Curve Fit for " + place_name + "</b><br>" + \
"<b>Date</b>: %{text}" + \
"<br><b>Days Elapsed</b>: %{customdata[0]}"
if calc == "":
fit_hover_txt = fit_hover_txt + \
"<br><b>Fit Total " + var_type + " To Date</b>: %{customdata[1]:,.0f}" + \
"<br><b>Fit Total " + var_type + " To Date</b>:<br>" + \
"%{customdata[2]:.2e}" + per_cap
elif calc == "PerDate":
fit_hover_txt = fit_hover_txt + \
"<br><b>Fit New " + var_type + " On Date</b>: %{customdata[1]:,.0f}" + \
"<br><b>Fit New " + var_type + " On Date</b>:<br>" + \
"%{customdata[2]:.2e}" + per_cap
# plot the fit curve line
fig.add_trace(go.Scatter(x=df[x_axis_var],
y=df[var + "Avg"],
mode='lines',
name="",
line=dict(width=3, dash=dash_ls[var_id], color=color),
customdata=np.dstack((df.loc[:, "Zero_Day"].values, \
df.loc[:, var_type + calc + "Avg"].values, \
df.loc[:, var_type + calc + "PerCapita" + "Avg"].values))[0],
text=df.Date.dt.strftime('%B %d, %Y'),
hovertemplate=fit_hover_txt,
showlegend=False))
return fig
# set null values of plot parameters
if calc == "Total":
calc = ""
if norm_type == "None":
norm_type = ""
# make a list of all curves to be plotted
plot_vars_ls = [plot_type + calc + norm_type for plot_type in types_ls]
# read the df from the hidden div json data
# this df defines the country/state/county areas which are to be plotted
curve_plot_df = pd.read_json(curve_plot_df_as_json[0], orient='split')
# setup matplotlib colors for distinguishing curves
nplaces = curve_plot_df.shape[0]
ncolors = max(nplaces, len(types_ls))
cmap = cm.get_cmap("tab10", ncolors) # PiYG
colors = ["" for i in range(ncolors)]
for i in range(cmap.N):
rgb = cmap(i)[:3] # will return rgba, we take only first 3 so we get rgb
colors[i] = matplotlib.colors.rgb2hex(rgb)
# set options for deriving the Zero_Day column & X-axis label
max_zero_day = 0
y_min, y_max = 0, 1
if zero_opt == "None":
zero_thresh = 1
thresh_var = "Confirmed"
elif zero_opt == "Total":
zero_thresh = 1
thresh_var = "Confirmed"
elif zero_opt == "PerCapita":
zero_thresh = 1/10000
thresh_var = "ConfirmedPerCapita"
# define a blank figure as the default
fig = go.Figure()
# fill the figure with data if places have been identified by the user
if nplaces > 0:
    # comparisons against np.nan always evaluate False, so type checks are used to build the proper mask
# define a function which generically filters for country, state & county
def filter_mask_csc(df, country, state, county):
if isinstance(county, str):
mask = (df["Country/Region"] == country) & \
(df["Province/State"] == state) & \
(df["County"] == county)
elif isinstance(state, str):
mask = (df["Country/Region"] == country) & \
(df["Province/State"] == state) & \
(df["County"] == "nan")
else:
mask = (df["Country/Region"] == country) & \
(df["Province/State"] == "nan") & \
(df["County"] == "nan")
return mask
# generate a local df containing only the data that will be plotted
# this will make subsequent df manipulation faster
mask_bool_ls = [filter_mask_csc(df, curve_plot_df["Country/Region"][i],
curve_plot_df["Province/State"][i],
curve_plot_df.County[i]
)
for i in range(nplaces)
]
    # the list of masks needs to be consolidated via OR into a single mask
mask_bool = np.array([False for i in range(df.shape[0])])
for mask_bool_item in mask_bool_ls:
mask_bool = mask_bool | mask_bool_item
plot_df = df[mask_bool]
# ensure line plots will move left to right
plot_df = plot_df.sort_values(["Date"]).reset_index()
    # initialize values to be amended in subsequent for loops
item_counter = 0
min_date = plot_df.Date.max()
max_date = plot_df.Date.min()
# build the figure piecewise, adding traces within for loops
for place_i in range(nplaces):
# isolate data for place_i
curve_row = curve_plot_df.iloc[place_i, :]
var_mask_bool = filter_mask_csc(plot_df, \
curve_row["Country/Region"], \
curve_row["Province/State"], \
curve_row["County"])
plot_var_df = plot_df[var_mask_bool]
# calculate zero day column for place_i
plot_var_df["Zero_Day"] = 0
started_df = plot_var_df[plot_var_df[thresh_var] >= zero_thresh]
start_date_series = started_df.Date[:1] - pd.Timedelta(days=1)
plot_var_df.Zero_Day = plot_var_df.Date - start_date_series.squeeze()
plot_var_df.Zero_Day = plot_var_df.Zero_Day.dt.days
plot_var_df = plot_var_df[plot_var_df.Zero_Day > 0]
# scale per capita columns
per_cap_vars = ["ConfirmedPerCapita", "ActivePerCapita", "RecoveredPerCapita", "DeathsPerCapita", \
"ConfirmedPerDatePerCapita", "ActivePerDatePerCapita", \
"RecoveredPerDatePerCapita", "DeathsPerDatePerCapita"]
plot_var_df[per_cap_vars] = plot_var_df[per_cap_vars].values*norm_val
# keep track of x-axis range limits across all plotted places
max_zero_day = max([max_zero_day, plot_var_df.Zero_Day.max()])
min_date = min([min_date, plot_var_df.Date.min()])
max_date = max([max_date, plot_var_df.Date.max()])
# calculate moving average columns for place_i
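        # rolling(avg_period, center=True, min_periods=1) gives a centered moving
        # average that still returns values at the start and end of each series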
for plot_type in types_ls:
                # calculate moving average for the selected case counts (total or per-date)
var_to_avg = plot_type + calc
plot_var_df[var_to_avg + "Avg"] = \
plot_var_df[var_to_avg].rolling(avg_period, center=True, min_periods=1).mean()
                # calculate moving average for the per-capita version of the same counts
var_pc_to_avg = plot_type + calc + "PerCapita"
plot_var_df[var_pc_to_avg + "Avg"] = \
plot_var_df[var_pc_to_avg].rolling(avg_period, center=True, min_periods=1).mean()
# get the name of place_i
place_elements = [elem for elem in curve_row.replace(np.nan, "").values[1:] if elem != ""]
place_name = ", ".join(place_elements)
# add traces for each variable type to be plotted for place_i
for var in plot_vars_ls:
fig = add_cust_traces(fig, var, place_name, plot_var_df, colors[item_counter])
# add dummy trace for legend only if a single place is being plotted
# this will utilize different colors for variable types
if nplaces == 1:
fig.add_trace(go.Scatter(x=[None],
y=[None],
mode='lines+markers',
name=types_ls[item_counter],
line=dict(dash="solid",
color=colors[item_counter]),
showlegend=True))
item_counter += 1
# add a dummy trace for legend only if more than one place is being plotted
# this will utilize different colors for places
if nplaces > 1:
fig.add_trace(go.Scatter(x=[None],
y=[None],
mode='lines',
name=place_name,
line=dict(dash="solid", color=colors[item_counter]),
showlegend=True))
item_counter += 1
axopts = dict(linecolor = "gray", linewidth = 0.5, showline = True, mirror=True)
fig.update_layout(
paper_bgcolor=invis,
plot_bgcolor=invis,
margin=go.layout.Margin(l=50, r=20, b=10, t=10),
xaxis=axopts,
yaxis=axopts,
showlegend=True,
legend=go.layout.Legend(
x=0,
y=-0.25,
traceorder="reversed",
font=dict(
family="sans-serif",
size=12,
color="black"),
bgcolor="white",
bordercolor="gray",
borderwidth=0.5),
legend_orientation="h")
# add dummy traces for the basic variable types as part of a custom legend
if nplaces > 1:
if "Confirmed" in types_ls:
fig.add_trace(go.Scatter(x=[None], y=[None], mode='lines+markers',
marker=dict(size=8, color='black', symbol="circle"),
line=dict(dash="solid"), showlegend=True, name='Confirmed'))
if "Recovered" in types_ls:
fig.add_trace(go.Scatter(x=[None], y=[None], mode='lines+markers',
marker=dict(size=8, color='black', symbol="square"),
line=dict(dash="dash"), showlegend=True, name='Recovered'))
if "Deaths" in types_ls:
fig.add_trace(go.Scatter(x=[None], y=[None], mode='lines+markers',
marker=dict(size=8, color='black', symbol="diamond"),
line=dict(dash="dot"), showlegend=True, name='Deaths'))
# setup x-axis
if nplaces == 0:
fig.layout.xaxis.range = [0, 1]
elif zero_opt == "None":
x_margin = 0.1*(max_date - min_date)
fig.update_xaxes(title_text="Date", showspikes=True, spikesnap="data", spikemode="across", \
spikethickness=2)
    fig.layout.xaxis.range = [pd.to_datetime(min_date - x_margin), pd.to_datetime(max_date + x_margin)]
# -*- coding: utf-8 -*-
# @author: Elie
#%% ==========================================================
# Import libraries set library params
# ============================================================
import pandas as pd
import numpy as np
import os
pd.options.mode.chained_assignment = None #Pandas warnings off
#plotting
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib.ticker import Locator
import matplotlib as mpl
# stats
from scipy import stats
#set matplotlib rcparams
mpl.rcParams['savefig.transparent'] = "False"
mpl.rcParams['axes.facecolor'] = "white"
mpl.rcParams['figure.facecolor'] = "white"
mpl.rcParams['font.size'] = "5"
plt.rcParams['savefig.transparent'] = "False"
plt.rcParams['axes.facecolor'] = "white"
plt.rcParams['figure.facecolor'] = "white"
plt.rcParams['font.size'] = "5"
#%% ==========================================================
# define these feature/headers here in case the headers
# are out of order in input files (often the case)
# ============================================================
snv_categories = ["sample",
"A[C>A]A", "A[C>A]C", "A[C>A]G", "A[C>A]T",
"C[C>A]A", "C[C>A]C", "C[C>A]G", "C[C>A]T",
"G[C>A]A", "G[C>A]C", "G[C>A]G", "G[C>A]T",
"T[C>A]A", "T[C>A]C", "T[C>A]G", "T[C>A]T",
"A[C>G]A", "A[C>G]C", "A[C>G]G", "A[C>G]T",
"C[C>G]A", "C[C>G]C", "C[C>G]G", "C[C>G]T",
"G[C>G]A", "G[C>G]C", "G[C>G]G", "G[C>G]T",
"T[C>G]A", "T[C>G]C", "T[C>G]G", "T[C>G]T",
"A[C>T]A", "A[C>T]C", "A[C>T]G", "A[C>T]T",
"C[C>T]A", "C[C>T]C", "C[C>T]G", "C[C>T]T",
"G[C>T]A", "G[C>T]C", "G[C>T]G", "G[C>T]T",
"T[C>T]A", "T[C>T]C", "T[C>T]G", "T[C>T]T",
"A[T>A]A", "A[T>A]C", "A[T>A]G", "A[T>A]T",
"C[T>A]A", "C[T>A]C", "C[T>A]G", "C[T>A]T",
"G[T>A]A", "G[T>A]C", "G[T>A]G", "G[T>A]T",
"T[T>A]A", "T[T>A]C", "T[T>A]G", "T[T>A]T",
"A[T>C]A", "A[T>C]C", "A[T>C]G", "A[T>C]T",
"C[T>C]A", "C[T>C]C", "C[T>C]G", "C[T>C]T",
"G[T>C]A", "G[T>C]C", "G[T>C]G", "G[T>C]T",
"T[T>C]A", "T[T>C]C", "T[T>C]G", "T[T>C]T",
"A[T>G]A", "A[T>G]C", "A[T>G]G", "A[T>G]T",
"C[T>G]A", "C[T>G]C", "C[T>G]G", "C[T>G]T",
"G[T>G]A", "G[T>G]C", "G[T>G]G", "G[T>G]T",
"T[T>G]A", "T[T>G]C", "T[T>G]G", "T[T>G]T"]
indel_categories = ["sample",
"1:Del:C:0", "1:Del:C:1", "1:Del:C:2", "1:Del:C:3", "1:Del:C:4", "1:Del:C:5",
"1:Del:T:0", "1:Del:T:1", "1:Del:T:2", "1:Del:T:3", "1:Del:T:4", "1:Del:T:5",
"1:Ins:C:0", "1:Ins:C:1", "1:Ins:C:2", "1:Ins:C:3", "1:Ins:C:4", "1:Ins:C:5",
"1:Ins:T:0", "1:Ins:T:1", "1:Ins:T:2", "1:Ins:T:3", "1:Ins:T:4", "1:Ins:T:5",
"2:Del:R:0", "2:Del:R:1", "2:Del:R:2", "2:Del:R:3", "2:Del:R:4", "2:Del:R:5",
"3:Del:R:0", "3:Del:R:1", "3:Del:R:2", "3:Del:R:3", "3:Del:R:4", "3:Del:R:5",
"4:Del:R:0", "4:Del:R:1", "4:Del:R:2", "4:Del:R:3", "4:Del:R:4", "4:Del:R:5",
"5:Del:R:0", "5:Del:R:1", "5:Del:R:2", "5:Del:R:3", "5:Del:R:4", "5:Del:R:5",
"2:Ins:R:0", "2:Ins:R:1", "2:Ins:R:2", "2:Ins:R:3", "2:Ins:R:4", "2:Ins:R:5",
"3:Ins:R:0", "3:Ins:R:1", "3:Ins:R:2", "3:Ins:R:3", "3:Ins:R:4", "3:Ins:R:5",
"4:Ins:R:0", "4:Ins:R:1", "4:Ins:R:2", "4:Ins:R:3", "4:Ins:R:4", "4:Ins:R:5",
"5:Ins:R:0", "5:Ins:R:1", "5:Ins:R:2", "5:Ins:R:3", "5:Ins:R:4", "5:Ins:R:5",
"2:Del:M:1", "3:Del:M:1", "3:Del:M:2", "4:Del:M:1", "4:Del:M:2", "4:Del:M:3",
"5:Del:M:1", "5:Del:M:2", "5:Del:M:3", "5:Del:M:4", "5:Del:M:5"]
cnv_categories = ["sample",
"BCper10mb_0", "BCper10mb_1", "BCper10mb_2", "BCper10mb_3",
"CN_0", "CN_1", "CN_2", "CN_3", "CN_4", "CN_5", "CN_6", "CN_7", "CN_8",
"CNCP_0", "CNCP_1", "CNCP_2", "CNCP_3", "CNCP_4", "CNCP_5", "CNCP_6", "CNCP_7",
"BCperCA_0", "BCperCA_1", "BCperCA_2", "BCperCA_3", "BCperCA_4", "BCperCA_5",
"SegSize_0", "SegSize_1", "SegSize_2", "SegSize_3", "SegSize_4", "SegSize_5",
"SegSize_6", "SegSize_7", "SegSize_8", "SegSize_9", "SegSize_10",
"CopyFraction_0", "CopyFraction_1", "CopyFraction_2", "CopyFraction_3", "CopyFraction_4",
"CopyFraction_5", "CopyFraction_6"]
#%% ==========================================================
# make concat sig dataframe
# ============================================================
def load_data(snv_counts_path, indel_counts_path, cnv_counts_path):
df_snv = pd.read_csv(snv_counts_path, sep='\t', low_memory=False)
df_snv = df_snv[snv_categories]
df_snv["sample"] = df_snv["sample"].astype(str)
df_indel = pd.read_csv(indel_counts_path, sep='\t', low_memory=False)
df_indel = df_indel[indel_categories]
df_indel["sample"] = df_indel["sample"].astype(str)
    df_cnv = pd.read_csv(cnv_counts_path, sep='\t', low_memory=False)
"""Debiasing using reweighing"""
"""
This data recipe performs reweighing debiasing using the AIF360 package.
https://github.com/Trusted-AI/AIF360
<NAME>., <NAME>. Data preprocessing techniques for classification without discrimination.
Knowl Inf Syst 33, 1–33 (2012). https://doi.org/10.1007/s10115-011-0463-8
The transformer splits the original data as specified and returns training, validation, and test sets
with weights added.
1. Update the folder_path and data_file variables to indicate the location of the dataset(s).
2. validation_test_files lists additional validation or test files that need to be updated with weights.
3. validation_split indicates the percentiles at which the original data should be split to create a
validation and test set. If it's empty, no validation or test set is created. [0.7] would create
a 70/30 training/validation split. [0.7, 0.9] would create a 70/20/10 training, validation, and test split.
4. target is the name of the target column.
5. favorable_label and unfavorable_label are the socially positive and negative target value respectively.
6. protected_group_info is a list of lists, where each sublist contains the name of a protected column,
the privileged level, and the unprivileged level. Each of the protected columns must be binary.
7. From the Datasets section of driverless, click on ADD DATASET and then UPLOAD DATA RECIPE to upload this file.
Be sure to use the generated validation set for validation when a model is trained. The weights
can cause leakage if the validation or test data is used for determining the weights.
"""
import datatable as dt
import numpy as np
import os
from h2oaicore.data import CustomData
from h2oaicore.systemutils import config
class MyReweightingData(CustomData):
_modules_needed_by_name = ['datetime', 'fairlearn', 'aif360', 'sklearn']
@staticmethod
def create_data():
import pandas as pd
from h2oaicore.models_utils import import_tensorflow
tf = import_tensorflow()
# above is because aif360 requires tensorflow
from aif360.datasets import BinaryLabelDataset
from aif360.algorithms.preprocessing.reweighing import Reweighing
"""
Update the below as needed
"""
#########
#########
#########
# Path to the data
folder_path = 'tmp/'
# Data file
data_file = 'housing_train_proc.csv'
full_data_file = folder_path + data_file
if not os.path.isfile(full_data_file):
# for testing, just return something
if config.hard_asserts:
return dt.Frame(np.array([[1, 2, 3], [4, 5, 6]]))
else:
return []
train = pd.read_csv(full_data_file)
validation_test_files = ['housing_test_proc.csv']
validation_split = [0.6, 0.8]
# Target column
target = 'high_priced'
favorable_label = 0
unfavorable_label = 1
        # protected_group_info = [[protected column name 1, privileged level, unprivileged level], [protected column name 2, privileged level, unprivileged level]]
# The protected group columns need to be binary
protected_group_info = [['hispanic', 0, 1], ['black', 0, 1]]
#########
#########
#########
# Set up protected group info
protected_groups = [group_info[0] for group_info in protected_group_info]
dataset_orig = BinaryLabelDataset(df=train, label_names=[target], favorable_label=favorable_label,
unfavorable_label=unfavorable_label,
protected_attribute_names=protected_groups)
privileged_groups = []
unprivileged_groups = []
for protected_group in protected_group_info:
privileged_groups_dict = {}
unprivileged_groups_dict = {}
privileged_groups_dict[protected_group[0]] = protected_group[1]
unprivileged_groups_dict[protected_group[0]] = protected_group[2]
privileged_groups.append(privileged_groups_dict)
unprivileged_groups.append(unprivileged_groups_dict)
# Fit weights on the full dataset to be used on the external test set, if given
RW_full = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)
RW_full.fit(dataset_orig)
# Split the original data into train, validation, and test if applicable
if len(validation_split) == 1:
dataset_orig_train, dataset_orig_valid = dataset_orig.split(validation_split, shuffle=True)
elif len(validation_split) == 2:
dataset_orig_train_valid, dataset_orig_test = dataset_orig.split([validation_split[1]], shuffle=True)
# Fit the weights on both the validation and test set for the test set split
RW_train_valid = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)
RW_train_valid.fit(dataset_orig_train_valid)
dataset_orig_train, dataset_orig_valid = dataset_orig_train_valid.split(
[validation_split[0] / (validation_split[1])], shuffle=True)
else:
dataset_orig_train = dataset_orig
# Fit weights on the training set only
RW = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)
RW.fit(dataset_orig_train)
dataset_transf_train = RW.transform(dataset_orig_train)
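        # Reweighing assigns per-row instance weights so that group membership and
        # the label are statistically independent under the weighted distribution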
        # Add the weights to the training set
train_df = pd.DataFrame(dataset_transf_train.features, columns=dataset_transf_train.feature_names)
train_df[target] = dataset_transf_train.labels.ravel()
train_df['weights'] = dataset_transf_train.instance_weights.ravel()
        # Collect the output datasets in a dictionary keyed by output file name
dataset_dict = {}
dataset_dict[data_file.split('.')[0] + "_rw_train.csv"] = train_df
# Add weights to the validation split (if a validation split was specified)
if len(validation_split) >= 1:
dataset_transf_valid = RW.transform(dataset_orig_valid)
valid_df = pd.DataFrame(dataset_transf_valid.features, columns=dataset_transf_valid.feature_names)
valid_df[target] = dataset_transf_valid.labels.ravel()
valid_df['weights'] = dataset_transf_valid.instance_weights.ravel()
dataset_dict[data_file.split('.')[0] + "_rw_validation.csv"] = valid_df
# Add weights to the test split (if a test split was specified)
if len(validation_split) >= 2:
dataset_transf_test = RW_train_valid.transform(dataset_orig_test)
            test_df = pd.DataFrame(dataset_transf_test.features, columns=dataset_transf_test.feature_names)
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 28 15:27:09 2017
@author: Adam
run_dire()
- function to build path to a run directory
run_file()
- function to build path to a run file
cashew()
- caching wrapper
H5Scan
- class for accessing hdf5 files without groups
H5Data
- class for accessing hdf5 files with groups
"""
import os
import warnings
import inspect
import re
from functools import wraps
from datetime import datetime
import IPython
import h5py
import numpy as np
import pandas as pd
from types import FunctionType
from numbers import Number
from collections.abc import Iterable
from tqdm import tqdm
from .tools import sub_dire, utf8_attrs
# constants
MEASUREMENT_ID = "measurement"
# log file: parquet, feather, or pkl
LOG_FORMAT = "feather"
def run_dire(base, rid, dire=None, create=False):
""" Build path to directory using run ID.
base/YYYY/MM/DD/[rid]/[dire]
The first 8 characters of the run ID are assumed to be of
the format YYYYMMDD. The rest of the run ID can be anything, e.g.,
YYYYMMDD_hhmmss, or YYYYMMDD_001, or YYYYMMDD_label
args:
base
rid
dire=None
create=False create run_dire (unless it already exists)
return:
path to directory
"""
year = rid[:4]
month = rid[4:6]
day = rid[6:8]
path = os.path.join(base, year, month, day, rid)
dire = "" if dire is None else dire
if create:
path = sub_dire(path, dire)
else:
path = os.path.join(path, dire)
return path
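# illustrative example (hypothetical base path and run ID):
#   run_dire("/data", "20170101_001", dire="analysis")
#   -> '/data/2017/01/01/20170101_001/analysis'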
def run_file(base, rid, ftype="_data.h5", check=True):
""" Build path to data file using run ID.
base/YYYY/MM/DD/[rid]/[rid][ftype]
The first 8 characters of the run ID are assumed to be of
the format YYYYMMDD. The rest of the run ID could be a
complete timestamp, or the date appended by a padded integer,
or a descriptive label, e.g.,
YYYYMMDD_hhmmss, or YYYYMMDD_001, or YYYYMMDD_label
args:
base
rid
ftype='_data.h5'
check=True does file exist?
return:
path to rid h5 file
"""
year = rid[:4]
month = rid[4:6]
day = rid[6:8]
dire = os.path.join(base, year, month, day, rid)
fil = os.path.join(dire, rid + ftype)
if check:
if not os.path.isdir(base):
# base directory
raise OSError(f"{base} is not a directory")
if not os.path.isdir(dire):
# run ID directory
raise OSError(f"{dire} is not a directory")
elif not os.path.isfile(fil):
# run ID file
raise OSError(f"{fil} is not a file")
return fil
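# illustrative example (hypothetical base path and run ID):
#   run_file("/data", "20170101_001", check=False)
#   -> '/data/2017/01/01/20170101_001/20170101_001_data.h5'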
def cashew(method):
""" Decorator to save or load method result to or from a pickle file.
args:
<passed to method>
kwargs:
cache=None If cache is not None, save result to
cache_file, or read from it if it exists.
cache_dire=None
update_cache=False Overwrite the cache.
get_info=False Get information about method / cache.
notes:
# file name
if cache is an absolute path, matching *.pkl:
cache_file = cache
elif isinstance(cache, bool):
cache_file = dire/[method.__name__].pkl
elif isinstance(cache, str):
cache_file = dire/cache.[method.__name__].pkl
else:
cache_file = None
# directory
if cache_dire is None:
if hasattr(args[0], "cache_dire"):
cache_dire = args[0].cache_dire
else:
cache_dire = os.getcwd()
"""
@wraps(method)
def wrapper(*args, **kwargs):
""" function wrapper
"""
cache = kwargs.pop("cache", None)
cache_dire = kwargs.pop("cache_dire", None)
update_cache = kwargs.pop("update_cache", False)
get_info = kwargs.pop("get_info", False)
# info
sig = inspect.signature(method)
arg_names = list(sig.parameters.keys())
arg_values = []
for a in args:
if isinstance(a, (str, Number, Iterable)):
arg_values.append(a)
elif hasattr(a, "__name__"):
arg_values.append(a.__name__)
else:
arg_values.append(a.__class__.__name__)
info = dict(zip(arg_names, arg_values))
info = {**info, **kwargs}
info["method"] = method.__name__
# config cache
if cache:
# absolute path
if isinstance(cache, str) and os.path.isabs(cache):
cache_file = cache
cache_dire, fname = os.path.split(cache_file)
# relative path
else:
# directory
if cache_dire is None:
if hasattr(args[0], "cache_dire"):
cache_dire = args[0].cache_dire
else:
cache_dire = os.getcwd()
# file name
if isinstance(cache, bool):
fname = method.__name__ + ".pkl"
elif isinstance(cache, str):
fname = f"{os.path.splitext(cache)[0]}.{method.__name__}.pkl"
else:
raise TypeError("kwarg cache dtype must be str or True")
cache_file = os.path.join(cache_dire, fname)
# checks
if not os.path.exists(cache_dire):
raise OSError(f"{cache_dire} not found")
_, ext = os.path.splitext(cache_file)
if ext != ".pkl":
raise NameError(f"{fname} should have `.pkl` extension")
# read cache ...
if not update_cache and cache and os.path.isfile(cache_file):
try:
                result, info = pd.read_pickle(cache_file)
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
pickled = cPickle.dumps(frame)
unpickled = cPickle.loads(pickled)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEquals(result.index.names, self.frame.index.names)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assert_(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEquals(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assert_(isnull(s.values[42:65]).all())
self.assert_(notnull(s.values[:42]).all())
self.assert_(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assert_(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assert_((cp.values[:4] == 0).all())
self.assert_((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
assert_series_equal(df['A', '1'], df['B', '1'])
assert_series_equal(df['A', '2'], df['B', '1'])
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp = Series(['x'], index=MultiIndex.from_tuples([(0, 1, 0)]))
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
        # missing values in returned index should be preserved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.ix[1:2]
exp = frame.reindex(frame.index[2:])
assert_frame_equal(res, exp)
frame.ix[1:2] = 7
self.assert_((frame.ix[1:2] == 7).values.all())
series = Series(np.random.randn(len(index)), index=index)
res = series.ix[1:2]
exp = series.reindex(series.index[2:])
assert_series_equal(res, exp)
series.ix[1:2] = 7
self.assert_((series.ix[1:2] == 7).values.all())
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.ix[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
# raises exception
self.assertRaises(KeyError, frame.ix.__getitem__, 3)
# however this will work
result = self.frame.ix[2]
expected = self.frame.xs(self.frame.index[2])
assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
assert_frame_equal(result, expected)
def test_getitem_slice_not_sorted(self):
df = self.frame.sortlevel(1).T
# buglet with int typechecking
result = df.ix[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
# tm.assert_isinstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.ix[('bar', 'two'), 'B'] = 5
self.assertEquals(self.frame.ix[('bar', 'two'), 'B'], 5)
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
self.assertEquals(df.ix[('bar', 'two'), 1], 7)
def test_fancy_slice_partial(self):
result = self.frame.ix['bar':'baz']
expected = self.frame[3:7]
assert_frame_equal(result, expected)
result = self.ymd.ix[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.ix[('a', 'y'), :]
expected = df.ix[('a', 'y')]
assert_frame_equal(result, expected)
result = df.ix[('a', 'y'), [1, 0]]
expected = df.ix[('a', 'y')][[1, 0]]
assert_frame_equal(result, expected)
self.assertRaises(KeyError, df.ix.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
        assertRaisesRegexp(TypeError, 'hierarchical index', df.sortlevel, 0)
# -*- coding: utf-8 -*-
"""Tests for dataframe `adni` extension."""
# pylint: disable=W0621
# Third party imports
import numpy as np
import pandas as pd
import pytest
from adnipy import adni # noqa: F401 pylint: disable=W0611
@pytest.fixture
def test_df():
"""Provide sample dataframe for standardized testing."""
columns = [
"Subject ID",
"Description",
"Group",
"VISCODE",
"VISCODE2",
"Image ID",
"Acq Date",
"RID",
]
subjects = [
["101_S_1001", "Average", "MCI", "m12", "m12", 100001, "1/01/2001", 1001],
["101_S_1001", "Average", "MCI", "m24", "m24", 200001, "1/01/2002", 1001],
["102_S_1002", "Average", "AD", "m12", "m12", 100002, "2/02/2002", 1002],
["102_S_1002", "Dynamic", "AD", "m12", "m12", 200002, "2/02/2002", 1002],
["103_S_1003", "Average", "LMCI", "m12", "m12", 100003, "3/03/2003", 1003],
["104_S_1004", "Average", "EMCI", "m12", "m12", 100004, "4/04/2004", 1004],
]
dataframe = pd.DataFrame(subjects, columns=columns)
return dataframe
@pytest.fixture
def test_timepoints(test_df):
"""Dictionairy for the timepoints in test_df if Description is ignored."""
test_df = test_df.drop(columns=["Description"])
timepoints = {
"Timepoint 1": test_df.iloc[[0, 2, 4, 5]].set_index(["Subject ID", "Image ID"]),
"Timepoint 2": test_df.iloc[[1, 3]].set_index(["Subject ID", "Image ID"]),
}
return timepoints
def test_rid_from_subject_id(test_df):
"""Test creating RID from Subject ID."""
correct = test_df
test_df = test_df.drop(columns="RID")
with_rid = test_df.adni.rid()
    pd.testing.assert_frame_equal(correct, with_rid)
from datetime import timedelta
import operator
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import IncompatibleFrequency
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
Categorical,
Index,
IntervalIndex,
Series,
Timedelta,
bdate_range,
date_range,
isna,
)
import pandas._testing as tm
from pandas.core import nanops, ops
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestSeriesFlexArithmetic:
@pytest.mark.parametrize(
"ts",
[
(lambda x: x, lambda x: x * 2, False),
(lambda x: x, lambda x: x[::2], False),
(lambda x: x, lambda x: 5, True),
(lambda x: tm.makeFloatSeries(), lambda x: tm.makeFloatSeries(), True),
],
)
@pytest.mark.parametrize(
"opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"]
)
def test_flex_method_equivalence(self, opname, ts):
# check that Series.{opname} behaves like Series.__{opname}__,
tser = tm.makeTimeSeries().rename("ts")
series = ts[0](tser)
other = ts[1](tser)
check_reverse = ts[2]
op = getattr(Series, opname)
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
def test_flex_method_subclass_metadata_preservation(self, all_arithmetic_operators):
# GH 13208
class MySeries(Series):
_metadata = ["x"]
@property
def _constructor(self):
return MySeries
opname = all_arithmetic_operators
op = getattr(Series, opname)
m = MySeries([1, 2, 3], name="test")
m.x = 42
result = op(m, 1)
assert result.x == 42
def test_flex_add_scalar_fill_value(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
tm.assert_series_equal(res, exp)
pairings = [(Series.div, operator.truediv, 1), (Series.rdiv, ops.rtruediv, 1)]
for op in ["add", "sub", "mul", "pow", "truediv", "floordiv"]:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, "r" + op)
# bind op at definition time...
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
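    # pairings now also holds, e.g., (Series.add, operator.add, 0) and
    # (Series.radd, <reversed operator.add>, 0) for each op name listed above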
@pytest.mark.parametrize("op, equiv_op, fv", pairings)
def test_operators_combine(self, op, equiv_op, fv):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isna(a)
bmask = isna(b)
exp_values = []
for i in range(len(exp_index)):
with np.errstate(all="ignore"):
if amask[i]:
if bmask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
tm.assert_series_equal(result, expected)
a = Series([np.nan, 1.0, 2.0, 3.0, np.nan], index=np.arange(5))
b = Series([np.nan, 1, np.nan, 3, np.nan, 4.0], index=np.arange(6))
result = op(a, b)
exp = equiv_op(a, b)
        tm.assert_series_equal(result, exp)
'''GDELTeda.py
Project: WGU Data Management/Analytics Undergraduate Capstone
<NAME>
August 2021
Class for collecting Pymongo and Pandas operations to automate EDA on
subsets of GDELT records (Events/Mentions, GKG, or joins).
Basic use should be by import and implementation within an IDE, or by editing
section # C00 and running this script directly.
Primary member functions include descriptive docstrings for their intent and
use.
WARNING: project file operations are based on relative pathing from the
'scripts' directory this Python script is located in, given the creation of
directories 'GDELTdata' and 'EDAlogs' parallel to 'scripts' upon first
GDELTbase and GDELTeda class initializations. If those directories are not
already present, a fallback method for string-literal directory reorientation
may be found in '__init__()' at this tag: # A02b - Project directory path.
Specification for any given user's main project directory should be made for
that os.chdir() call.
See also GDELTbase.py, tag # A01a - backup path specification, as any given
user's project directory must be specified there, also.
Contents:
A00 - GDELTeda
A01 - shared class data
A02 - __init__ with instanced data
A02a - Project directory maintenance
A02b - Project directory path specification
Note: Specification at A02b should be changed to suit a user's desired
directory structure, given their local filesystem.
B00 - class methods
B01 - batchEDA()
B02 - eventsBatchEDA()
B03 - mentionsBatchEDA()
B04 - gkgBatchEDA()
Note: see GDELTedaGKGhelpers.py for helper function code & docs
B05 - realtimeEDA()
B06 - loopEDA()
C00 - main w/ testing
C01 - previously-run GDELT realtime EDA testing
'''
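# Example use from an IDE or a short driver script (illustrative only; assumes a
# running MongoDB instance and GDELTbase-managed collections already populated):
#
#   from GDELTeda import GDELTeda
#   gEDA = GDELTeda(tableList=['events', 'mentions'])
#   gEDA.batchEDA(tableList=['events', 'mentions'])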
import json
import multiprocessing
import numpy as np
import os
import pandas as pd
import pymongo
import shutil
import wget
from datetime import datetime, timedelta, timezone
from GDELTbase import GDELTbase
from GDELTedaGKGhelpers import GDELTedaGKGhelpers
from pandas_profiling import ProfileReport
from pprint import pprint as pp
from time import time, sleep
from urllib.error import HTTPError
from zipfile import ZipFile as zf
# A00
class GDELTeda:
'''Collects Pymongo and Pandas operations for querying GDELT records
subsets and performing semi-automated EDA.
Shared class data:
-----------------
logPath - dict
Various os.path objects for EDA log storage.
configFilePaths - dict
Various os.path objects for pandas_profiling.ProfileReport
configuration files, copied to EDA log storage directories upon
__init__, for use in report generation.
Instanced class data:
--------------------
gBase - GDELTbase instance
Used for class member functions, essential for realtimeEDA().
Class methods:
-------------
batchEDA()
eventsBatchEDA()
mentionsBatchEDA()
gkgBatchEDA()
realtimeEDA()
loopEDA()
Helper functions from GDELTedaGKGhelpers.py used in gkgBatchEDA():
pullMainGKGcolumns()
applyDtypes()
convertDatetimes()
convertGKGV15Tone()
mainReport()
locationsReport()
countsReport()
themesReport()
personsReport()
organizationsReport()
'''
# A01 - shared class data
# These paths are set relative to the location of this script, one directory
# up and in 'EDAlogs' parallel to the script directory, which can be named
# arbitrarily.
logPath = {}
logPath['base'] = os.path.join(os.path.abspath(__file__),
os.path.realpath('..'),
'EDAlogs')
logPath['events'] = {}
logPath['events'] = {
'table' : os.path.join(logPath['base'], 'events'),
'batch' : os.path.join(logPath['base'], 'events', 'batch'),
'realtime' : os.path.join(logPath['base'], 'events', 'realtime'),
}
logPath['mentions'] = {
'table' : os.path.join(logPath['base'], 'mentions'),
'batch' : os.path.join(logPath['base'], 'mentions', 'batch'),
'realtime' : os.path.join(logPath['base'], 'mentions', 'realtime'),
}
logPath['gkg'] = {
'table' : os.path.join(logPath['base'], 'gkg'),
'batch' : os.path.join(logPath['base'], 'gkg', 'batch'),
'realtime' : os.path.join(logPath['base'], 'gkg', 'realtime'),
}
# Turns out, the following isn't the greatest way of keeping track
# of each configuration file. It's easiest to just leave them in the
# exact directories where ProfileReport.to_html() is aimed (via
# os.chdir()), since it's pesky maneuvering outside parameters into
# multiprocessing Pool.map() calls.
# Still, these can and are used in realtimeEDA(), since the size of
# just the most recent datafiles should permit handling them without
  # regard for Pandas DataFrame RAM impact (it's greedy; the easiest
  # mitigation is separate multiprocessing threads, which shouldn't be
  # necessary for realtimeEDA()).
# Regardless, all these entries are for copying ProfileReport config
# files to their appropriate directories for use, given base-copies
# present in the 'scripts' directory. Those base copies may be edited
# in 'scripts', since each file will be copied from there.
configFilePaths = {}
configFilePaths['events'] = {
'batch' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTeventsEDAconfig_batch.yaml"),
'realtime' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTeventsEDAconfig_realtime.yaml"),
}
configFilePaths['mentions'] = {
'batch' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTmentionsEDAconfig_batch.yaml"),
'realtime' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTmentionsEDAconfig_realtime.yaml"),
}
configFilePaths['gkg'] = {}
configFilePaths['gkg']['batch'] = {
'main' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgMainEDAconfig_batch.yaml"),
'locations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgLocationsEDAconfig_batch.yaml"),
'counts' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgCountsEDAconfig_batch.yaml"),
'themes' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgThemesEDAconfig_batch.yaml"),
'persons' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgPersonsEDAconfig_batch.yaml"),
'organizations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgOrganizationsEDAconfig_batch.yaml"),
}
configFilePaths['gkg']['realtime'] = {
'main' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgMainEDAconfig_realtime.yaml"),
'locations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgLocationsEDAconfig_realtime.yaml"),
'counts' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgCountsEDAconfig_realtime.yaml"),
'themes' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgThemesEDAconfig_realtime.yaml"),
'persons' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgPersonsEDAconfig_realtime.yaml"),
'organizations' : os.path.join(logPath['base'],
os.path.realpath('..'),
'scripts',
"GDELTgkgOrganizationsEDAconfig_realtime.yaml"),
}
# A02
def __init__(self, tableList = ['events', 'mentions', 'gkg']):
'''GDELTeda class initialization, takes a list of GDELT tables to
perform EDA on. Instantiates a GDELTbase() instance for use by class
methods and checks for presence of EDAlogs directories, creating them if
they aren't present, and copying all ProfileReport-required config files
to their applicable directories.
Parameters:
----------
tableList - list of strings, default ['events','mentions','gkg']
Controls detection and creation of .../EDALogs/... subdirectories for
collection of Pandas Profiling ProfileReport HTML EDA document output.
Also controls permission for class member functions to perform
operations on tables specified by those functions' tableList parameters
as a failsafe against a lack of project directories required for those
operations, specifically output of HTML EDA documents.
output:
------
Produces exhaustive EDA for GDELT record subsets for specified tables
through Pandas Profiling ProfileReport-output HTML documents.
All procedurally automated steps towards report generation are shown
in console output during script execution.
'''
# instancing tables for operations to be passed to member functions
self.tableList = tableList
print("Instantiating GDELTeda...\n")
self.gBase = GDELTbase()
if 'events' not in tableList and \
'mentions' not in tableList and \
'gkg' not in tableList:
print("Error! 'tableList' values do not include a valid GDELT table.",
"\nPlease use one or more of 'events', 'mentions', and/or 'gkg'.")
# instancing trackers for realtimeEDA() and loopEDA()
self.realtimeStarted = False
self.realtimeLooping = False
self.realtimeWindow = 0
self.lastRealDatetime = ''
self.nextRealDatetime = ''
# A02a - Project EDA log directories confirmation and/or creation, and
# Pandas Profiling ProfileReport configuration file copying from 'scripts'
# directory.
print(" Checking log directory...")
if not os.path.isdir(self.logPath['base']):
print(" Doesn't exist! Making...")
# A02b - Project directory path
# For obvious reasons, any user of this script should change this
# string to suit their needs. The directory described with this string
# should be one directory above the location of the 'scripts' directory
# this file should be in. If this file is not in 'scripts', unpredictable
# behavior may occur, and no guarantees of functionality are intended for
# such a state.
os.chdir('C:\\Users\\urf\\Projects\\WGU capstone')
os.mkdir(self.logPath['base'])
for table in tableList:
# switch to EDAlogs directory
os.chdir(self.logPath['base'])
# Branch: table subdirectories not found, create all
if not os.path.isdir(self.logPath[table]['table']):
print("Did not find .../EDAlogs/", table, "...")
print(" Creating .../EDAlogs/", table, "...")
os.mkdir(self.logPath[table]['table'])
os.chdir(self.logPath[table]['table'])
print(" Creating .../EDAlogs/", table, "/batch")
os.mkdir(self.logPath[table]['batch'])
print(" Creating .../EDAlogs/", table, "/realtime")
os.mkdir(self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['realtime'])
# Branch: table subdirectories found, create batch/realtime directories
# if not present.
else:
print(" Found .../EDAlogs/", table,"...")
os.chdir(self.logPath[table]['table'])
if not os.path.isdir(self.logPath[table]['batch']):
print(" Did not find .../EDAlogs/", table, "/batch , creating...")
os.mkdir(self.logPath[table]['batch'])
if not os.path.isdir(self.logPath[table]['realtime']):
print(" Did not find .../EDAlogs/", table, "/realtime , creating...")
os.mkdir(self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['realtime'])
# Copying pandas_profiling.ProfileReport configuration files
print(" Copying configuration files...\n")
if table == 'gkg':
# There's a lot of these, but full normalization of GKG is
# prohibitively RAM-expensive, so reports need to be generated for
# both the main columns and the main columns normalized for each
# variable-length subfield.
shutil.copy(self.configFilePaths[table]['realtime']['main'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['locations'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['counts'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['themes'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['persons'],
self.logPath[table]['realtime'])
shutil.copy(self.configFilePaths[table]['realtime']['organizations'],
self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['main'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['locations'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['counts'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['themes'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['persons'],
self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch']['organizations'],
self.logPath[table]['batch'])
else:
shutil.copy(self.configFilePaths[table]['realtime'],
self.logPath[table]['realtime'])
os.chdir(self.logPath[table]['batch'])
shutil.copy(self.configFilePaths[table]['batch'],
self.logPath[table]['batch'])
# B00 - class methods
# B01
def batchEDA(self, tableList = ['events','mentions','gkg']):
'''Reshapes and re-types GDELT records for generating Pandas
Profiling ProfileReport()-automated, simple EDA reports from Pandas
DataFrames, from MongoDB-query-cursors.
WARNING: extremely RAM, disk I/O, and processing intensive. Be aware of
what resources are available for these operations at runtime.
Relies on Python multiprocessing.Pool.map() calls against class member
functions eventsBatchEDA() and mentionsBatchEDA(), and a regular call on
gkgBatchEDA(), which uses multiprocessing.Pool.map() calls within it.
Parameters:
----------
tableList - list of strings, default ['events','mentions','gkg']
Permits limiting analysis to one or more tables.
Output:
------
Displays progress through the function's operations via console output
while producing Pandas Profiling ProfileReport.to_file() html documents
    for each table specified in tableList.
'''
if tableList != self.tableList:
print("\n Error: this GDELTeda object may have been initialized\n",
" without checking for the presence of directories\n",
" required for this function's operations.\n",
" Please check GDELTeda parameters and try again.")
for table in tableList:
print("\n------------------------------------------------------------\n")
print("Executing batch EDA on GDELT table", table, "records...")
# WARNING: RAM, PROCESSING, and DISK I/O INTENSIVE
# Events and Mentions are both much easier to handle than GKG, so
# they're called in their own collective function threads with
# multiprocessing.Pool(1).map().
if table == 'events':
os.chdir(self.logPath['events']['batch'])
pool = multiprocessing.Pool(1)
eventsReported = pool.map(self.eventsBatchEDA, ['batch'])
pool.close()
pool.join()
if table == 'mentions':
os.chdir(self.logPath['mentions']['batch'])
pool = multiprocessing.Pool(1)
mentionsReported = pool.map(self.mentionsBatchEDA, ['batch'])
pool.close()
pool.join()
if table == 'gkg':
# Here's the GKG bottleneck! Future investigation of parallelization
# improvements may yield gains here, as normalization of all subfield
# and variable-length measures is very RAM expensive, given the
# expansion in records required.
# So, current handling of GKG subfield and variable-length measures
# is isolating most operations in their own process threads within
# gkgBatchEDA() execution, forcing deallocation of those resources upon
# each Pool.close(), as with Events and Mentions table operations above
# which themselves do not require any additional subfield handling.
os.chdir(self.logPath['gkg']['batch'])
self.gkgBatchEDA()
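# Usage sketch (hedged: GDELTeda's constructor signature is defined elsewhere in
# this file and its arguments are omitted here). Once the .../EDAlogs/ directory
# tree has been created during initialization,
#   eda = GDELTeda()
#   eda.batchEDA(tableList=['events'])
# profiles only the Events collection and writes its ProfileReport html output to
# .../EDAlogs/events/batch .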
# B02
def eventsBatchEDA(self, mode):
'''Performs automatic EDA on GDELT Events record subsets. See
function batchEDA() for "if table == 'events':" case handling and how
this function is invoked as a multiprocessing.Pool.map() call, intended
to isolate its RAM requirements for deallocation upon Pool.close().
In its current state, this function can handle collections of GDELT
Events records up to at least the size of the batch EDA test subset used
in this capstone project, the 30 day period from 05/24/2020 to
06/22/2020.
Parameters:
----------
mode - arbitrary
This parameter exists only to satisfy the multiprocessing.Pool.map()
calling convention: it receives the single element of the iterable
passed to map(), so exactly one iteration of the function executes.
Output:
------
Console displays progress through steps with time taken throughout,
and function generates EDA profile html documents in appropriate project
directories.
'''
columnNames = [
'GLOBALEVENTID',
'Actor1Code',
'Actor1Name',
'Actor1CountryCode',
'Actor1Type1Code',
'Actor1Type2Code',
'Actor1Type3Code',
'Actor2Code',
'Actor2Name',
'Actor2CountryCode',
'Actor2Type1Code',
'Actor2Type2Code',
'Actor2Type3Code',
'IsRootEvent',
'EventCode',
'EventBaseCode',
'EventRootCode',
'QuadClass',
'AvgTone',
'Actor1Geo_Type',
'Actor1Geo_FullName',
'Actor1Geo_Lat',
'Actor1Geo_Long',
'Actor2Geo_Type',
'Actor2Geo_FullName',
'Actor2Geo_Lat',
'Actor2Geo_Long',
'ActionGeo_Type',
'ActionGeo_FullName',
'ActionGeo_Lat',
'ActionGeo_Long',
'DATEADDED',
'SOURCEURL',
]
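# Note on the dtype map below: type(1) evaluates to int, type(1.1) to float and
# type(True) to bool; the string columns use pandas' nullable StringDtype so that
# missing values are kept as <NA> instead of being coerced to NaN/object.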
columnTypes = {
'GLOBALEVENTID' : type(1),
'Actor1Code': pd.StringDtype(),
'Actor1Name': pd.StringDtype(),
'Actor1CountryCode': pd.StringDtype(),
'Actor1Type1Code' : pd.StringDtype(),
'Actor1Type2Code' : pd.StringDtype(),
'Actor1Type3Code' : pd.StringDtype(),
'Actor2Code': pd.StringDtype(),
'Actor2Name': pd.StringDtype(),
'Actor2CountryCode': pd.StringDtype(),
'Actor2Type1Code' : pd.StringDtype(),
'Actor2Type2Code' : pd.StringDtype(),
'Actor2Type3Code' : pd.StringDtype(),
'IsRootEvent': type(True),
'EventCode': pd.StringDtype(),
'EventBaseCode': pd.StringDtype(),
'EventRootCode': pd.StringDtype(),
'QuadClass': type(1),
'AvgTone': type(1.1),
'Actor1Geo_Type': type(1),
'Actor1Geo_FullName': pd.StringDtype(),
'Actor1Geo_Lat': pd.StringDtype(),
'Actor1Geo_Long': pd.StringDtype(),
'Actor2Geo_Type': type(1),
'Actor2Geo_FullName': pd.StringDtype(),
'Actor2Geo_Lat': pd.StringDtype(),
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2014-2019 OpenEEmeter contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import numpy as np
import pandas as pd
import pytest
from eemeter.caltrack.usage_per_day import (
CalTRACKUsagePerDayCandidateModel,
CalTRACKUsagePerDayModelResults,
DataSufficiency,
_caltrack_predict_design_matrix,
fit_caltrack_usage_per_day_model,
caltrack_usage_per_day_predict,
caltrack_sufficiency_criteria,
get_intercept_only_candidate_models,
get_too_few_non_zero_degree_day_warning,
get_total_degree_day_too_low_warning,
get_parameter_negative_warning,
get_parameter_p_value_too_high_warning,
get_cdd_only_candidate_models,
get_hdd_only_candidate_models,
get_cdd_hdd_candidate_models,
select_best_candidate,
)
from eemeter.exceptions import MissingModelParameterError, UnrecognizedModelTypeError
from eemeter.features import (
compute_time_features,
compute_temperature_features,
compute_usage_per_day_feature,
merge_features,
)
from eemeter.metrics import ModelMetrics
from eemeter.warnings import EEMeterWarning
from eemeter.transform import day_counts, get_baseline_data
def test_candidate_model_minimal():
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="model_type", formula="formula", status="status"
)
assert candidate_model.model_type == "model_type"
assert candidate_model.formula == "formula"
assert candidate_model.status == "status"
assert candidate_model.model_params == {}
assert candidate_model.warnings == []
assert str(candidate_model).startswith("CalTRACKUsagePerDayCandidateModel")
assert candidate_model.json() == {
"formula": "formula",
"model_params": {},
"model_type": "model_type",
"r_squared_adj": None,
"status": "status",
"warnings": [],
}
def test_candidate_model_json_with_warning():
eemeter_warning = EEMeterWarning(
qualified_name="qualified_name", description="description", data={}
)
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="model_type",
formula="formula",
status="status",
warnings=[eemeter_warning],
)
assert candidate_model.json() == {
"formula": "formula",
"model_params": {},
"model_type": "model_type",
"r_squared_adj": None,
"status": "status",
"warnings": [
{
"data": {},
"description": "description",
"qualified_name": "qualified_name",
}
],
}
def test_candidate_model_json_none_and_nan_values():
eemeter_warning = EEMeterWarning(
qualified_name="qualified_name", description="description", data={}
)
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="model_type",
formula="formula",
status="status",
warnings=[eemeter_warning],
r_squared_adj=None,
)
assert candidate_model.json()["r_squared_adj"] is None
candidate_model.r_squared_adj = np.nan
assert candidate_model.json()["r_squared_adj"] is None
def test_data_sufficiency_minimal():
data_sufficiency = DataSufficiency(status="status", criteria_name="criteria_name")
assert data_sufficiency.status == "status"
assert data_sufficiency.criteria_name == "criteria_name"
assert data_sufficiency.warnings == []
assert data_sufficiency.settings == {}
assert str(data_sufficiency).startswith("DataSufficiency")
assert data_sufficiency.json() == {
"criteria_name": "criteria_name",
"data": {},
"settings": {},
"status": "status",
"warnings": [],
}
def test_data_sufficiency_json_with_warning():
eemeter_warning = EEMeterWarning(
qualified_name="qualified_name", description="description", data={}
)
data_sufficiency = DataSufficiency(
status="status", criteria_name="criteria_name", warnings=[eemeter_warning]
)
assert data_sufficiency.json() == {
"criteria_name": "criteria_name",
"settings": {},
"status": "status",
"data": {},
"warnings": [
{
"data": {},
"description": "description",
"qualified_name": "qualified_name",
}
],
}
def test_model_results_minimal():
model_results = CalTRACKUsagePerDayModelResults(
status="status", method_name="method_name"
)
assert model_results.status == "status"
assert model_results.method_name == "method_name"
assert model_results.model is None
assert model_results.r_squared_adj is None
assert model_results.candidates == []
assert model_results.warnings == []
assert model_results.metadata == {}
assert model_results.settings == {}
assert str(model_results).startswith("CalTRACKUsagePerDayModelResults")
assert model_results.json() == {
"candidates": None,
"interval": None,
"metadata": {},
"method_name": "method_name",
"totals_metrics": None,
"avgs_metrics": None,
"model": None,
"settings": {},
"status": "status",
"r_squared_adj": None,
"warnings": [],
}
def test_model_results_json_with_objects():
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="model_type", formula="formula", status="status"
)
eemeter_warning = EEMeterWarning(
qualified_name="qualified_name", description="description", data={}
)
model_results = CalTRACKUsagePerDayModelResults(
status="status",
method_name="method_name",
model=candidate_model,
candidates=[candidate_model],
warnings=[eemeter_warning],
)
assert model_results.json(with_candidates=True) == {
"candidates": [
{
"formula": "formula",
"model_params": {},
"model_type": "model_type",
"r_squared_adj": None,
"status": "status",
"warnings": [],
}
],
"metadata": {},
"interval": None,
"method_name": "method_name",
"totals_metrics": None,
"avgs_metrics": None,
"model": {
"formula": "formula",
"model_params": {},
"model_type": "model_type",
"r_squared_adj": None,
"status": "status",
"warnings": [],
},
"settings": {},
"status": "status",
"r_squared_adj": None,
"warnings": [
{
"data": {},
"description": "description",
"qualified_name": "qualified_name",
}
],
}
def test_model_results_json_with_nan_r_squared_adj():
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="model_type",
formula="formula",
status="status",
r_squared_adj=np.nan,
)
model_results = CalTRACKUsagePerDayModelResults(
status="status",
method_name="method_name",
model=candidate_model,
r_squared_adj=np.nan,
)
assert model_results.json() == {
"candidates": None,
"interval": None,
"metadata": {},
"method_name": "method_name",
"totals_metrics": None,
"avgs_metrics": None,
"model": {
"formula": "formula",
"model_params": {},
"model_type": "model_type",
"r_squared_adj": None,
"status": "status",
"warnings": [],
},
"settings": {},
"status": "status",
"r_squared_adj": None,
"warnings": [],
}
def test_model_results_json_with_model_metrics():
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="model_type", formula="formula", status="status", r_squared_adj=0.5
)
model_results = CalTRACKUsagePerDayModelResults(
status="status",
method_name="method_name",
model=candidate_model,
r_squared_adj=np.nan,
)
model_metrics = ModelMetrics(
observed_input=pd.Series([0, 1, 2]), predicted_input=pd.Series([1, 0, 2])
)
json_result = model_results.json()
json.dumps(json_result) # just make sure it's valid json
assert "totals_metrics" in json_result
assert "avgs_metrics" in json_result
json_result["totals_metrics"] = {} # overwrite because of floats
json_result["avgs_metrics"] = {} # overwrite because of floats
assert json_result == {
"candidates": None,
"interval": None,
"metadata": {},
"method_name": "method_name",
"totals_metrics": {},
"avgs_metrics": {},
"model": {
"formula": "formula",
"model_params": {},
"model_type": "model_type",
"r_squared_adj": 0.5,
"status": "status",
"warnings": [],
},
"settings": {},
"status": "status",
"r_squared_adj": None,
"warnings": [],
}
@pytest.fixture
def utc_index():
return pd.date_range("2011-01-01", freq="H", periods=365 * 24 + 1, tz="UTC")
@pytest.fixture
def temperature_data(utc_index):
series = pd.Series(
[
30.0 * ((i % (365 * 24.0)) / (365 * 24.0)) # 30 * frac of way through year
+ 50.0 # range from 50 to 80
for i in range(len(utc_index))
],
index=utc_index,
)
return series
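# The fixture above ramps hourly temperatures linearly from 50 to 80 degrees over
# the year, so the annual mean is ~65 -- which is why the balance-point assertions
# below (e.g. round(prediction.temperature_mean.mean()) == 65.0) land on round numbers.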
@pytest.fixture
def prediction_index(temperature_data):
return temperature_data.resample("D").mean().index
@pytest.fixture
def candidate_model_no_model_none():
return CalTRACKUsagePerDayCandidateModel(
model_type=None,
formula="formula",
status="QUALIFIED",
model_params={"intercept": 1},
)
def test_caltrack_predict_no_model_none(
candidate_model_no_model_none, prediction_index, temperature_data
):
with pytest.raises(ValueError):
candidate_model_no_model_none.predict(prediction_index, temperature_data)
@pytest.fixture
def candidate_model_intercept_only():
return CalTRACKUsagePerDayCandidateModel(
model_type="intercept_only",
formula="formula",
status="QUALIFIED",
model_params={"intercept": 1},
)
def test_caltrack_predict_intercept_only(
candidate_model_intercept_only, prediction_index, temperature_data
):
model_prediction = candidate_model_intercept_only.predict(
prediction_index, temperature_data
)
prediction = model_prediction.result
assert prediction["predicted_usage"].sum() == 365
assert sorted(prediction.columns) == ["predicted_usage"]
def test_caltrack_predict_intercept_only_with_disaggregated(
candidate_model_intercept_only, prediction_index, temperature_data
):
model_prediction = candidate_model_intercept_only.predict(
prediction_index, temperature_data, with_disaggregated=True
)
prediction = model_prediction.result
assert prediction["base_load"].sum() == 365.0
assert prediction["cooling_load"].sum() == 0.0
assert prediction["heating_load"].sum() == 0.0
assert prediction["predicted_usage"].sum() == 365.0
assert sorted(prediction.columns) == [
"base_load",
"cooling_load",
"heating_load",
"predicted_usage",
]
def test_caltrack_predict_intercept_only_with_design_matrix(
candidate_model_intercept_only, prediction_index, temperature_data
):
model_prediction = candidate_model_intercept_only.predict(
prediction_index, temperature_data, with_design_matrix=True
)
prediction = model_prediction.result
assert sorted(prediction.columns) == [
"n_days",
"n_days_dropped",
"n_days_kept",
"predicted_usage",
"temperature_mean",
]
assert prediction.n_days.sum() == 365.0
assert prediction.n_days_dropped.sum() == 0.0
assert prediction.n_days_kept.sum() == 365
assert prediction.predicted_usage.sum() == 365.0
assert round(prediction.temperature_mean.mean()) == 65.0
@pytest.fixture
def candidate_model_missing_params():
return CalTRACKUsagePerDayCandidateModel(
model_type="intercept_only",
formula="formula",
status="QUALIFIED",
model_params={},
)
def test_caltrack_predict_missing_params(
candidate_model_missing_params, prediction_index, temperature_data
):
with pytest.raises(MissingModelParameterError):
candidate_model_missing_params.predict(prediction_index, temperature_data)
@pytest.fixture
def candidate_model_cdd_only():
return CalTRACKUsagePerDayCandidateModel(
model_type="cdd_only",
formula="formula",
status="QUALIFIED",
model_params={"intercept": 1, "beta_cdd": 1, "cooling_balance_point": 65},
)
def test_caltrack_predict_cdd_only(
candidate_model_cdd_only, prediction_index, temperature_data
):
model_prediction = candidate_model_cdd_only.predict(
prediction_index, temperature_data
)
prediction_df = model_prediction.result
assert round(prediction_df.predicted_usage.sum()) == 1733
def test_caltrack_predict_cdd_only_with_disaggregated(
candidate_model_cdd_only, prediction_index, temperature_data
):
model_prediction = candidate_model_cdd_only.predict(
prediction_index, temperature_data, with_disaggregated=True
)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum()) == 1733
assert round(prediction.base_load.sum()) == 365.0
assert round(prediction.heating_load.sum()) == 0.0
assert round(prediction.cooling_load.sum()) == 1368.0
def test_caltrack_predict_cdd_only_with_design_matrix(
candidate_model_cdd_only, prediction_index, temperature_data
):
model_prediction = candidate_model_cdd_only.predict(
prediction_index, temperature_data, with_design_matrix=True
)
prediction = model_prediction.result
assert sorted(prediction.columns) == [
"cdd_65",
"n_days",
"n_days_dropped",
"n_days_kept",
"predicted_usage",
"temperature_mean",
]
assert round(prediction.cdd_65.sum()) == 1368.0
assert prediction.n_days.sum() == 365.0
assert prediction.n_days_dropped.sum() == 0
assert prediction.n_days_kept.sum() == 365.0
assert round(prediction.predicted_usage.sum()) == 1733
assert round(prediction.temperature_mean.mean()) == 65.0
@pytest.fixture
def candidate_model_hdd_only():
return CalTRACKUsagePerDayCandidateModel(
model_type="hdd_only",
formula="formula",
status="QUALIFIED",
model_params={"intercept": 1, "beta_hdd": 1, "heating_balance_point": 65},
)
def test_caltrack_predict_hdd_only(
candidate_model_hdd_only, prediction_index, temperature_data
):
model_prediction = candidate_model_hdd_only.predict(
prediction_index, temperature_data
)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum()) == 1734
def test_caltrack_predict_hdd_only_with_disaggregated(
candidate_model_hdd_only, prediction_index, temperature_data
):
model_prediction = candidate_model_hdd_only.predict(
prediction_index, temperature_data, with_disaggregated=True
)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum()) == 1734
assert round(prediction.base_load.sum()) == 365.0
assert round(prediction.heating_load.sum()) == 1369.0
assert round(prediction.cooling_load.sum()) == 0.0
def test_caltrack_predict_hdd_only_with_design_matrix(
candidate_model_hdd_only, prediction_index, temperature_data
):
model_prediction = candidate_model_hdd_only.predict(
prediction_index, temperature_data, with_design_matrix=True
)
prediction = model_prediction.result
assert sorted(prediction.columns) == [
"hdd_65",
"n_days",
"n_days_dropped",
"n_days_kept",
"predicted_usage",
"temperature_mean",
]
assert round(prediction.hdd_65.sum()) == 1369.0
assert prediction.n_days.sum() == 365.0
assert prediction.n_days_dropped.sum() == 0
assert prediction.n_days_kept.sum() == 365.0
assert round(prediction.predicted_usage.sum()) == 1734
assert round(prediction.temperature_mean.mean()) == 65.0
@pytest.fixture
def candidate_model_cdd_hdd():
return CalTRACKUsagePerDayCandidateModel(
model_type="cdd_hdd",
formula="formula",
status="QUALIFIED",
model_params={
"intercept": 1,
"beta_hdd": 1,
"heating_balance_point": 60,
"beta_cdd": 1,
"cooling_balance_point": 70,
},
)
def test_caltrack_predict_cdd_hdd(
candidate_model_cdd_hdd, prediction_index, temperature_data
):
model_prediction = candidate_model_cdd_hdd.predict(
prediction_index, temperature_data
)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum()) == 1582.0
def test_caltrack_predict_cdd_hdd_disaggregated(
candidate_model_cdd_hdd, prediction_index, temperature_data
):
model_prediction = candidate_model_cdd_hdd.predict(
prediction_index, temperature_data, with_disaggregated=True
)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum()) == 1582.0
assert round(prediction.base_load.sum()) == 365.0
assert round(prediction.heating_load.sum()) == 609.0
assert round(prediction.cooling_load.sum()) == 608.0
def test_caltrack_predict_cdd_hdd_with_design_matrix(
candidate_model_cdd_hdd, prediction_index, temperature_data
):
model_prediction = candidate_model_cdd_hdd.predict(
prediction_index, temperature_data, with_design_matrix=True
)
prediction = model_prediction.result
assert sorted(prediction.columns) == [
"cdd_70",
"hdd_60",
"n_days",
"n_days_dropped",
"n_days_kept",
"predicted_usage",
"temperature_mean",
]
assert round(prediction.cdd_70.sum()) == 608.0
assert round(prediction.hdd_60.sum()) == 609.0
assert prediction.n_days.sum() == 365.0
assert prediction.n_days_dropped.sum() == 0
assert prediction.n_days_kept.sum() == 365.0
assert round(prediction.predicted_usage.sum()) == 1582.0
assert round(prediction.temperature_mean.mean()) == 65.0
def test_caltrack_predict_cdd_hdd_with_design_matrix_missing_temp_data(
candidate_model_cdd_hdd, il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
prediction_index = meter_data.index[2:4]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
temp_data = temperature_data["2015-11":"2016-03"]
temp_data_greater_90perc_missing = temp_data[
~(
(pd.Timestamp("2016-01-27T12:00:00", tz="utc") < temp_data.index)
& (temp_data.index < pd.Timestamp("2016-01-31T12:00:00", tz="utc"))
)
].reindex(temp_data.index)
model_prediction = candidate_model_cdd_hdd.predict(
prediction_index, temp_data_greater_90perc_missing, with_design_matrix=True
)
prediction = model_prediction.result
assert sorted(prediction.columns) == [
"cdd_70",
"hdd_60",
"n_days",
"n_days_dropped",
"n_days_kept",
"predicted_usage",
"temperature_mean",
]
assert prediction.shape == (0, 7)
@pytest.fixture
def candidate_model_bad_model_type():
return CalTRACKUsagePerDayCandidateModel(
model_type="unknown", formula="formula", status="QUALIFIED", model_params={}
)
def test_caltrack_predict_bad_model_type(
candidate_model_bad_model_type, temperature_data, prediction_index
):
with pytest.raises(UnrecognizedModelTypeError):
candidate_model_bad_model_type.predict(prediction_index, temperature_data)
def test_caltrack_predict_empty(
candidate_model_bad_model_type, temperature_data, prediction_index
):
model_prediction_obj = candidate_model_bad_model_type.predict(
prediction_index[:0], temperature_data[:0]
)
assert model_prediction_obj.result.empty is True
@pytest.fixture
def cdd_hdd_h54_c67_billing_monthly_totals(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
temperature_features = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[54],
cooling_balance_points=[67],
use_mean_daily_values=False,
)
data = merge_features([meter_data, temperature_features])
return data
def test_caltrack_predict_design_matrix_input_avg_false_output_avg_true(
cdd_hdd_h54_c67_billing_monthly_totals
):
data = cdd_hdd_h54_c67_billing_monthly_totals
prediction = _caltrack_predict_design_matrix(
"cdd_hdd",
{
"intercept": 13.420093629452852,
"beta_cdd": 2.257868665412409,
"beta_hdd": 1.0479347638717025,
"cooling_balance_point": 67,
"heating_balance_point": 54,
},
data,
input_averages=False,
output_averages=True,
)
assert round(prediction.mean(), 3) == 28.253
def test_caltrack_predict_design_matrix_input_avg_false_output_avg_false(
cdd_hdd_h54_c67_billing_monthly_totals
):
data = cdd_hdd_h54_c67_billing_monthly_totals
prediction = _caltrack_predict_design_matrix(
"cdd_hdd",
{
"intercept": 13.420093629452852,
"beta_cdd": 2.257868665412409,
"beta_hdd": 1.0479347638717025,
"cooling_balance_point": 67,
"heating_balance_point": 54,
},
data,
input_averages=False,
output_averages=False,
)
assert round(prediction.mean(), 3) == 855.832
@pytest.fixture
def cdd_hdd_h54_c67_billing_monthly_avgs(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
temperature_features = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[54],
cooling_balance_points=[67],
use_mean_daily_values=True,
)
meter_data_feature = compute_usage_per_day_feature(meter_data)
data = merge_features([meter_data_feature, temperature_features])
return data
def test_caltrack_predict_design_matrix_input_avg_true_output_avg_false(
cdd_hdd_h54_c67_billing_monthly_avgs
):
data = cdd_hdd_h54_c67_billing_monthly_avgs
prediction = _caltrack_predict_design_matrix(
"cdd_hdd",
{
"intercept": 13.420093629452852,
"beta_cdd": 2.257868665412409,
"beta_hdd": 1.0479347638717025,
"cooling_balance_point": 67,
"heating_balance_point": 54,
},
data,
input_averages=True,
output_averages=False,
)
assert round(prediction.mean(), 3) == 855.832
def test_caltrack_predict_design_matrix_input_avg_true_output_avg_true(
cdd_hdd_h54_c67_billing_monthly_avgs
):
data = cdd_hdd_h54_c67_billing_monthly_avgs
prediction = _caltrack_predict_design_matrix(
"cdd_hdd",
{
"intercept": 13.420093629452852,
"beta_cdd": 2.257868665412409,
"beta_hdd": 1.0479347638717025,
"cooling_balance_point": 67,
"heating_balance_point": 54,
},
data,
input_averages=True,
output_averages=True,
)
assert round(prediction.mean(), 3) == 28.253
def test_caltrack_predict_design_matrix_n_days(cdd_hdd_h54_c67_billing_monthly_totals):
# This makes sure that the method works with n_days when
# DatetimeIndexes are not available.
data = cdd_hdd_h54_c67_billing_monthly_totals
data = data.reset_index(drop=True)
data["n_days"] = 1
prediction = _caltrack_predict_design_matrix(
"cdd_hdd",
{
"intercept": 13.420093629452852,
"beta_cdd": 2.257868665412409,
"beta_hdd": 1.0479347638717025,
"cooling_balance_point": 67,
"heating_balance_point": 54,
},
data,
input_averages=True,
output_averages=True,
)
assert prediction.mean() is not None
def test_caltrack_predict_design_matrix_no_days_fails(
cdd_hdd_h54_c67_billing_monthly_totals
):
# This makes sure that the method fails if neither n_days nor
# a DatetimeIndex is available.
data = cdd_hdd_h54_c67_billing_monthly_totals
data = data.reset_index(drop=True)
with pytest.raises(ValueError):
_caltrack_predict_design_matrix(
"cdd_hdd",
{
"intercept": 13.420093629452852,
"beta_cdd": 2.257868665412409,
"beta_hdd": 1.0479347638717025,
"cooling_balance_point": 67,
"heating_balance_point": 54,
},
data,
input_averages=True,
output_averages=True,
)
def test_get_too_few_non_zero_degree_day_warning_ok():
warnings = get_too_few_non_zero_degree_day_warning(
model_type="model_type",
balance_point=65,
degree_day_type="xdd",
degree_days=pd.Series([1, 1, 1]),
minimum_non_zero=2,
)
assert warnings == []
def test_get_too_few_non_zero_degree_day_warning_fail():
warnings = get_too_few_non_zero_degree_day_warning(
model_type="model_type",
balance_point=65,
degree_day_type="xdd",
degree_days=pd.Series([0, 0, 3]),
minimum_non_zero=2,
)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == (
"eemeter.caltrack_daily.model_type.too_few_non_zero_xdd"
)
assert warning.description == (
"Number of non-zero daily XDD values below accepted minimum."
" Candidate fit not attempted."
)
assert warning.data == {
"minimum_non_zero_xdd": 2,
"n_non_zero_xdd": 1,
"xdd_balance_point": 65,
}
def test_get_total_degree_day_too_low_warning_ok():
warnings = get_total_degree_day_too_low_warning(
model_type="model_type",
balance_point=65,
degree_day_type="xdd",
avg_degree_days=pd.Series([1, 1, 1]),
period_days=pd.Series([3, 1, 2]),
minimum_total=4,
)
assert warnings == []
def test_get_total_degree_day_too_low_warning_fail():
warnings = get_total_degree_day_too_low_warning(
model_type="model_type",
balance_point=65,
degree_day_type="xdd",
avg_degree_days=pd.Series([0.5, 0.5, 0.5]),
period_days=pd.Series([3, 1, 2]),
minimum_total=4,
)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == (
"eemeter.caltrack_daily.model_type.total_xdd_too_low"
)
assert warning.description == (
"Total XDD below accepted minimum. Candidate fit not attempted."
)
assert warning.data == {
"total_xdd": 3.0,
"total_xdd_minimum": 4,
"xdd_balance_point": 65,
}
def test_get_parameter_negative_warning_ok():
warnings = get_parameter_negative_warning(
"intercept_only", {"intercept": 0}, "intercept"
)
assert warnings == []
def test_get_parameter_negative_warning_fail():
warnings = get_parameter_negative_warning(
"intercept_only", {"intercept": -1}, "intercept"
)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == (
"eemeter.caltrack_daily.intercept_only.intercept_negative"
)
assert warning.description == (
"Model fit intercept parameter is negative. Candidate model rejected."
)
assert warning.data == {"intercept": -1}
def test_get_parameter_p_value_too_high_warning_ok():
warnings = get_parameter_p_value_too_high_warning(
"intercept_only", {"intercept": 0}, "intercept", 0.1, 0.1
)
assert warnings == []
def test_get_parameter_p_value_too_high_warning_fail():
warnings = get_parameter_p_value_too_high_warning(
"intercept_only", {"intercept": 0}, "intercept", 0.2, 0.1
)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == (
"eemeter.caltrack_daily.intercept_only.intercept_p_value_too_high"
)
assert warning.description == (
"Model fit intercept p-value is too high. Candidate model rejected."
)
assert warning.data == {
"intercept": 0,
"intercept_maximum_p_value": 0.1,
"intercept_p_value": 0.2,
}
def test_get_intercept_only_candidate_models_fail():
# should be covered by ETL, but this ensures no negative values.
data = pd.DataFrame({"meter_value": np.arange(10) * -1})
candidate_models = get_intercept_only_candidate_models(data, weights_col=None)
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "intercept_only"
assert model.formula == "meter_value ~ 1"
assert model.status == "DISQUALIFIED"
assert model.model is not None
assert model.result is not None
assert list(sorted(model.model_params.keys())) == ["intercept"]
assert round(model.model_params["intercept"], 2) == -4.5
assert model.r_squared_adj == 0
assert len(model.warnings) == 1
warning = model.warnings[0]
assert warning.qualified_name == (
"eemeter.caltrack_daily.intercept_only.intercept_negative"
)
def test_get_intercept_only_candidate_models_qualified(
prediction_index, temperature_data
):
data = pd.DataFrame({"meter_value": np.arange(10)})
candidate_models = get_intercept_only_candidate_models(data, weights_col=None)
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "intercept_only"
assert model.formula == "meter_value ~ 1"
assert model.status == "QUALIFIED"
assert model.model is not None
assert model.result is not None
assert list(sorted(model.model_params.keys())) == ["intercept"]
assert round(model.model_params["intercept"], 2) == 4.5
model_prediction = model.predict(prediction_index, temperature_data)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum(), 2) == 1642.5
assert model.r_squared_adj == 0
assert model.warnings == []
assert json.dumps(model.json()) is not None
def test_get_intercept_only_candidate_models_qualified_with_weights(
prediction_index, temperature_data
):
data = pd.DataFrame({"meter_value": np.arange(10), "weights": np.arange(10)})
candidate_models = get_intercept_only_candidate_models(data, "weights")
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "intercept_only"
assert model.formula == "meter_value ~ 1"
assert model.status == "QUALIFIED"
assert model.model is not None
assert model.result is not None
assert list(sorted(model.model_params.keys())) == ["intercept"]
assert round(model.model_params["intercept"], 2) == 6.33
model_prediction = model.predict(prediction_index, temperature_data)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum(), 2) == 2311.67
assert model.r_squared_adj == 0
assert model.warnings == []
assert json.dumps(model.json()) is not None
def test_get_intercept_only_candidate_models_error():
data = pd.DataFrame({"meter_value": []})
candidate_models = get_intercept_only_candidate_models(data, weights_col=None)
assert len(candidate_models) == 1
model = candidate_models[0]
assert len(model.warnings) == 1
warning = model.warnings[0]
assert warning.qualified_name == (
"eemeter.caltrack_daily.intercept_only.model_results"
)
assert warning.description == (
"Error encountered in statsmodels.formula.api.ols method." " (Empty data?)"
)
assert list(sorted(warning.data.keys())) == ["traceback"]
assert warning.data["traceback"] is not None
def test_get_cdd_only_candidate_models_qualified(prediction_index, temperature_data):
data = pd.DataFrame({"meter_value": [1, 1, 1, 6], "cdd_65": [0, 0.1, 0, 5]})
candidate_models = get_cdd_only_candidate_models(data, 1, 1, 0.1, None)
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "cdd_only"
assert model.formula == "meter_value ~ cdd_65"
assert model.status == "QUALIFIED"
assert model.model is not None
assert model.result is not None
assert list(sorted(model.model_params.keys())) == [
"beta_cdd",
"cooling_balance_point",
"intercept",
]
assert round(model.model_params["beta_cdd"], 2) == 1.01
assert round(model.model_params["cooling_balance_point"], 2) == 65
assert round(model.model_params["intercept"], 2) == 0.97
model_prediction = model.predict(prediction_index, temperature_data)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum(), 2) == 1730.04
assert round(model.r_squared_adj, 2) == 1.00
assert model.warnings == []
assert json.dumps(model.json()) is not None
def test_get_cdd_only_candidate_models_qualified_with_weights(
prediction_index, temperature_data
):
data = pd.DataFrame(
{
"meter_value": [1, 1, 1, 6],
"cdd_65": [0, 0.1, 0, 5],
"weights": [1, 100, 1, 1],
}
)
candidate_models = get_cdd_only_candidate_models(data, 1, 1, 0.1, "weights")
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "cdd_only"
assert model.formula == "meter_value ~ cdd_65"
assert model.status == "QUALIFIED"
assert model.model is not None
assert model.result is not None
assert list(sorted(model.model_params.keys())) == [
"beta_cdd",
"cooling_balance_point",
"intercept",
]
assert round(model.model_params["beta_cdd"], 2) == 1.02
assert round(model.model_params["cooling_balance_point"], 2) == 65
assert round(model.model_params["intercept"], 2) == 0.9
model_prediction = model.predict(prediction_index, temperature_data)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum(), 2) == 1723.19
assert round(model.r_squared_adj, 2) == 1.00
assert model.warnings == []
assert json.dumps(model.json()) is not None
def test_get_cdd_only_candidate_models_not_attempted():
data = pd.DataFrame({"meter_value": [1, 1, 1, 6], "cdd_65": [0, 0.1, 0, 5]})
candidate_models = get_cdd_only_candidate_models(data, 10, 10, 0.1, None)
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "cdd_only"
assert model.formula == "meter_value ~ cdd_65"
assert model.status == "NOT ATTEMPTED"
assert model.model is None
assert model.result is None
assert model.model_params == {}
assert model.r_squared_adj is None
assert len(model.warnings) == 2
assert json.dumps(model.json()) is not None
def test_get_cdd_only_candidate_models_disqualified(prediction_index, temperature_data):
data = pd.DataFrame({"meter_value": [1, 1, 1, -4], "cdd_65": [0, 0.1, 0, 5]})
candidate_models = get_cdd_only_candidate_models(data, 1, 1, 0.0, None)
assert len(candidate_models) == 1
model = candidate_models[0]
assert model.model_type == "cdd_only"
assert model.formula == "meter_value ~ cdd_65"
assert model.status == "DISQUALIFIED"
assert model.model is not None
assert model.result is not None
assert list(sorted(model.model_params.keys())) == [
"beta_cdd",
"cooling_balance_point",
"intercept",
]
assert round(model.model_params["beta_cdd"], 2) == -1.01
assert round(model.model_params["cooling_balance_point"], 2) == 65
assert round(model.model_params["intercept"], 2) == 1.03
model_prediction = model.predict(prediction_index, temperature_data)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum(), 2) == -1000.04
assert round(model.r_squared_adj, 2) == 1.00
assert len(model.warnings) == 2
assert json.dumps(model.json()) is not None
def test_get_cdd_only_candidate_models_error():
data = pd.DataFrame({"meter_value": [], "cdd_65": []})
#!/usr/bin/env python
"""
analyse Elasticsearch query
"""
import json
from elasticsearch import Elasticsearch
from elasticsearch import logger as es_logger
from collections import defaultdict, Counter
import re
import os
from datetime import datetime
# Preprocess terms for TF-IDF
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
# progress bar
from tqdm import tqdm
# ploting
import matplotlib.pyplot as plt
# LOG
import logging
from logging.handlers import RotatingFileHandler
# Word embedding for evaluation
from sentence_transformers import SentenceTransformer
from sklearn.manifold import TSNE
import seaborn as sns
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn.metrics.pairwise import cosine_similarity
from scipy import sparse
import scipy.spatial as sp
# Spatial entity as descriptor :
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter
# venn
from matplotlib_venn_wordcloud import venn2_wordcloud, venn3_wordcloud
import operator
# Global var on Levels on spatial and temporal axis
spatialLevels = ['city', 'state', 'country']
temporalLevels = ['day', 'week', 'month', 'period']
def elasticsearch_query(query_fname, logger):
"""
Build an ES query and return a defaultdict with results
:return: tweetsByCityAndDate
"""
# Elastic search credentials
client = Elasticsearch("http://localhost:9200")
es_logger.setLevel(logging.WARNING)
index = "twitter"
# Define a Query
query = open(query_fname, "r").read()
result = Elasticsearch.search(client, index=index, body=query, scroll='2m', size=5000)
# Append all pages form scroll search : avoid the 10k limitation of ElasticSearch
results = avoid10kquerylimitation(result, client, logger)
# Initiate a dict for each city append all Tweets content
tweetsByCityAndDate = defaultdict(list)
for hits in results:
# parse Java date : EEE MMM dd HH:mm:ss Z yyyy
inDate = hits["_source"]["created_at"]
parseDate = datetime.strptime(inDate, "%a %b %d %H:%M:%S %z %Y")
try:  # geocoding may be bad
geocoding = hits["_source"]["rest"]["features"][0]["properties"]
except:
continue  # skip this iteration
if "country" in hits["_source"]["rest"]["features"][0]["properties"]:
# localities do not necessarily have an associated state
try:
cityStateCountry = str(hits["_source"]["rest"]["features"][0]["properties"]["city"]) + "_" + \
str(hits["_source"]["rest"]["features"][0]["properties"]["state"]) + "_" + \
str(hits["_source"]["rest"]["features"][0]["properties"]["country"])
except: # there is no state in geocoding
try:
logger.debug(hits["_source"]["rest"]["features"][0]["properties"]["city"] + " has no state")
cityStateCountry = str(hits["_source"]["rest"]["features"][0]["properties"]["city"]) + "_" + \
str("none") + "_" + \
str(hits["_source"]["rest"]["features"][0]["properties"]["country"])
except: # there is no city as well : only country
# print(json.dumps(hits["_source"], indent=4))
try: #
cityStateCountry = str("none") + "_" + \
str("none") + "_" + \
str(hits["_source"]["rest"]["features"][0]["properties"]["country"])
except:
cityStateCountry = str("none") + "_" + \
str("none") + "_" + \
str("none")
try:
tweetsByCityAndDate[cityStateCountry].append(
{
"tweet": preprocessTweets(hits["_source"]["full_text"]),
"created_at": parseDate
}
)
except:
print(json.dumps(hits["_source"], indent=4))
# biotexInputBuilder(tweetsByCityAndDate)
# pprint(tweetsByCityAndDate)
return tweetsByCityAndDate
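# Shape of the returned defaultdict (keys and values below are illustrative only):
# {"London_England_United Kingdom": [
#     {"tweet": "cleaned tweet text ...", "created_at": datetime(2020, 1, 23, 10, 0)},
#     ...]}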
def avoid10kquerylimitation(result, client, logger):
"""
Elasticsearch limits query results to 10 000 hits. To avoid this limit, we need to paginate the results with the scroll API.
This method appends all pages from the scroll search.
:param result: a result of an Elasticsearch query
:return:
"""
scroll_size = result['hits']['total']["value"]
logger.info("Number of elasticsearch scroll: " + str(scroll_size))
results = []
# Progress bar
pbar = tqdm(total=scroll_size)
while (scroll_size > 0):
try:
scroll_id = result['_scroll_id']
res = client.scroll(scroll_id=scroll_id, scroll='60s')
results += res['hits']['hits']
scroll_size = len(res['hits']['hits'])
pbar.update(scroll_size)
except:
pbar.close()
logger.error("elasticsearch search scroll failed")
break
pbar.close()
return results
def preprocessTweets(text):
"""
1 - Clean up tweets text cf : https://medium.com/analytics-vidhya/basic-tweet-preprocessing-method-with-python-56b4e53854a1
2 - Detection lang
3 - remove stopword ??
:param text:
:return: textclean, the cleaned tweet text
"""
## 1 clean up tweets
# remove URLs
textclean = re.sub('((www\.[^\s]+)|(https?://[^\s]+)|(http?://[^\s]+))', '', text)
textclean = re.sub(r'http\S+', '', textclean)
# remove usernames
# textclean = re.sub('@[^\s]+', '', textclean)
# remove the # in #hashtag
# textclean = re.sub(r'#([^\s]+)', r'\1', textclean)
return textclean
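# Rough example of the cleaning above (illustrative input): URLs are stripped while
# mentions and hashtags are kept, so "Check https://t.co/abc #covid @who" becomes
# "Check  #covid @who"; the double space left behind is harmless for the vectorizers.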
def matrixOccurenceBuilder(tweetsofcity, matrixAggDay_fout, matrixOccurence_fout, save_intermediaire_files, logger):
"""
Create a matrix of :
- line : (city,day)
- column : terms
- value of cells : TF (term frequency)
Help found here :
http://www.xavierdupre.fr/app/papierstat/helpsphinx/notebooks/artificiel_tokenize_features.html
https://towardsdatascience.com/natural-language-processing-feature-engineering-using-tf-idf-e8b9d00e7e76
:param tweetsofcity:
:param matrixAggDay_fout: file to save
:param matrixOccurence_fout: file to save
:return:
"""
# initiate matrix of tweets aggregated by day
# col = ['city', 'day', 'tweetsList', 'bow']
col = ['city', 'day', 'tweetsList']
matrixAggDay = pd.DataFrame(columns=col)
cityDayList = []
logger.info("start full_text concatenation for city & day")
pbar = tqdm(total=len(tweetsofcity))
for city in tweetsofcity:
# create a table with 2 columns : tweet and created_at for a specific city
matrix = pd.DataFrame(tweetsofcity[city])
# Aggregate list of tweets by single day for specifics cities
## Loop on days for a city
period = matrix['created_at'].dt.date
period = period.unique()
period.sort()
for day in period:
# aggregate city and date document
document = '. \n'.join(matrix.loc[matrix['created_at'].dt.date == day]['tweet'].tolist())
# Bag of Words and preprocessing
# preproccesFullText = preprocessTerms(document)
tweetsOfDayAndCity = {
'city': city,
'day': day,
'tweetsList': document
}
cityDayList.append(city + "_" + str(day))
try:
matrixAggDay = matrixAggDay.append(tweetsOfDayAndCity, ignore_index=True)
except:
print("full_text empty after pre-process: "+document)
continue
pbar.update(1)
pbar.close()
if save_intermediaire_files:
logger.info("Saving file: matrix of full_text concatenated by day & city: "+str(matrixAggDay_fout))
matrixAggDay.to_csv(matrixAggDay_fout)
# Count terms with sci-kit learn
cd = CountVectorizer(
stop_words='english',
#preprocessor=sklearn_vectorizer_no_number_preprocessor,
#min_df=2, # token at least present in 2 cities : reduce size of matrix
max_features=25000,
ngram_range=(1, 1),
token_pattern='[a-zA-Z0-9#@]+',  # keep alphanumerics, hashtags and @mentions as tokens
# strip_accents= "ascii" # remove token with special character (trying to keep only english word)
)
cd.fit(matrixAggDay['tweetsList'])
res = cd.transform(matrixAggDay["tweetsList"])
countTerms = res.todense()
# create matrix
## get terms :
# voc = cd.vocabulary_
# listOfTerms = {term for term, index in sorted(voc.items(), key=lambda item: item[1])}
listOfTerms = cd.get_feature_names()
## initiate matrix with counts for each term
matrixOccurence = pd.DataFrame(data=countTerms[0:, 0:], index=cityDayList, columns=listOfTerms)
# save to file
if save_intermediaire_files:
logger.info("Saving file: occurence of term: "+str(matrixOccurence_fout))
matrixOccurence.to_csv(matrixOccurence_fout)
return matrixOccurence
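# Resulting layout (row label is illustrative): the occurrence matrix is indexed by
# "city_state_country_day" strings such as "London_England_United Kingdom_2020-01-23",
# has one column per token (capped at 25 000 by max_features) and stores the raw term
# counts of each city-day document.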
def spatiotemporelFilter(matrix, listOfcities='all', spatialLevel='city', period='all', temporalLevel='day'):
"""
Filter matrix with list of cities and a period
:param matrix:
:param listOfcities:
:param spatialLevel:
:param period:
:param temporalLevel:
:return: matrix filtered
"""
if spatialLevel not in spatialLevels or temporalLevel not in temporalLevels:
print("wrong level, please double check")
return 1
# Extract cities and period
## cities
if listOfcities != 'all': ### we need to filter
###Initiate a numpy array of False
filter = np.zeros((1, len(matrix.index)), dtype=bool)[0]
for city in listOfcities:
### edit filter if index contains the city (for each city of the list)
filter += matrix.index.str.startswith(str(city) + "_")
matrix = matrix.loc[filter]
##period
if str(period) != 'all': ### we need a filter on date
datefilter = np.zeros((1, len(matrix.index)), dtype=bool)[0]
for date in period:
datefilter += matrix.index.str.contains(date.strftime('%Y-%m-%d'))
matrix = matrix.loc[datefilter]
return matrix
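# Usage sketch for spatiotemporelFilter (city name and dates are illustrative): keep
# only the London rows of the occurrence matrix for one week, relying on the
# "city_state_country_day" index format produced by matrixOccurenceBuilder().
#
#   week = pd.date_range("2020-01-23", "2020-01-30")
#   london_week = spatiotemporelFilter(matrixOccurence, listOfcities=['London'],
#                                      spatialLevel='city', period=week)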
def HTFIDF(matrixOcc, matrixHTFIDF_fname, biggestHTFIDFscore_fname, listOfcities='all', spatialLevel='city',
period='all', temporalLevel='day'):
"""
Aggregate on the spatial and temporal levels and then compute TF-IDF
:param matrixOcc: Matrix with TF already computed
:param listOfcities: filter on these cities
:param spatialLevel: city / state / country / world
:param period: Filter on this period
:param temporalLevel: day / week (month has yet to be implemented)
:return:
"""
matrixOcc = spatiotemporelFilter(matrix=matrixOcc, listOfcities=listOfcities,
spatialLevel='state', period=period)
# Aggregate by level
## Create 4 new columns : city, State, Country and date
def splitindex(row):
return row.split("_")
matrixOcc["city"], matrixOcc["state"], matrixOcc["country"], matrixOcc["date"] = \
zip(*matrixOcc.index.map(splitindex))
if temporalLevel == 'day':
## In space
if spatialLevel == 'city':
# do nothing
pass
elif spatialLevel == 'state' and temporalLevel == 'day':
matrixOcc = matrixOcc.groupby("state").sum()
elif spatialLevel == 'country' and temporalLevel == 'day':
matrixOcc = matrixOcc.groupby("country").sum()
elif temporalLevel == "week":
matrixOcc.date = pd.to_datetime((matrixOcc.date)) - pd.to_timedelta(7, unit='d')# convert date into datetime
## in space and time
if spatialLevel == 'country':
matrixOcc = matrixOcc.groupby(["country", pd.Grouper(key="date", freq="W")]).sum()
elif spatialLevel == 'state':
matrixOcc = matrixOcc.groupby(["state", pd.Grouper(key="date", freq="W")]).sum()
elif spatialLevel == 'city':
matrixOcc = matrixOcc.groupby(["city", pd.Grouper(key="date", freq="W")]).sum()
# Compute TF-IDF
## compute TF: for each doc, divide count by the sum of all counts
### Sum of all counts by row
matrixOcc['sumCount'] = matrixOcc.sum(axis=1)
### Divide each cell by these sums
listOfTerms = matrixOcc.keys()
matrixOcc = matrixOcc.loc[:, listOfTerms].div(matrixOcc['sumCount'], axis=0)
## Compute IDF : create a vector of length = nb of termes with IDF value
idf = pd.Series(index=matrixOcc.keys(), dtype=float)
### N : nb of documents <=> nb of rows :
N = matrixOcc.shape[0]
### DFt : nb of document that contains the term
DFt = matrixOcc.astype(bool).sum(axis=0)  # Tip: convert all values to boolean; float 0.0 becomes False, everything else True
#### Replace 0 with NaN, because otherwise the log below would be infinite
DFt.replace(0, np.nan, inplace=True)
### compute log(N/DFt)
idf = np.log10(N / (DFt))
# idf = np.log10( N / (DFt * 10))
## compute TF-IDF
matrixTFIDF = matrixOcc * idf
# matrixTFIDF = matrixOcc * idf * idf
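# Worked example of the two factors above (numbers are illustrative, not taken from
# the data): with N = 4 aggregated documents and a term appearing in DFt = 2 of them,
# idf = log10(4/2) ~= 0.301; if that term accounts for 10% of a document's counts
# (TF = 0.1), its H-TFIDF score in that cell is about 0.1 * 0.301 = 0.0301.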
## remove terms whose values are NaN for all documents
matrixTFIDF.dropna(axis=1, how='all', inplace=True)
# Save file
matrixTFIDF.to_csv(matrixHTFIDF_fname)
# Export N biggest TF-IDF score:
top_n = 500
extractBiggest = pd.DataFrame(index=matrixTFIDF.index, columns=range(0, top_n))
for row in matrixTFIDF.index:
try:
row_without_zero = matrixTFIDF.loc[row]# we remove term with a score = 0
row_without_zero = row_without_zero[ row_without_zero !=0 ]
try:
extractBiggest.loc[row] = row_without_zero.nlargest(top_n).keys()
except:
extractBiggest.loc[row] = row_without_zero.nlargest(len(row_without_zero)).keys()
except:
logger.debug("H-TFIDF: city "+str(matrixTFIDF.loc[row].name)+ "not enough terms")
extractBiggest.to_csv(biggestHTFIDFscore_fname+".old.csv")
# Transpose this table in order to share the same structure with TF-IDF classifical biggest score :
hbt = pd.DataFrame()
extractBiggest = extractBiggest.reset_index()
for index, row in extractBiggest.iterrows():
hbtrow = pd.DataFrame(row.drop([spatialLevel, "date"]).values, columns=["terms"])
hbtrow[spatialLevel] = row[spatialLevel]
hbtrow["date"] = row["date"]
hbt = hbt.append(hbtrow, ignore_index=True)
hbt.to_csv(biggestHTFIDFscore_fname)
def TFIDF_TF_with_corpus_state(elastic_query_fname, logger, save_intermediaire_files, nb_biggest_terms=500, path_for_filesaved="./",
spatial_hiearchy="country", temporal_period='all', listOfCities='all'):
"""
Compute TFIDF and TF from an elastic query file
1 doc = 1 tweet
Corpus = by hiearchy level, i.e. : state or country
:param elastic_query_fname: filename and path of the elastic query
:param logger: logger of the main program
:param nb_biggest_terms: How many biggest term are to keep
:param spatial_hiearchy: define the size of the corpus : state or country
:param temporal_period:
:param listOfCities: If you want to filter out some cities, you can
:return:
"""
# tfidfStartDate = date(2020, 1, 23)
# tfidfEndDate = date(2020, 1, 30)
# temporal_period = pd.date_range(tfidfStartDate, tfidfEndDate)
# listOfCity = ['London', 'Glasgow', 'Belfast', 'Cardiff']
# listOfState = ["England", "Scotland", "Northern Ireland", "Wales"]
tweets = elasticsearch_query(elastic_query_fname, logger)
if listOfCities == 'all':
listOfCities = []
listOfStates = []
listOfCountry = []
for triple in tweets:
splitted = triple.split("_")
listOfCities.append(splitted[0])
listOfStates.append(splitted[1])
listOfCountry.append(splitted[2])
listOfCities = list(set(listOfCities))
listOfStates = list(set(listOfStates))
listOfCountry = list(set(listOfCountry))
# reorganize tweets (dict of tweets by city) into a dataframe (city and date)
matrixAllTweets = pd.DataFrame()
for tweetByCity in tweets.keys():
# Filter cities :
city = str(tweetByCity).split("_")[0]
state = str(tweetByCity).split("_")[1]
country = str(tweetByCity).split("_")[2]
if city in listOfCities:
matrix = pd.DataFrame(tweets[tweetByCity])
matrix['city'] = city
matrix['state'] = state
matrix['country'] = country
matrixAllTweets = matrixAllTweets.append(matrix, ignore_index=True)
# Split datetime into date and time
matrixAllTweets["date"] = [d.date() for d in matrixAllTweets['created_at']]
matrixAllTweets["time"] = [d.time() for d in matrixAllTweets['created_at']]
# Filter by a period
if temporal_period != "all":
mask = ((matrixAllTweets["date"] >= temporal_period.min()) & (matrixAllTweets["date"] <= temporal_period.max()))
matrixAllTweets = matrixAllTweets.loc[mask]
# Compute TF-IDF and TF by state
extractBiggestTF_allstates = pd.DataFrame()
extractBiggestTFIDF_allstates = pd.DataFrame()
if spatial_hiearchy == "country":
listOfLocalities = listOfCountry
elif spatial_hiearchy == "state":
listOfLocalities = listOfStates
elif spatial_hiearchy == "city":
listOfLocalities = listOfCities
for locality in listOfLocalities:
matrix_by_locality = matrixAllTweets[matrixAllTweets[spatial_hiearchy] == locality]
vectorizer = TfidfVectorizer(
stop_words='english',
min_df=0.001,
# max_features=50000,
ngram_range=(1, 1),
token_pattern='[a-zA-Z0-9#]+',  # assumed pattern (original value was redacted), mirroring the whole-corpus vectorizer below
)
# logger.info("Compute TF-IDF on corpus = "+spatial_hiearchy)
try:
vectors = vectorizer.fit_transform(matrix_by_locality['tweet'])
feature_names = vectorizer.get_feature_names()
dense = vectors.todense()
denselist = dense.tolist()
except:
logger.info("Impossible to compute TF-IDF on: "+locality)
continue
## matrixTFIDF
TFIDFClassical = pd.DataFrame(denselist, columns=feature_names)
locality_format = locality.replace("/", "_")
locality_format = locality_format.replace(" ", "_")
if save_intermediaire_files:
logger.info("saving TF-IDF File: "+path_for_filesaved+"/tfidf_on_"+locality_format+"_corpus.csv")
TFIDFClassical.to_csv(path_for_filesaved+"/tfidf_on_"+locality_format+"_corpus.csv")
## Extract N TOP ranking score
extractBiggest = TFIDFClassical.max().nlargest(nb_biggest_terms)
extractBiggest = extractBiggest.to_frame()
extractBiggest = extractBiggest.reset_index()
extractBiggest.columns = ['terms', 'score']
extractBiggest[spatial_hiearchy] = locality
extractBiggestTFIDF_allstates = extractBiggestTFIDF_allstates.append(extractBiggest, ignore_index=True)
"""
# Compute TF
tf = CountVectorizer(
stop_words='english',
min_df=2,
ngram_range=(1,2),
token_pattern='[a-zA-Z0-9@#]+',
)
try:
tf.fit(matrix_by_locality['tweet'])
tf_res = tf.transform(matrix_by_locality['tweet'])
listOfTermsTF = tf.get_feature_names()
countTerms = tf_res.todense()
except:# locality does not have enough different term
logger.info("Impossible to compute TF on: "+locality)
continue
## matrixTF
TFClassical = pd.DataFrame(countTerms.tolist(), columns=listOfTermsTF)
### save in file
logger.info("saving TF File: "+path_for_filesaved+"/tf_on_"+locality.replace("/", "_")+"_corpus.csv")
TFClassical.to_csv(path_for_filesaved+"/tf_on_"+locality.replace("/", "_")+"_corpus.csv")
## Extract N TOP ranking score
extractBiggestTF = TFClassical.max().nlargest(nb_biggest_terms)
extractBiggestTF = extractBiggestTF.to_frame()
extractBiggestTF = extractBiggestTF.reset_index()
extractBiggestTF.columns = ['terms', 'score']
extractBiggestTF[spatial_hiearchy] = locality
extractBiggestTF_allstates = extractBiggestTF_allstates.append(extractBiggestTF, ignore_index=True)
"""
logger.info("saving TF and TF-IDF top"+str(nb_biggest_terms)+" biggest score")
extractBiggestTF_allstates.to_csv(path_for_filesaved+"/TF_BiggestScore_on_"+spatial_hiearchy+"_corpus.csv")
extractBiggestTFIDF_allstates.to_csv(path_for_filesaved+"/TF-IDF_BiggestScore_on_"+spatial_hiearchy+"_corpus.csv")
def TFIDF_TF_on_whole_corpus(elastic_query_fname, logger, save_intermediaire_files, path_for_filesaved="./",
temporal_period='all', listOfCities='all'):
"""
Compute TFIDF and TF from an elastic query file
1 doc = 1 tweet
Corpus = the whole elastic query (cities that are not in listOfCities are filtered out)
:param elastic_query_fname: filename and path of the elastic query
:param logger: logger of the main program
:param nb_biggest_terms: How many of the biggest terms to keep. It has to be greater than for H-TFIDF or
classical TF-IDF on a per-locality corpus, because a lot of terms have 1.0 as their score
:param spatial_hiearchy: define the size of the corpus : state or country
:param temporal_period:
:param listOfCities: If you want to filter out some cities, you can
:return:
"""
# tfidfStartDate = date(2020, 1, 23)
# tfidfEndDate = date(2020, 1, 30)
# temporal_period = pd.date_range(tfidfStartDate, tfidfEndDate)
# listOfCity = ['London', 'Glasgow', 'Belfast', 'Cardiff']
# listOfState = ["England", "Scotland", "Northern Ireland", "Wales"]
# Query Elasticsearch to get all tweets from UK
tweets = elasticsearch_query(elastic_query_fname, logger)
if listOfCities == 'all':
listOfCities = []
listOfStates = []
listOfCountry = []
for triple in tweets:
splitted = triple.split("_")
listOfCities.append(splitted[0])
listOfStates.append(splitted[1])
listOfCountry.append(splitted[2])
listOfCities = list(set(listOfCities))
listOfStates = list(set(listOfStates))
listOfCountry = list(set(listOfCountry))
# reorganize tweets (dict of tweets by city) into a dataframe (city and date)
matrixAllTweets = pd.DataFrame()
for tweetByCity in tweets.keys():
# Filter cities :
city = str(tweetByCity).split("_")[0]
state = str(tweetByCity).split("_")[1]
country = str(tweetByCity).split("_")[2]
if city in listOfCities:
matrix = pd.DataFrame(tweets[tweetByCity])
matrix["country"] = country
matrixAllTweets = matrixAllTweets.append(matrix, ignore_index=True)
# Split datetime into date and time
matrixAllTweets["date"] = [d.date() for d in matrixAllTweets['created_at']]
matrixAllTweets["time"] = [d.time() for d in matrixAllTweets['created_at']]
# Filter by a period
if temporal_period != "all":
mask = ((matrixAllTweets["date"] >= temporal_period.min()) & (matrixAllTweets["date"] <= temporal_period.max()))
matrixAllTweets = matrixAllTweets.loc[mask]
vectorizer = TfidfVectorizer(
stop_words='english',
min_df=0.001,
# max_features=50000,
ngram_range=(1, 1),
token_pattern='[a-zA-Z0-9#]+',  # drop user names, i.e. terms starting with @, for personal-data reasons
)
try:
vectors = vectorizer.fit_transform(matrixAllTweets['tweet'])
feature_names = vectorizer.get_feature_names()
dense = vectors.todense()
denselist = dense.tolist()
except:
logger.info("Impossible to compute TF-IDF")
exit(-1)
## matrixTFIDF
TFIDFClassical = pd.DataFrame(denselist, columns=feature_names)
TFIDFClassical["country"] = matrixAllTweets["country"]
if save_intermediaire_files:
logger.info("saving TF-IDF File: "+path_for_filesaved+"/tfidf_on_whole_corpus.csv")
TFIDFClassical.to_csv(path_for_filesaved+"/tfidf_on_whole_corpus.csv")
extractBiggest = pd.DataFrame()
for term in TFIDFClassical.keys():
try:
index = TFIDFClassical[term].idxmax()
score = TFIDFClassical[term].max()
country = TFIDFClassical.iloc[index]["country"]
row = {
'terms': term,
'score': score,
'country': country
}
extractBiggest = extractBiggest.append(row, ignore_index=True)
        except Exception:
            logger.info("Could not extract the biggest TF-IDF score for the term: " + str(term))
## Extract N TOP ranking score
# extractBiggest = TFIDFClassical.max()
    extractBiggest = extractBiggest[extractBiggest['score'] == 1]  # keep only terms with the highest TF-IDF score, i.e. 1.0
# extractBiggest = extractBiggest.to_frame()
# extractBiggest = extractBiggest.reset_index()
# extractBiggest.columns = ['terms', 'score', 'country']
logger.info("saving TF-IDF top"+str(extractBiggest['terms'].size)+" biggest score")
extractBiggest.to_csv(path_for_filesaved+"/TFIDF_BiggestScore_on_whole_corpus.csv")
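# Hedged usage sketch (not part of the original pipeline): how TFIDF_TF_on_whole_corpus
# is typically called. The query filename and output directory are made-up names.
def _demo_tfidf_tf_on_whole_corpus(logger):
    TFIDF_TF_on_whole_corpus(
        elastic_query_fname="elastic_query_UK.txt",  # hypothetical query file
        logger=logger,
        save_intermediaire_files=True,
        path_for_filesaved="./results",  # hypothetical output directory
        temporal_period="all",
        listOfCities="all",
    )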
def logsetup(log_fname):
"""
    Initiate a logger object:
        - logs to a timestamped file named after log_fname
        - also prints INFO-level messages on screen
    :return: logger object
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(funcName)20s() ::%(message)s')
now = datetime.now()
file_handler = RotatingFileHandler(log_fname + "_" + now.strftime("%Y-%m-%d_%H-%M-%S") + ".log", 'a', 1000000, 1)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
# Only display on screen INFO
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
return logger
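# Hedged usage sketch: create the logger used throughout this module. The
# "h-tfidf-evaluation" file prefix is an assumption, not the original name.
def _demo_logsetup():
    logger = logsetup("h-tfidf-evaluation")
    logger.info("logger ready")
    return logger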
def t_SNE_bert_embedding_visualization(biggest_score, logger, listOfLocalities="all", spatial_hieararchy="country",
plotname="colored by country", paht2save="./"):
"""
Plot t-SNE representation of terms by country
    resources:
+ https://colab.research.google.com/drive/1FmREx0O4BDeogldyN74_7Lur5NeiOVye?usp=sharing#scrollTo=Fbq5MAv0jkft
+ https://github.com/UKPLab/sentence-transformers
:param biggest_score:
:param listOfLocalities:
:param spatial_hieararchy:
:param plotname:
:param paht2save:
:return:
"""
modelSentenceTransformer = SentenceTransformer('distilbert-base-nli-mean-tokens')
    # filter by localities
    if listOfLocalities != "all":
        for locality in biggest_score[spatial_hieararchy].unique():
            if locality not in listOfLocalities:
                biggest_score = biggest_score.drop(biggest_score[biggest_score[spatial_hieararchy] == locality].index)
embeddings = modelSentenceTransformer.encode(biggest_score['terms'].to_list(), show_progress_bar=True)
# embeddings.tofile(paht2save+"/tsne_bert-embeddings_"+plotname+"_matrix-embeddig")
modelTSNE = TSNE(n_components=2) # n_components means the lower dimension
low_dim_data = modelTSNE.fit_transform(embeddings)
label_tsne = biggest_score[spatial_hieararchy]
# Style Plots a bit
sns.set_style('darkgrid')
sns.set_palette('muted')
sns.set_context("notebook", font_scale=1, rc={"lines.linewidth": 2.5})
plt.rcParams['figure.figsize'] = (20, 14)
tsne_df = pd.DataFrame(low_dim_data, label_tsne)
tsne_df.columns = ['x', 'y']
ax = sns.scatterplot(data=tsne_df, x='x', y='y', hue=tsne_df.index)
plt.setp(ax.get_legend().get_texts(), fontsize='40') # for legend text
plt.setp(ax.get_legend().get_title(), fontsize='50') # for legend title
plt.ylim(-100,100)
plt.xlim(-100, 100)
#ax.set_title('T-SNE BERT Sentence Embeddings for '+plotname)
plt.savefig(paht2save+"/tsne_bert-embeddings_"+plotname)
logger.info("file: "+paht2save+"/tsne_bert-embeddings_"+plotname+" has been saved.")
#plt.show()
plt.close()
# Perform kmean clustering
# num_clusters = 5
# clustering_model = KMeans(n_clusters=num_clusters)
# clustering_model.fit(embeddings)
# cluster_assignment = clustering_model.labels_
# Normalize the embeddings to unit length
corpus_embeddings = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)
# Perform kmean clustering
clustering_model = AgglomerativeClustering(n_clusters=None,
distance_threshold=1.5) # , affinity='cosine', linkage='average', distance_threshold=0.4)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
# clustered_sentences = [[] for i in range(num_clusters)]
# for sentence_id, cluster_id in enumerate(cluster_assignment):
# clustered_sentences[cluster_id].append(biggest_score['terms'].iloc[sentence_id])
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
if cluster_id not in clustered_sentences:
clustered_sentences[cluster_id] = []
clustered_sentences[cluster_id].append(biggest_score['terms'].iloc[sentence_id])
#for i, cluster in enumerate(clustered_sentences):
# for i, cluster in clustered_sentences.items():
# print("Cluster ", i+1)
# print(cluster)
# print("")
def bert_embedding_filtred(biggest_score, listOfLocalities="all", spatial_hieararchy="country"):
"""
    Retrieve embeddings of a matrix of terms (with the possibility of filtering by a list of localities)
    :param biggest_score: pd.DataFrame with columns: [terms, country/state/city]
:param listOfLocalities:
:param spatial_hieararchy:
:return:
"""
modelSentenceTransformer = SentenceTransformer('distilbert-base-nli-mean-tokens')
# filter by localities
if listOfLocalities != "all":
for locality in biggest_score[spatial_hieararchy].unique():
if locality not in listOfLocalities:
biggest_score = biggest_score.drop(biggest_score[biggest_score[spatial_hieararchy] == locality].index)
embeddings = modelSentenceTransformer.encode(biggest_score['terms'].to_list(), show_progress_bar=True)
return embeddings
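# Hedged usage sketch: embed only the terms of two localities. `biggest_df` is
# an assumed dataframe with 'terms' and 'country' columns.
def _demo_bert_embedding_filtred(biggest_df):
    embeddings = bert_embedding_filtred(
        biggest_df, listOfLocalities=["France", "Germany"], spatial_hieararchy="country"
    )
    return embeddings  # one embedding vector per remaining term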
def similarity_intra_matrix_pairwise(matrix):
"""
    Compute pairwise cosine similarity on the rows of a matrix and retrieve the unique score of each pair.
    Indeed, pairwise cosine_similarity returns a matrix with duplicates; let's take an example:
Number of terms : 4, cosine similarity :
w1 w2 w3 w4
+---+---+----+--+
w1 | 1 | | | |
w2 | | 1 | | |
w3 | | | 1 | |
w4 | | | | 1 |
+---+---+----+--+
(w1, w2) = (w2, w1), so we have to keep only : (number_of_terms)^2/2 - (number_of_terms)/2
for nb_term = 4 :
4*4/2 - 4/2 = 16/2 - 4/2 = 6 => we have 6 unique scores
:param matrix:
:return: list of unique similarity score
"""
similarity = cosine_similarity(sparse.csr_matrix(matrix))
similarity_1D = np.array([])
for i, row in enumerate(similarity):
similarity_1D = np.append(similarity_1D, row[i+1:]) # We remove duplicate pairwise value
return similarity_1D
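# Hedged sanity check with made-up vectors: as explained in the docstring above,
# a matrix with 4 rows must yield 4*3/2 = 6 unique pairwise cosine scores.
def _demo_similarity_intra_matrix_pairwise():
    demo = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0], [0.5, 0.5]])
    scores = similarity_intra_matrix_pairwise(demo)
    assert scores.size == 6
    return scores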
def similarity_inter_matrix(matrix1, matrix2):
"""
:param matrix1:
:param matrix2:
:return:
"""
similarity = 1 - sp.distance.cdist(matrix1, matrix2, 'cosine')
return similarity
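# Hedged sanity check with made-up vectors: cosine similarity between identical
# rows is 1.0 and between orthogonal rows is 0.0.
def _demo_similarity_inter_matrix():
    m1 = np.array([[1.0, 0.0], [0.0, 1.0]])
    m2 = np.array([[1.0, 0.0]])
    sim = similarity_inter_matrix(m1, m2)  # shape (2, 1)
    assert np.isclose(sim[0, 0], 1.0) and np.isclose(sim[1, 0], 0.0)
    return sim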
def clustering_terms(biggest, logger, cluster_f_out, listOfLocalities="all", spatial_hieararchy="country", method="kmeans"):
"""
:param biggest:
:param method:
:return:
"""
method_list = ["kmeans", "agglomerative_clustering"]
if method not in method_list:
logger.error("This method is not implemented for clustering: "+str(method))
return -1
# filter by localities
if listOfLocalities != "all":
for locality in biggest[spatial_hieararchy].unique():
if locality not in listOfLocalities:
biggest = biggest.drop(biggest[biggest[spatial_hieararchy] == locality].index)
embeddings = bert_embedding_filtred(biggest)
if method == "kmeans":
# Perform kmean clustering
num_clusters = 5
clustering_model = KMeans(n_clusters=num_clusters)
clustering_model.fit(embeddings)
cluster_assignment = clustering_model.labels_
elif method == "agglomerative_clustering":
# Normalize the embeddings to unit length
corpus_embeddings = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)
# Perform Agglomerative clustering
clustering_model = AgglomerativeClustering(n_clusters=None,
distance_threshold=1.5) # , affinity='cosine', linkage='average', distance_threshold=0.4)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
if str(cluster_id) not in clustered_sentences:
clustered_sentences[str(cluster_id)] = []
clustered_sentences[str(cluster_id)].append(biggest['terms'].iloc[sentence_id])
with open(cluster_f_out, "w") as outfile:
json.dump(clustered_sentences, outfile)
logger.info("file " + cluster_f_out + " has been saved")
def geocoding_token(biggest, listOfLocality, spatial_hieararchy, logger):
"""
    Find and geocode spatial entities with OSM data (Nominatim).
    Respect the terms of use of OSM and Nominatim:
        - specify a name for the application, i.e. the user agent
        - add a delay between each query: min_delay_seconds = 1.
          See: https://geopy.readthedocs.io/en/stable/#module-geopy.extra.rate_limiter
        - define a timeout of 10 seconds when waiting for the Nominatim answer
:param biggest:
:return: biggest with geocoding information
"""
try:
if listOfLocality != "all":
for locality in biggest[spatial_hieararchy].unique():
if locality not in listOfLocality:
biggest = biggest.drop(biggest[biggest[spatial_hieararchy] == locality].index)
except:
logger.info("could not filter, certainly because there is no spatial hiearchy on biggest score")
geolocator = Nominatim(user_agent="h-tfidf-evaluation", timeout=10)
geocoder = RateLimiter(geolocator.geocode, min_delay_seconds=1)
tqdm.pandas()
biggest["geocode"] = biggest["terms"].progress_apply(geocoder)
return biggest
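# Hedged usage sketch: geocode the terms of one country with Nominatim (slow by
# design: one request per second). `biggest_df` is an assumed dataframe with
# 'terms' and 'country' columns.
def _demo_geocoding_token(biggest_df, logger):
    geocoded = geocoding_token(
        biggest_df, listOfLocality=["United Kingdom"],
        spatial_hieararchy="country", logger=logger,
    )
    return geocoded  # original dataframe with an extra 'geocode' column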
def post_traitement_flood(biggest, logger, spatialLevel, ratio_of_flood=0.5):
"""
    Flag terms coming from flooding users: returns the same dataframe with one more column: user_flooding.
    With the default ratio_of_flood, if a single Twitter user accounts for more than 50% of the occurrences
    of a term, we consider that user to be flooding.
:param biggest: File of terms to process
:param logger:
:param: spatialLevel : work on Country / State / City
:param: ratio_of_flood
:return: return same dataframe with 1 more column : user_flooding
"""
ratio_of_flood_global = ratio_of_flood
es_logger.setLevel(logging.WARNING)
# pre-build elastic query for spatialLevel :
rest_user_osm_level = ""
if spatialLevel == "country":
rest_user_osm_level = "rest_user_osm.country"
elif spatialLevel == "state":
rest_user_osm_level = "rest.features.properties.state"
elif spatialLevel == "city":
rest_user_osm_level = "rest.features.properties.city"
def is_an_user_flooding(term, locality):
client = Elasticsearch("http://localhost:9200")
index = "twitter"
# Query :
## Retrieve only user name where in full_text = term and rest_user_osm.country = locality
if term is not np.NAN:
query = {"_source": "user.name","query":{"bool":{"filter":[{"bool":{"should":[{"match_phrase":{"full_text":term}}],"minimum_should_match":1}},
{"bool":{"should":[{"match_phrase":{rest_user_osm_level:locality}}],"minimum_should_match":1}}]}}}
try:
result = Elasticsearch.search(client, index=index, body=query)
list_of_user = []
if len(result["hits"]["hits"]) != 0:
for hit in result["hits"]["hits"]:
user = hit["_source"]["user"]["name"]
list_of_user.append(user)
dict_user_nbtweet = dict(Counter(list_of_user))
d = dict((k, v) for k, v in dict_user_nbtweet.items() if v >= (ratio_of_flood_global * len(list_of_user)))
if len(d) > 0 : # there is a flood on this term:
return 1
else:
return 0
else: # not found in ES why ?
return "not_in_es"
except:
logger.info("There is a trouble with this term: " + str(term))
return np.NAN
else:
return 0
logger.debug("start remove terms if they coming from a flooding user, ie, terms in "+str(ratio_of_flood_global*100)+"% of tweets from an unique user over tweets with this words")
tqdm.pandas()
biggest["user_flooding"] = biggest.progress_apply(lambda t: is_an_user_flooding(t.terms, t[spatialLevel]), axis=1)
return biggest
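# Hedged usage sketch: flag flooded terms at country level. It requires a local
# Elasticsearch with a "twitter" index, as hard-coded in the function above;
# `biggest_df` is an assumed dataframe with 'terms' and 'country' columns.
def _demo_post_traitement_flood(biggest_df, logger):
    flagged = post_traitement_flood(biggest_df, logger, spatialLevel="country",
                                    ratio_of_flood=0.5)
    return flagged  # same dataframe with an extra 'user_flooding' column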
def venn(biggest, logger, spatial_level, result_path, locality):
"""
Build Venn diagramm in word_cloud
Save fig in result_path
Discussion about font size :
In each subset (common or specific), the font size of term is related with the H-TFIDF Rank inside the subset
:param biggest:
:param logger:
:param spatialLevel:
:return:
"""
    # Post-processing
biggest = biggest[biggest["user_flooding"] == "0"]
# Select locality
biggest = biggest[biggest[spatial_level] == locality]
# select week
weeks = biggest['date'].unique()
if len(weeks) == 2:
sets = []
weeks_list = []
for week in weeks:
sets.append(set(biggest[biggest["date"] == week].terms[0:100]))
weeks_list.append(week)
try:
venn = venn2_wordcloud(sets, set_labels=weeks_list, wordcloud_kwargs=dict(min_font_size=10),)
except:
logger.info("Can't build venn for: "+locality)
    elif len(weeks) >= 3:
sets = []
weeks_list = []
word_frequency = {} # for font-size of wordcloud : based on H-TFIDF Rank
for nb, week in enumerate(weeks[-3:]):
sets.append(set(biggest[biggest["date"] == week].terms[0:100]))
weeks_list.append(week)
for rank, term in enumerate(biggest[biggest["date"] == week].terms[0:100]):
if term not in word_frequency:
word_frequency[term] = (100 - rank)
try:
venn = venn3_wordcloud(sets, set_labels=weeks_list, word_to_frequency=word_frequency,
wordcloud_kwargs=dict(min_font_size=4,),)
except:
logger.info("Can't build venn for: "+locality)
sorted_word_frequency = dict(sorted(word_frequency.items(), key=operator.itemgetter(1),reverse=True))
logger.info(locality + ": " + str(sorted_word_frequency))
plt.savefig(result_path + "/venn_" + locality)
def frequent_terms_by_level(matrixOcc, logger, most_frequent_terms_fpath, listOfLocalities='all', spatialLevel='country'):
"""
:param matrixOcc:
:param most_frequent_terms_fpath:
:param listOfLocalities:
:param spatialLevel:
:return:
"""
#matrixOcc = spatiotemporelFilter(matrix=matrixOcc, listOfcities=listOfLocalities,
# spatialLevel=spatialLevel, period='all')
# Aggregate by level
## Create 4 new columns : city, State, Country and date
def splitindex(row):
return row.split("_")
matrixOcc["city"], matrixOcc["state"], matrixOcc["country"], matrixOcc["date"] = \
zip(*matrixOcc.index.map(splitindex))
matrixOcc.date = pd.to_datetime((matrixOcc.date)) # convert date into datetime
if spatialLevel == 'city':
matrixOcc = matrixOcc.groupby(["city", pd.Grouper(key="date", freq="Y")]).sum()
elif spatialLevel == 'state':
matrixOcc = matrixOcc.groupby(["state", pd.Grouper(key="date", freq="Y")]).sum()
elif spatialLevel == 'country':
matrixOcc = matrixOcc.groupby(["country", pd.Grouper(key="date", freq="Y")]).sum()
# Export N biggest TF-IDF score:
top_n = 500
extractBiggest = pd.DataFrame(index=matrixOcc.index, columns=range(0, top_n))
for row in matrixOcc.index:
try:
row_without_zero = matrixOcc.loc[row]# we remove term with a score = 0
row_without_zero = row_without_zero[ row_without_zero !=0 ]
try:
extractBiggest.loc[row] = row_without_zero.nlargest(top_n).keys()
except:
extractBiggest.loc[row] = row_without_zero.nlargest(len(row_without_zero)).keys()
except:
logger.debug("H-TFIDF: city " + str(matrixOcc.loc[row].name) + "not enough terms")
    # Transpose this table in order to share the same structure as the classical TF-IDF biggest-score table:
hbt = pd.DataFrame()
extractBiggest = extractBiggest.reset_index()
for index, row in extractBiggest.iterrows():
hbtrow = pd.DataFrame(row.drop([spatialLevel, "date"]).values, columns=["terms"])
hbtrow[spatialLevel] = row[spatialLevel]
hbtrow["date"] = row["date"]
hbt = hbt.append(hbtrow, ignore_index=True)
# save file
logger.info("saving file: "+most_frequent_terms_fpath)
hbt.to_csv(most_frequent_terms_fpath)
return hbt
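# Hedged usage sketch: extract the most frequent terms per country from an
# occurrence matrix whose index follows the "city_state_country_date" pattern
# expected by splitindex() above. `matrix_occ` and the output path are illustrative.
def _demo_frequent_terms_by_level(matrix_occ, logger):
    return frequent_terms_by_level(
        matrix_occ, logger,
        most_frequent_terms_fpath="./results/frequent_terms_country.csv",
        listOfLocalities="all",
        spatialLevel="country",
    )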
def comparison_htfidf_tfidf_frequentterms(htfidf_f, tfidf_corpus_country_f, frequent_terms, logger, plot_f_out, listOfCountries="all"):
# Open dataframes
htfidf = pd.read_csv(htfidf_f, index_col=0)
tfidf = pd.read_csv(tfidf_corpus_country_f, index_col=0)
for nb_terms in [100, 200, 500]:
# barchart building
barchart_df_col = ["country", "h-tfidf", "tf-idf"]
barchart_df = pd.DataFrame(columns=barchart_df_col, index=range(len(listOfCountries)))
# loop on countries
for country in listOfCountries:
htfidf_country = htfidf[htfidf["country"] == country]
tfidf_country = tfidf[tfidf["country"] == country]
frequent_terms_country = frequent_terms[frequent_terms["country"] == country]
# loop on weeks
htfidf_overlap_per_week_df = pd.DataFrame(index=range(1))
for week in htfidf_country.date.unique():
htfidf_country_week = htfidf_country[htfidf_country["date"] == week]
                # build the Venn comparison of H-TFIDF with frequent terms
sets = []
sets.append(set(htfidf_country_week.terms[0:nb_terms]))
sets.append(set(frequent_terms_country.terms[0:nb_terms]))
try:
venn_htfidf = venn2_wordcloud(sets)
htfidf_overlap_per_week_df[week] = len(venn_htfidf.get_words_by_id('11'))
except:
htfidf_overlap_per_week_df[week] = np.NAN
# mean value for all weeks :
mean_htfidf_overlap_per_week_df = htfidf_overlap_per_week_df.mean(axis=1).iloc[0] * 100 / nb_terms
            # Compute TF-IDF overlap with frequent terms
sets = []
sets.append(set(tfidf_country.terms[0:nb_terms]))
sets.append(set(frequent_terms_country.terms[0:nb_terms]))
logger.info(country)
venn_tfidf = venn2_wordcloud(sets)
plt.close('all')
# barchart_df['TFIDF_' + country] = len(venn_tfidf.get_words_by_id('11'))
tfidf_overlap = len(venn_tfidf.get_words_by_id('11')) * 100 / nb_terms
# build the row for barchart
            if country == "Ἑλλάς":
country = "Greece"
row = {"country": country, "h-tfidf": mean_htfidf_overlap_per_week_df, "tf-idf": tfidf_overlap}
barchart_df = barchart_df.append(row, ignore_index=True)
# Plot bar chart
barchart_df = barchart_df.set_index("country")
barchart_df = barchart_df.dropna()
barchart_df.plot.bar(figsize=(8,6))
plt.subplots_adjust(bottom=0.27)
plt.ylabel("% overlap between H-TFIDF / TF-IDF with most frequent terms")
plt.savefig(plot_f_out + "_" + str(nb_terms) + ".png")
    # build venn diagram
## Choose a country
country = "United Kingdom"
nb_terms = 100
week = "2020-01-26"
    ## Filter the matrices to keep the top terms, excluding terms with fewer than 4 characters and purely numeric terms
htfidf_country = htfidf[(htfidf["country"] == country) & (htfidf["date"] == week)]
tfidf_country = tfidf[tfidf["country"] == country]
frequent_terms_country = frequent_terms[frequent_terms["country"] == country]
htfidf_country = htfidf_country[htfidf_country["terms"].map(len) > 3]
tfidf_country = tfidf_country[tfidf_country["terms"].map(len) > 3]
frequent_terms_country = frequent_terms_country[frequent_terms_country["terms"].map(len) > 3]
### Remove number
    htfidf_country_terms = htfidf_country["terms"].replace(r"^\d+", np.nan, regex=True).dropna().head(nb_terms)
    tfidf_country_terms = tfidf_country["terms"].replace(r"^\d+", np.nan, regex=True).dropna().head(nb_terms)
    frequent_terms_country_terms = frequent_terms_country["terms"].replace(r"^\d+", np.nan, regex=True).dropna().head(nb_terms)
columns_name = []
latex_table_nb_terms = 30
for i in range(latex_table_nb_terms):
columns_name.append("rank "+str(i))
latex_table = pd.DataFrame(index=range(3), columns=columns_name)
latex_table.loc["H-TFIDF"] = htfidf_country_terms.head(latex_table_nb_terms).values
latex_table.loc["TF-IDF"] = tfidf_country_terms.head(latex_table_nb_terms).values
latex_table.loc["Frequent terms"] = frequent_terms_country_terms.head(latex_table_nb_terms).values
print(latex_table.T[["H-TFIDF", "TF-IDF", "Frequent terms"]].to_latex(index=False))
sets = []
sets.append(set(htfidf_country_terms))
sets.append(set(tfidf_country_terms))
sets.append(set(frequent_terms_country_terms))
fig, ax = plt.subplots(figsize=(8, 6))
venn_3 = venn3_wordcloud(sets, set_labels=["H-TFIDF", "TF-IDF", "Frequent terms"], ax=ax)
plt.savefig(plot_f_out + "_"+ country + "venn3.png")
plt.show()
def comparison_htfidf_tfidfwhole_frequentterms(htfidf_f, tfidf_whole_f, frequent_terms, logger, plot_f_out, listOfCountries="all"):
# Open dataframes
htfidf = pd.read_csv(htfidf_f, index_col=0)
    tfidf = pd.read_csv(tfidf_whole_f, index_col=0)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
class RedRio:
def __init__(self,codigo = None,**kwargs):
self.info = pd.Series()
self.codigo = codigo
self.info.slug = None
self.fecha = '2006-06-06 06:06'
self.workspace = '/media/'
self.seccion = pd.DataFrame(columns = [u'vertical', u'x', u'y', u'v01', u'v02', u'v03', u'v04',
u'v05', u'v06', u'v07', u'v08', u'v09', u'vsup'])
self.parametros = "id_aforo,fecha,ancho_superficial,caudal_medio,velocidad_media,perimetro,area_total,profundidad_media,radio_hidraulico"
self.aforo = pd.Series(index = [u'fecha', u'ancho_superficial', u'caudal_medio',
u'velocidad_media',u'perimetro', u'area_total',
u'profundidad_media', u'radio_hidraulico',u'levantamiento'])
self.levantamiento = pd.DataFrame(columns = ['vertical','x','y'])
self.alturas = pd.DataFrame(index=pd.date_range(start = pd.to_datetime('2018').strftime('%Y-%m-%d 06:00'),periods=13,freq='H'),columns = ['profundidad','offset','lamina','caudal'])
self.alturas.index = map(lambda x:x.strftime('%H:00'),self.alturas.index)
@property
def caudales(self):
pass
@property
def folder_path(self):
return self.workspace+pd.to_datetime(self.fecha).strftime('%Y%m%d')+'/'+self.info.slug+'/'
def insert_vel(self,vertical,v02,v04,v08):
self.seccion.loc[vertical,'v02'] = v02
self.seccion.loc[vertical,'v04'] = v04
self.seccion.loc[vertical,'v08'] = v08
def velocidad_media_dovela(self):
columns = [u'vertical', u'x', u'y', u'v01', u'v02', u'v03',
u'v04', u'v05', u'v06', u'v07', u'v08', u'v09', u'vsup']
dfs = self.seccion[columns].copy()
self.seccion['vm'] = np.NaN
vm = []
for index in dfs.index:
vm.append(round(self.estima_velocidad_media_vertical(dfs.loc[index].dropna()),3))
self.seccion['vm'] = vm
def area_dovela(self):
self.seccion['area'] = self.get_area(self.seccion['x'].abs().values,self.seccion['y'].abs().values)
def estima_velocidad_media_vertical(self,vertical,factor=0.0,v_index=0.8):
vertical = vertical[vertical.index!='vm']
index = list(vertical.index)
if index == ['vertical','x','y']:
if vertical['x'] == 0.0:
vm = factor * self.seccion.loc[vertical.name+1,'vm']
else:
vm = factor * self.seccion.loc[vertical.name-1,'vm']
elif (index == ['vertical','x','y','vsup']) or (index == ['vertical','x','y','v08']):
try:
vm = v_index*vertical['vsup']
except:
vm = v_index*vertical['v08']
elif (index == ['vertical','x','y','v04']) or (index == ['vertical','x','y','v04','vsup']):
vm = vertical['v04']
elif (index == ['vertical','x','y','v04','v08']) or (index == ['vertical','x','y','v04','v08','vsup']) or (index == ['vertical','x','y','v02','v04']):
vm = vertical['v04']
elif index == ['vertical','x','y','v08','vsup']:
vm = v_index*vertical['vsup']
elif (index == ['vertical','x','y','v02','v04','v08']) or (index == ['vertical','x','y','v02','v04','v08','vsup']):
vm = (2*vertical['v04']+vertical['v08']+vertical['v02'])/4.0
elif (index == ['vertical','x','y','v02','v08']):
vm = (vertical['v02']+vertical['v08'])/2.0
return vm
def perimetro(self):
x,y = (self.seccion['x'].values,self.seccion['y'].values)
def perimeter(x,y):
p = []
for i in range(len(x)-1):
p.append(round(float(np.sqrt(abs(x[i]-x[i+1])**2.0+abs(y[i]-y[i+1])**2.0)),3))
return [0]+p
self.seccion['perimetro'] = perimeter(self.seccion['x'].values,self.seccion['y'].values)
def get_area(self,x,y):
        '''Compute the area and discharge of each vertical using the mid-section method.
        Input:
        x = distance from the left bank, type = numpy array
        y = depth
        v = velocity at the vertical
        Output:
        area = area of the sub-section
        Q = discharge of the sub-section
        '''
        # area computation
d = np.absolute(np.diff(x))/2.
b = x[:-1]+d
area = np.diff(b)*y[1:-1]
area = np.insert(area, 0, d[0]*y[0])
area = np.append(area,d[-1]*y[-1])
area = np.absolute(area)
        # discharge computation
return np.round(area,3)
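    # Hedged worked example of the mid-section method above, with made-up numbers:
    # for x = [0, 1, 2, 3] (m) and y = [0.5, 1.0, 1.2, 0.6] (m), the half-spacings
    # are d = [0.5, 0.5, 0.5], the mid-points b = [0.5, 1.5, 2.5], and the
    # sub-section areas come out as [0.25, 1.0, 1.2, 0.3] m^2:
    #   rr = RedRio()
    #   rr.get_area(np.array([0, 1, 2, 3]), np.array([0.5, 1.0, 1.2, 0.6]))
    #   # -> array([0.25, 1.0, 1.2, 0.3])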
def read_excel_format(self,file):
df = pd.read_excel(file)
df = df.loc[df['x'].dropna().index]
df['vertical'] = range(1,df.index.size+1)
df['y'] = df['y'].abs()*-1
df.columns = map(lambda x:x.lower(),df.columns)
self.seccion = df[self.seccion.columns]
        df = pd.read_excel(file, sheetname=1)