prompt (string, 19–1.03M chars) | completion (string, 4–2.12k chars) | api (string, 8–90 chars) |
---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 13 14:18:46 2018
@author: rick
Module with functions for importing and integrating biostratigraphic
age-depth data from DSDP, ODP, and IODP into a single, standardized csv file.
Age-depth data are not available for Chikyu expeditions.
IODP: Must first manually download each "List of Assets" from the IODP LIMS DESC Reports web portal with "workbook" and "fossil" selected.
Functions:
"""
import glob
import os
import pandas as pd
import numpy as np
from ocean_drilling_db import data_filepaths as dfp
def load_dsdp_age_depth():
# Read in data and rename columns
dsdp_data = pd.read_csv(dfp.dsdp_age_depth, sep="\t", header=0,
skiprows=None, encoding='windows-1252')
dsdp_data = dsdp_data.reindex(['leg', 'site', 'hole', 'top of section depth(m)',
'bottom of section depth(m)', 'age top of section(million years)',
'age bottom of section(million years)', 'data source'], axis=1)
dsdp_data.columns = ('leg', 'site', 'hole', 'top_depth', 'bottom_depth',
'top_age', 'bottom_age', 'source')
dsdp_data[['top_age', 'bottom_age']] = np.multiply(dsdp_data[['top_age', 'bottom_age']], 1000000)
dsdp_data = dsdp_data.applymap(str)
# Assign site keys
site_keys = pd.read_csv('hole_metadata.csv', sep='\t', index_col=0)
site_keys = site_keys[['site_key','site']]
full_data = pd.merge(site_keys, dsdp_data, how = 'inner', on = 'site')
full_data = full_data.reindex(['site_key', 'leg', 'site', 'hole',
'top_depth', 'bottom_depth', 'top_age',
'bottom_age', 'type', 'source'], axis=1)
# Use both top and bottom picks
top_values = full_data.reindex(['site_key', 'leg', 'site', 'hole',
'top_depth','top_age', 'type', 'source'], axis=1)
top_values = top_values.rename(columns={'top_depth': 'depth', 'top_age': 'age'})
bottom_values = full_data.reindex(['site_key', 'leg', 'site', 'hole',
'bottom_depth', 'bottom_age', 'type', 'source'], axis=1)
bottom_values = bottom_values.rename(columns={'bottom_depth': 'depth', 'bottom_age': 'age'})
final_data = pd.concat([top_values, bottom_values])
final_data[['age', 'depth']] = final_data.loc[:,['age', 'depth']].applymap(float)
# Sort and clean
final_data = final_data.sort_values(['site_key', 'depth'])
final_data = final_data.replace(to_replace='nan', value=np.nan)
return final_data
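# Illustrative sketch (added for clarity, not part of the original module): the reshaping above
# turns one row holding top/bottom picks into two (depth, age) rows. The toy frame below is
# hypothetical; real input comes from dfp.dsdp_age_depth and hole_metadata.csv.
def _demo_stack_picks():
    """Tiny, self-contained illustration of the top/bottom stacking used above."""
    toy = pd.DataFrame({'site': ['1'], 'top_depth': [10.0], 'bottom_depth': [20.0],
                        'top_age': [1.0e6], 'bottom_age': [2.0e6]})
    top = toy[['site', 'top_depth', 'top_age']].rename(columns={'top_depth': 'depth', 'top_age': 'age'})
    bottom = toy[['site', 'bottom_depth', 'bottom_age']].rename(columns={'bottom_depth': 'depth', 'bottom_age': 'age'})
    return pd.concat([top, bottom]).sort_values('depth')  # two (depth, age) rows: (10.0, 1e6) and (20.0, 2e6)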
### TODO: clarify the difference between the age-depth and age-profile files
def load_odp_age_depth():
odp_data = pd.read_csv(dfp.odp_age_depth, sep="\t", header=0,
skiprows=None, encoding='windows-1252')
# Rename and reorder columns, change units to years
odp_data.columns = ('leg', 'site', 'hole', 'source', 'depth', 'age', 'type')
odp_data = odp_data.reindex(['leg', 'site', 'hole', 'depth', 'age', 'type', 'source'], axis=1)
odp_data['age'] = np.multiply(odp_data['age'], 1000000)
odp_data = odp_data.applymap(str)
# Assign site keys
site_keys = pd.read_csv('hole_metadata.csv', sep='\t', index_col=0)
site_keys = site_keys[['site_key','site']]
full_data = pd.merge(site_keys, odp_data, how = 'inner', on = 'site')
full_data = full_data.reindex(['site_key', 'leg', 'site', 'hole', 'depth', 'age', 'type', 'source'], axis=1)
full_data[['age', 'depth']] = full_data.loc[:,['age', 'depth']].applymap(float)
return full_data
def load_odp_age_profiles():
data = pd.read_csv(dfp.odp_age_profile, sep="\t", header=0,
skiprows=None, encoding='windows-1252')
# Filter out intervals whose top-to-base depth difference exceeds one core length (10 m; the 11 m cutoff allows ~10% error/expansion)
diff = data['Ageprofile Depth Base']-data['Ageprofile Depth Top']
data = data.iloc[diff[diff < 11].index.tolist(),:]
data['Ageprofile Age Old'] = data['Ageprofile Age Old'].str.strip().replace('',np.nan).astype(float)
# Average depths and ages
data['depth'] = (data['Ageprofile Depth Top'] + data['Ageprofile Depth Base'])/2
data['age'] = (data['Ageprofile Age Young'] + data['Ageprofile Age Old'])/2  # NOTE: bug fix - the original averaged the depth columns again; 'Ageprofile Age Young' is an assumed counterpart column
data.columns = data.columns.str.strip()
data = data.reindex(['Leg', 'Site', 'Hole', 'depth', 'age',
'Ageprofile Datum Description'], axis=1)
data = data.rename(columns={'Leg':'leg', 'Site':'site', 'Hole':'hole',
'Ageprofile Datum Description': 'type'})
data.hole = data.hole.str.strip()
data.type = data.type.str.strip()
data.site = data['site'].astype(str)
# Get site keys and add to DataFrame
site_keys = | pd.read_csv('hole_metadata.csv', sep='\t', index_col=0) | pandas.read_csv |
import numpy as np
import pandas as pd
from datetime import date
df = | pd.read_csv('supermarket.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
import time
import cv2
import pylab
import os
import sys
from .dl_face_detector import get_face_from_img
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
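# Usage sketch (illustrative; the relative path below is hypothetical):
#   cascade_path = resource_path("models/haarcascade_face.xml")
# During development this resolves against the current directory; in a frozen PyInstaller
# build it resolves against the temporary sys._MEIPASS extraction folder.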
default_color = (100, 255, 100)
red_color = (100, 100, 255)
class findFaceGetPulse(object):
def __init__(self, fps=None, running_on_video=False):
self.running_on_video = running_on_video
self.fps = fps
self.seconds_per_frame = 1 / fps if running_on_video else None
self.fps_calculator_ticks = None
self.fps_calculator_start = None
# we need few seconds to calculate FPS
self.fps_calculator_min_seconds = 3
self.frame_in = np.zeros((10, 10))
self.frame_out = np.zeros((10, 10))
self.buffer_size = 250
self.last_face_rects = | pd.DataFrame(columns=['x', 'y', 'h', 'w']) | pandas.DataFrame |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from xarray import Dataset, DataArray, Variable
from xarray.core import indexing
from . import TestCase, ReturnItem
class TestIndexers(TestCase):
def set_to_zero(self, x, i):
x = x.copy()
x[i] = 0
return x
def test_expanded_indexer(self):
x = np.random.randn(10, 11, 12, 13, 14)
y = np.arange(5)
I = ReturnItem()
for i in [I[:], I[...], I[0, :, 10], I[..., 10], I[:5, ..., 0],
I[..., 0, :], I[y], I[y, y], I[..., y, y],
I[..., 0, 1, 2, 3, 4]]:
j = indexing.expanded_indexer(i, x.ndim)
self.assertArrayEqual(x[i], x[j])
self.assertArrayEqual(self.set_to_zero(x, i),
self.set_to_zero(x, j))
with self.assertRaisesRegexp(IndexError, 'too many indices'):
indexing.expanded_indexer(I[1, 2, 3], 2)
def test_orthogonal_indexer(self):
x = np.random.randn(10, 11, 12, 13, 14)
y = np.arange(5)
I = ReturnItem()
# orthogonal and numpy indexing should be equivalent, because we only
# use at most one array and it never in between two slice objects
# (i.e., we try to avoid numpy's mind-boggling "partial indexing"
# http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html)
for i in [I[:], I[0], I[0, 0], I[:5], I[5:], I[2:5], I[3:-3], I[::-1],
I[::-2], I[5::-2], I[:3:-2], I[2:5:-1], I[7:3:-2], I[:3, :4],
I[:3, 0, :4], I[:3, 0, :4, 0], I[y], I[:, y], I[0, y],
I[:2, :3, y], I[0, y, :, :4, 0]]:
j = indexing.orthogonal_indexer(i, x.shape)
self.assertArrayEqual(x[i], x[j])
self.assertArrayEqual(self.set_to_zero(x, i),
self.set_to_zero(x, j))
# for more complicated cases, check orthogonal indexing is still
# equivalent to slicing
z = np.arange(2, 8, 2)
for i, j, shape in [
(I[y, y], I[:5, :5], (5, 5, 12, 13, 14)),
(I[y, z], I[:5, 2:8:2], (5, 3, 12, 13, 14)),
(I[0, y, y], I[0, :5, :5], (5, 5, 13, 14)),
(I[y, 0, z], I[:5, 0, 2:8:2], (5, 3, 13, 14)),
(I[y, :, z], I[:5, :, 2:8:2], (5, 11, 3, 13, 14)),
(I[0, :, z], I[0, :, 2:8:2], (11, 3, 13, 14)),
(I[0, :2, y, y, 0], I[0, :2, :5, :5, 0], (2, 5, 5)),
(I[0, :, y, :, 0], I[0, :, :5, :, 0], (11, 5, 13)),
(I[:, :, y, :, 0], I[:, :, :5, :, 0], (10, 11, 5, 13)),
(I[:, :, y, z, :], I[:, :, :5, 2:8:2], (10, 11, 5, 3, 14))]:
k = indexing.orthogonal_indexer(i, x.shape)
self.assertEqual(shape, x[k].shape)
self.assertArrayEqual(x[j], x[k])
self.assertArrayEqual(self.set_to_zero(x, j),
self.set_to_zero(x, k))
# standard numpy (non-orthogonal) indexing doesn't work anymore
with self.assertRaisesRegexp(ValueError, 'only supports 1d'):
indexing.orthogonal_indexer(x > 0, x.shape)
with self.assertRaisesRegexp(ValueError, 'invalid subkey'):
print(indexing.orthogonal_indexer((1.5 * y, 1.5 * y), x.shape))
def test_asarray_tuplesafe(self):
res = indexing._asarray_tuplesafe(('a', 1))
assert isinstance(res, np.ndarray)
assert res.ndim == 0
assert res.item() == ('a', 1)
res = indexing._asarray_tuplesafe([(0,), (1,)])
assert res.shape == (2,)
assert res[0] == (0,)
assert res[1] == (1,)
def test_convert_label_indexer(self):
# TODO: add tests that aren't just for edge cases
index = | pd.Index([1, 2, 3]) | pandas.Index |
import types
from typing import List, Optional, Iterable
import numpy as np
import pandas as pd
import sqlalchemy as sa
from boadata.core import DataObject
from boadata.core.data_conversion import DataConversion, MethodConversion
from .. import wrap
from .mixins import (
AsArrayMixin,
CopyableMixin,
GetItemMixin,
NumericalMixin,
StatisticsMixin,
)
class _PandasBase(
DataObject, GetItemMixin, StatisticsMixin, NumericalMixin, CopyableMixin
):
"""Shared behaviour for all pandas-based types.
These include Series and DataFrame based types.
"""
def __to_csv__(self, uri, **kwargs):
self.inner_data.to_csv(uri, **kwargs)
klass = DataObject.registered_types["csv"]
return klass.from_uri(uri=uri, source=self)
@property
def name(self) -> Optional[str]:
# Pandas objects do not have names.
return None
def select_rows(self, indexer) -> '_PandasBase':
inner_data = self.inner_data.iloc[indexer]
return DataObject.from_native(inner_data, source=self)
def sample_rows(self, number: int) -> '_PandasBase':
inner_data = self.inner_data.sample(number)
return DataObject.from_native(inner_data)
def concat(self, *others: Iterable["_PandasBase"], **kwargs) -> "_PandasBase":
inner_data = pd.concat(
[self.inner_data, *(o.inner_data for o in others)], **kwargs)
return DataObject.from_native(inner_data)
@DataObject.proxy_methods("head")
class PandasDataFrameBase(_PandasBase):
real_type = pd.DataFrame
def histogram(self, bins=None, columns=None, weights=None, **kwargs):
"""Histogram data on all numeric columns.
:param bins: number of bins or edges (numpy-like)
:param weights: as in numpy.histogram, but can be a column name as well
kwargs
------
- dropna: don't include nan values (these make zero values)
- range: as in numpy.histogram
In contrast to pandas hist method, this does not show the histogram.
"""
if not columns:
# All numeric
columns = [
col for col in self.columns if self.inner_data[col].dtype.kind in "iuf"
]
if isinstance(columns, str):
columns = [columns]
if isinstance(weights, str):
columns = [col for col in columns if col != weights]
weights = self[weights]
return {
col: self[col].histogram(bins=bins, weights=weights, **kwargs)
for col in columns
}
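# Usage sketch (illustrative; `table` stands for any wrapped dataframe with numeric columns):
#   hists = table.histogram(bins=20, columns=["a", "b"], weights="w", dropna=True)
#   # -> {"a": <histogram of column a>, "b": <histogram of column b>}, weighted by column "w";
#   # passing a column name as `weights` removes it from the histogrammed columns.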
def sql(self, sql, table_name=None):
"""Run SQL query on columns of the table.
:param table_name: name of the temporary table (default is the name of the dataframe)
Uses SQLite in-memory storage to create temporary table.
"""
from boadata import wrap
if not table_name:
table_name = self.name
if not table_name:
raise RuntimeError(
"Cannot run SQL queries on unnamed dataframe. Specify table_name argument..."
)
engine = sa.create_engine("sqlite:///:memory:")
self.inner_data.to_sql(table_name, engine)
# TODO: some clean up???
return wrap(pd.read_sql_query(sql, engine), source=self)
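# Usage sketch (illustrative; the table and column names are made up):
#   top = table.sql("SELECT a, COUNT(*) AS n FROM t GROUP BY a ORDER BY n DESC", table_name="t")
# The frame is first copied into an in-memory SQLite table, so this suits data that fits in RAM;
# if the object already has a name, table_name can be omitted.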
def __to_pandas_data_frame__(self):
return PandasDataFrame(inner_data=self.inner_data, source=self)
def __to_xy_dataseries__(self, x=None, y=None, **kwargs):
constructor = DataObject.registered_types["xy_dataseries"]
if not x and not y:
# Auto from 2 dim
if len(self.columns) == 1:
return self[self.columns[0]].convert("xy_dataseries")
elif len(self.columns) == 2:
xdata = self[self.columns[0]]
ydata = self[self.columns[1]]
xname = xdata.name or "x"
yname = ydata.name or "y"
else:
raise RuntimeError("Cannot convert dataframes with more than 2 columns")
elif x:
if not y:
try:
ydata = self.evaluate(x)
except:
ydata = self[x]
xdata = range(ydata.shape[0]) # TODO: proper index???
xname = "x"
yname = x
else:
try:
xdata = self.evaluate(x)
except:
xdata = self[x]
try:
ydata = self.evaluate(y)
except:
ydata = self[y]
xname = x
yname = y
else:
raise RuntimeError("Cannot specify y without specifying x.")
return constructor(
xdata,
ydata,
xname=kwargs.get("xname", xname),
yname=kwargs.get("yname", yname),
)
def __to_excel_sheet__(self, uri: str):
if "::" in uri:
file, sheet = uri.split("::")
else:
file = uri
sheet = self.name or "Unknown"
self.inner_data.to_excel(file, sheet)
uri = "{0}::{1}".format(file, sheet)
klass = DataObject.registered_types.get("excel_sheet")
return klass.from_uri(uri=uri, source=self)
def __to_feather__(self, uri: str):
if not "feather" in DataObject.registered_types:
raise RuntimeError("Cannot convert to feather.")
import feather
feather.write_dataframe(self.inner_data, uri)
return DataObject.registered_types["feather"].from_uri(uri, source=self)
# def __to_db_table__(self, uri: str):
# df = self.convert("pandas_data_frame")
# dshape = odo.discover(df.inner_data)
# new_inner_data = odo.odo(df.inner_data, uri, dshape=dshape)
# return DataObject.registered_types["db_table"].from_uri(uri, source=self)
def drop_columns(self, columns, allow_nonexistent=False):
if isinstance(columns, str):
columns = [columns]
if allow_nonexistent:
columns = [column for column in columns if column in self.columns]
self.inner_data.drop(columns, axis=1, inplace=True)
def rename_columns(self, col_dict):
"""Change columns names.
:param col_dict: New names
:type col_dict: list | dict
If col_dict is a dict, it is used as a mapping (non-matching ignored)
If col_dict is a list, all columns are renamed to this (size checked)
"""
if isinstance(col_dict, list):
if len(col_dict) != len(self.columns):
raise RuntimeError("Invalid number of columns to rename")
new_names = col_dict
elif isinstance(col_dict, dict):
new_names = [col_dict.get(col, col) for col in self.columns]
elif isinstance(col_dict, types.FunctionType):
new_names = [col_dict(col) for col in self.columns]
else:
raise RuntimeError("Column names not understood.")
self.inner_data.columns = new_names
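# Usage sketch (illustrative) of the three accepted forms of `col_dict`:
#   obj.rename_columns({"old": "new"})        # dict: partial mapping, unmatched names kept
#   obj.rename_columns(["a", "b", "c"])       # list: full replacement, length must match
#   obj.rename_columns(lambda c: c.lower())   # function: applied to every column name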
def reorder_columns(self, cols):
# TODO: Can there be duplicates in DataFrame? Perhaps not
if not set(cols) == set(self.columns):
raise RuntimeError(
"The new ordering of columns must be complete. {0} is not {1}".format(
set(cols), set(self.columns)
)
)
self.inner_data = self.inner_data[cols]
def add_column(self, expression, name=None):
if name in self.columns:
raise RuntimeError("Column already exists: {0}".format(name))
self._create_column(expression, name)
def _create_column(self, expression, name=None):
if isinstance(expression, str):
new_column = self.evaluate(expression, wrap=False)
if not name:
name = expression
elif isinstance(expression, PandasSeriesBase):
new_column = expression.inner_data
if not name:
name = new_column.name
elif isinstance(expression, pd.Series):
new_column = expression
if not name:
name = new_column.name
self.inner_data[name] = new_column
def replace_column(self, name, expression):
if name not in self.columns:
raise IndexError("The column {0} does not exist".format(name))
self._create_column(expression, name)
def select_columns(self, names: List[str]) -> "PandasDataFrame":
"""Select only several columns."""
inner_data = self.inner_data.loc[:, names]
# TODO: It's actually a view, isn't it?
# TODO: Enable regexes
return DataObject.from_native(inner_data, source=self)
def dropna(self, **kwargs):
kwargs["inplace"] = True
self.inner_data.dropna(**kwargs)
def sort_by(self, names: List[str]) -> "PandasDataFrame":
inner_data = self.inner_data.sort_values(by=names)
return DataObject.from_native(inner_data, source=self)
def append(self, other, **kwargs):
# TODO: Is used? Concat may be better
other = wrap(other)
if not isinstance(other, PandasDataFrameBase):
try:
other = other.convert("pandas_data_frame")
except:
raise TypeError("Only dataframes may be appended to dataframes.")
if self.columns:
if other.columns:
if not other.columns == self.columns:
raise RuntimeError("Both dataframes must have same column names")
if not np.all(other.inner_data.dtypes == self.inner_data.dtypes):
raise RuntimeError("Both dataframes must have same column dtypes.")
self.inner_data = self.inner_data.append(other.inner_data, **kwargs)
@DataObject.proxy_methods("dropna", "head")
@DataObject.proxy_methods("histogram", through="numpy_array")
@DataObject.proxy_methods("abs")
class PandasSeriesBase(_PandasBase, AsArrayMixin):
"""Abstract class for all types based on pandas Series"""
real_type = pd.Series
@property
def ndim(self):
return 1
def __repr__(self):
return "{0} (name={1}, shape={2}, dtype={3})".format(
self.__class__.__name__, self.inner_data.name, self.shape, self.dtype
)
def __to_xy_dataseries__(self, **kwargs):
constructor = DataObject.registered_types["xy_dataseries"]
x = range(self.shape[0]) # TODO: Change to proper index
y = self
if self.inner_data.name:
name = self.inner_data.name
else:
name = "data"
return constructor(x, y, xname="#", yname=name)
def __to_pandas_data_frame__(self, name=None):
if not name:
name = self.name or "Data"
df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/python
import click
import pandas as pd
import datetime
import time
import os
timestr = time.strftime("%Y%m%d-%H%M%S")
from click_help_colors import HelpColorsGroup, HelpColorsCommand
from pyfiglet import Figlet
# DEFAULT URLS FOR DATASOURCE Original Deprecated
# confirmed_cases_url_deprecated = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv"
# recovered_cases_url_deprecated = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Recovered.csv"
# death_cases_url_deprecated = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv"
# # previous_cases_url = "https://raw.githubusercontent.com/Jcharis/covidcli/master/covidcli/data/coronavirus_dataset.csv"
# DEFAULT URLS FOR DATASOURCE Modified
confirmed_cases_url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv"
recovered_cases_url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv"
death_cases_url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv"
previous_cases_url = "https://raw.githubusercontent.com/Jcharis/covidcli/master/covidcli/data/coronavirus_dataset.csv"
us_confirmed_cases_url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv"
us_deaths_cases_url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv"
def get_n_melt_data(data_url, case_type):
df = pd.read_csv(data_url)
melted_df = df.melt(id_vars=["Province/State", "Country/Region", "Lat", "Long"])
melted_df.rename(columns={"variable": "Date", "value": case_type}, inplace=True)
return melted_df
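# Illustrative sketch (added for clarity, not part of the original CLI): the Johns Hopkins
# time-series files are wide (one column per date); melting turns them into long
# (location, Date, case count) rows. The toy frame below uses made-up values.
def _demo_melt():
    """Tiny, self-contained illustration of the wide-to-long melt used above."""
    toy = pd.DataFrame({"Province/State": [None], "Country/Region": ["Nowhere"],
                        "Lat": [0.0], "Long": [0.0], "1/22/20": [1], "1/23/20": [3]})
    melted = toy.melt(id_vars=["Province/State", "Country/Region", "Lat", "Long"])
    melted.rename(columns={"variable": "Date", "value": "Confirmed"}, inplace=True)
    return melted  # two rows, one per date column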
def merge_data(confirm_df, recovered_df, deaths_df):
new_df = confirm_df.join(recovered_df["Recovered"]).join(deaths_df["Deaths"])
return new_df
def find_active_cases(total_confirmed,total_recovered,total_deaths):
active_cases = total_confirmed -(total_recovered + total_deaths)
if active_cases < 0:
result = 'Uncertain'
else:
result = int(active_cases)
return result
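# Worked example: 100 confirmed, 40 recovered, 10 deaths -> 100 - (40 + 10) = 50 active cases;
# a negative result (recovered + deaths exceeding confirmed) is reported as 'Uncertain'.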
def is_recovered_more(total_confirmed,total_recovered):
if total_recovered > total_confirmed:
click.echo(click.style("Error:", fg="red") + " {}".format('Data Discrepancy'))
click.secho('Recovered Cases MORE THAN Confirmed Cases!!!',fg='red')
def find_discrepancy(total_confirmed,total_recovered):
if total_recovered > total_confirmed:
click.echo(click.style("Error:", fg="blue") + " {}".format('Data Discrepancy'))
click.secho('Recovered Cases Exceed Confirmed Cases!!!',fg='red')
def get_n_melt_us_confirmed(data_url, case_type):
df = pd.read_csv(data_url)
melted_df = df.melt(id_vars=['UID', 'iso2', 'iso3', 'code3', 'FIPS', 'Admin2', 'Province_State','Country_Region', 'Lat', 'Long_', 'Combined_Key'])
melted_df.rename(columns={"variable": "Date", "value": case_type}, inplace=True)
return melted_df
def get_n_melt_us_death(data_url, case_type):
df = | pd.read_csv(data_url) | pandas.read_csv |
'''Python script to generate CAC'''
'''Authors - <NAME>
'''
import numpy as np
import pandas as pd
from datetime import datetime
import collections
from .helpers import *
class CAC:
def __init__(self, fin_perf, oper_metrics, oth_metrics):
print("INIT CAC")
self.fin_perf = pd.DataFrame(fin_perf)
self.oper_metrics = pd.DataFrame(oper_metrics)
self.oth_metrics = pd.DataFrame(oth_metrics)
def run(self):
self.clean_inputs()
print(self.fin_perf)
print(self.oper_metrics)
print(self.oth_metrics)
self.ttm_cac()
self.yoy_growth()
self.clean_outputs()
json = {
"CAC & CAC TTM": self.cac_ttm.to_dict(orient='records'),
"CAC YoY Growth": self.cac_yoy.to_dict(orient='records'),
}
return json
def clean_inputs(self):
self.fin_perf = self.fin_perf.copy()
self.fin_perf.set_index("Financial Performance", inplace=True)
self.fin_perf.apply(filter_to_dec_list)
self.oper_metrics = self.oper_metrics.copy()
self.oper_metrics.set_index("Operating Metrics", inplace=True)
self.oper_metrics.apply(filter_to_dec_list)
self.oth_metrics.set_index("Other Metrics", inplace=True)
self.oth_metrics.apply(filter_to_dec_list)
def clean_outputs(self):
self.cac_ttm = self.cac_ttm.astype(object)
self.cac_ttm.apply(nan_to_blank_list)
self.cac_ttm = self.cac_ttm.apply(numbers_with_commas_list)
self.cac_ttm = self.cac_ttm.drop(self.cac_ttm.columns[0], axis=1)
self.cac_ttm.reset_index(inplace=True)
self.cac_yoy = self.cac_yoy.astype(object)
self.cac_yoy.apply(nan_to_blank_list)
cac_yoy_copy = self.cac_yoy.copy()
self.cac_yoy = self.cac_yoy.apply(numbers_with_commas_list)
self.cac_yoy.loc['YoY growth'] = cac_yoy_copy.loc['YoY growth'].apply(dec_to_percents)
self.cac_yoy.loc['YoY growth*'] = cac_yoy_copy.loc['YoY growth*'].apply(dec_to_percents)
self.cac_yoy = self.cac_yoy.drop(self.cac_yoy.columns[0], axis=1)
self.cac_yoy.reset_index(inplace=True)
print("CAC & CAC TTM")
print(self.cac_ttm)
print("CAC YoY Growth")
print(self.cac_yoy)
def ttm_cac(self):
index = ["S&M", "Total Expense", "# of New Customers", "CAC", "TTM CAC"]
self.cac_ttm = pd.DataFrame(index=np.arange(len(index)), columns=self.fin_perf.columns)
self.cac_ttm.set_index( | pd.Series(index, name="") | pandas.Series |
import os
import random
import sys
import joblib
import math
import lightgbm as lgb
import xgboost as xgb
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.svm as svm
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LinearRegression, LogisticRegression
from catboost import CatBoostClassifier
from sklearn.metrics import (accuracy_score, average_precision_score,
classification_report, confusion_matrix, f1_score,
precision_recall_curve, roc_auc_score, roc_curve)
from sklearn.model_selection import GroupKFold
from sklearn.naive_bayes import GaussianNB
from keras.layers import LSTM
from keras.layers import Dense
from keras.models import Sequential
from keras.utils import to_categorical
import keras.callbacks as kcallbacks
from utilClass import RocAucMetricCallback
from utils import series_to_supervised
from tqdm import tqdm
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from imblearn.pipeline import Pipeline
sys.path.append('..')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
class AtoA:
def __init__(self, mode, type='df', seed=2021, scale='all'):
self.seed = seed
self.window = 0 # length of the sliding window used when reading data online
self.win_df = pd.DataFrame() # online reading always keeps a dataframe no longer than the maximum sliding window
self.mode = mode # 'offline' or 'online'
self.type = type # 'df' or 'dcs'
self.scale = scale # model scale ('all' or 'light')
self.current_row = None # sample at the current time step
self.pre_row = None # sample at the previous time step
# Dogfight (DF) feature-engineering utilities
def FE_DF(self, data):
""" DF特征工程
Args:
data (dataframe): 原始数据
Returns:
DataFrame: 特征工程后数据
"""
data = data.sort_values(by=['id'])
if self.scale == 'all':
# Enemy velocity: diff() gives the change in x/y/z from the previous step; divide by the time step
for f in ['x', 'y', 'z']:
data['enemy_v_{}'.format(f)] = data.groupby('id')[
'enemy_{}'.format(f)].diff(1) / 0.02
# Acceleration of both aircraft: diff() gives the change in v_x/v_y/v_z from the previous step; divide by the time step
for f in ['v_x', 'v_y', 'v_z']:
data[f'my_{f}_acc'] = data.groupby(
'id')[f'my_{f}'].diff() / 0.2
data[f'enemy_{f}_acc'] = data.groupby(
'id')[f'enemy_{f}'].diff() / 0.2
# Pairwise differences of position and velocity between own and enemy aircraft
for f in ['x', 'y', 'z', 'v_x', 'v_y', 'v_z']:
data[f'{f}_me_minus'] = data[f'my_{f}'] - data[f'enemy_{f}']
# Distance between the two aircraft
data['distance'] = ((data['my_x'] - data['enemy_x'])**2 +
(data['my_y'] - data['enemy_y'])**2 +
(data['my_z'] - data['enemy_z'])**2)**0.5
# Aiming angle: dot product of own velocity with the vector to the enemy
data['cos'] = ((data['my_v_x'] * (data['enemy_x'] - data['my_x'])) +
(data['my_v_y'] * (data['enemy_y'] - data['my_y'])) +
(data['my_v_z'] * (data['enemy_z'] - data['my_z'])))
# Own speed (resultant velocity magnitude)
data['speedAll'] = ((data['my_v_x']**2 + data['my_v_y']**2 +
data['my_v_z']**2)**0.5)
# Cosine of the aiming angle
data['cosValue'] = data['cos'] / (data['speedAll'] * data['distance'])
# Fill missing values with 0
data.fillna(0, inplace=True)
return data
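# Worked example of the aiming-angle feature (illustrative numbers): with own velocity
# (1, 0, 0) and the enemy offset (10, 0, 0), cosValue = 10 / (1 * 10) = 1.0 (flying straight
# at the enemy); an enemy directly abeam at (0, 10, 0) gives cosValue = 0.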
def online_FE_DF(self, row_dict):
""" DF在线特征工程
Args:
row_dict(dict): 传入的一行字典记录
Returns:
DataFrame: 加入特征后的记录dataframe
"""
# 将字典转为dataframe格式
data = pd.DataFrame(row_dict, index=[0])
# Distance between the two aircraft
data['distance'] = ((data['my_x'] - data['enemy_x'])**2 +
(data['my_y'] - data['enemy_y'])**2 +
(data['my_z'] - data['enemy_z'])**2)**0.5
# Aiming angle: dot product of own velocity with the vector to the enemy
data['cos'] = ((data['my_v_x'] * (data['enemy_x'] - data['my_x'])) +
(data['my_v_y'] * (data['enemy_y'] - data['my_y'])) +
(data['my_v_z'] * (data['enemy_z'] - data['my_z'])))
# Own speed (resultant velocity magnitude)
data['speedAll'] = ((data['my_v_x']**2 + data['my_v_y']**2 +
data['my_v_z']**2)**0.5)
# 夹角cos值
data['cosValue'] = data['cos'] / (data['speedAll'] * data['distance'])
# 缺失值补0
data.fillna(0, inplace=True)
return data
# DCS feature-engineering utilities
def FE_DCS(self, data_):
""" DCS feature engineering.
Args:
data (dataframe): raw data
Returns:
DataFrame: data with engineered features
"""
data = data_.copy(deep=True)
if self.mode == 'offline':
# For offline training the data must be grouped/sorted by id
data = data.sort_values(by=['id'])
# Distance between the two aircraft
data['distance'] = (
(data['my_position_x'] - data['enemy_position_x'])**2 +
(data['my_position_y'] - data['enemy_position_y'])**2 +
(data['my_position_z'] - data['enemy_position_z'])**2)**0.5
# Vector math: a = (x,y,z) is own velocity, b = (x2,y2,z2) own position, c = (x3,y3,z3) enemy position.
# The vector from own centre to the enemy centre is d = c - b.
# The cosine between d and a is cos = (d . a) / (|d| * |a|)
data['cos'] = ((data['my_speed_x'] *
(data['enemy_position_x'] - data['my_position_x'])) +
(data['my_speed_y'] *
(data['enemy_position_y'] - data['my_position_y'])) +
(data['my_speed_z'] *
(data['enemy_position_z'] - data['my_position_z'])))
# Speed (velocity magnitude)
data['speedAll'] = ((data['my_speed_x']**2 + data['my_speed_y']**2 +
data['my_speed_z']**2)**0.5)
# Cosine of the angle between the vectors
data['cosValue'] = data['cos'] / (data['speedAll'] * data['distance'])
# Pairwise position differences between own and enemy aircraft
for f in ['position_x', 'position_y', 'position_z']:
data[f'{f}_diff'] = data[f'my_{f}'] - data[f'enemy_{f}']
return data
@staticmethod
def _caculate_speed_connect_cos(x, y, z, enemy_x, enemy_y, enemy_z,
speed_x, speed_y, speed_z):
"""
Compute the angle between the own-to-enemy line-of-sight vector and a velocity vector.
Args:
x, y, z: own position
enemy_x, enemy_y, enemy_z: enemy position
speed_x, speed_y, speed_z: own or enemy velocity
Returns:
speed_connect_cos: cosine of the angle between the line-of-sight vector and the velocity vector
"""
connect_vec = np.array([enemy_x - x, enemy_y - y, enemy_z - z])
my_speed_vec = np.array([speed_x, speed_y, speed_z])
speed_connect_cos = connect_vec.dot(my_speed_vec) / np.sqrt(
connect_vec.dot(connect_vec) * my_speed_vec.dot(my_speed_vec))
return speed_connect_cos
@staticmethod
def _caculate_speed_cos(speed_x, speed_y, speed_z, enemy_speed_x,
enemy_speed_y, enemy_speed_z):
"""
Compute the angle between the own velocity vector and the enemy velocity vector.
Args:
speed_x, speed_y, speed_z: own velocity
enemy_speed_x, enemy_speed_y, enemy_speed_z: enemy velocity
Returns:
speed_cos: cosine of the angle between the enemy and own velocity vectors
"""
my_speed_vec = np.array([speed_x, speed_y, speed_z])
enemy_speed_vec = np.array(
[enemy_speed_x, enemy_speed_y, enemy_speed_z])
speed_cos = my_speed_vec.dot(enemy_speed_vec) / np.sqrt(
my_speed_vec.dot(my_speed_vec) *
enemy_speed_vec.dot(enemy_speed_vec))
return speed_cos
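# Worked example (illustrative): velocities (1, 0, 0) and (1, 0, 0) give speed_cos = 1.0
# (same direction); (1, 0, 0) and (0, 1, 0) give 0.0; (1, 0, 0) and (-1, 0, 0) give -1.0.
# Note that both helpers divide by the vector norms, so a zero-length vector yields NaN.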
def FE_DCS_new(self, data_):
"""新DCS任务特征工程
Args:
data_ (dataframe): 原始数据
Returns:
data: 特征工程后数据
"""
data = data_.copy()
data = data.sort_values(by=['id', 'ISO time'])
data.reset_index(drop=True, inplace=True)
data.rename(columns={
'U': 'x',
'V': 'z',
'Altitude': 'y',
'enemy_U': 'enemy_x',
'enemy_V': 'enemy_z',
'enemy_Altitude': 'enemy_y',
},
inplace=True)
if self.mode == 'offline':
if self.scale == 'all':
# Compute own velocity
data = pd.concat([
data,
pd.DataFrame({
'speed_x': data.groupby('id')['x'].diff(),
'speed_y': data.groupby('id')['y'].diff(),
'speed_z': data.groupby('id')['z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[['speed_x', 'speed_y', 'speed_z'
]] = data[['speed_x', 'speed_y', 'speed_z']] / 0.05
data['speed'] = data.apply(lambda x: np.sqrt(x[
'speed_x']**2 + x['speed_y']**2 + x['speed_z']**2),
axis=1)
# Compute enemy velocity
data = pd.concat([
data,
pd.DataFrame({
'enemy_speed_x':
data.groupby('id')['enemy_x'].diff(),
'enemy_speed_y':
data.groupby('id')['enemy_y'].diff(),
'enemy_speed_z':
data.groupby('id')['enemy_z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[[
'enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] = data[['enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] / 0.05
data['enemy_speed'] = data.apply(
lambda x: np.sqrt(x['enemy_speed_x']**2 + x[
'enemy_speed_y']**2 + x['enemy_speed_z']**2),
axis=1)
# Compute distance between own and enemy aircraft
data['distance'] = data.apply(lambda x: np.sqrt(
(x['x'] - x['enemy_x'])**2 + (x['y'] - x['enemy_y'])**2 +
(x['z'] - x['enemy_z'])**2),
axis=1)
# Cosine of the angle between own velocity and the line of sight to the enemy
data['speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['speed_x'], x['speed_y'], x['speed_z'
]),
axis=1)
# Cosine of the angle between enemy velocity and the line of sight
data['enemy_speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['enemy_speed_x'], x['enemy_speed_y'],
x['enemy_speed_z']),
axis=1)
# Cosine of the angle between the two velocity vectors
data['speed_cos'] = data.apply(
lambda x: self._caculate_speed_cos(x['speed_x'], x[
'speed_y'], x['speed_z'], x['enemy_speed_x'], x[
'enemy_speed_y'], x['enemy_speed_z']),
axis=1)
# Normalize own heading to [0, 360)
data['Heading'] = data['Heading'] % 360
# Relative position and velocity
for f in [
'x', 'y', 'z', 'speed_x', 'speed_y', 'speed_z', 'speed'
]:
data[f'relative_{f}'] = data[f'enemy_{f}'] - data[f'{f}']
# Determine whether this is a lead pursuit
data['is_lead_chase'] = data.apply(
lambda x: self._is_lead_chase(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'],
x['enemy_z'], x['speed_x'], x['speed_y'], x['speed_z'],
x['enemy_speed_x'], x['enemy_speed_y'], x[
'enemy_speed_z'], x['speed_connect_cos'], x[
'enemy_speed_connect_cos']),
axis=1)
# Zero out labels where firing is impossible (distance > 1000 or facing away)
data['label'] = data.apply(
lambda x: 0 if x['speed_connect_cos'] < 0 or x[
'distance'] > 1000 else x['label'],
axis=1)
data.fillna(0, inplace=True)
data.dropna(inplace=True)
data.to_csv('a2a_fe.csv', index=False)
return data
elif self.scale == 'light':
# Compute own velocity
data = pd.concat([
data,
pd.DataFrame({
'speed_x': data.groupby('id')['x'].diff(),
'speed_y': data.groupby('id')['y'].diff(),
'speed_z': data.groupby('id')['z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[['speed_x', 'speed_y', 'speed_z'
]] = data[['speed_x', 'speed_y', 'speed_z']] / 0.05
data['speed'] = data.apply(lambda x: np.sqrt(x[
'speed_x']**2 + x['speed_y']**2 + x['speed_z']**2),
axis=1)
# Compute enemy velocity
data = pd.concat([
data,
pd.DataFrame({
'enemy_speed_x':
data.groupby('id')['enemy_x'].diff(),
'enemy_speed_y':
data.groupby('id')['enemy_y'].diff(),
'enemy_speed_z':
data.groupby('id')['enemy_z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[[
'enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] = data[['enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] / 0.05
data['enemy_speed'] = data.apply(
lambda x: np.sqrt(x['enemy_speed_x']**2 + x[
'enemy_speed_y']**2 + x['enemy_speed_z']**2),
axis=1)
# Compute distance between own and enemy aircraft
data['distance'] = data.apply(lambda x: np.sqrt(
(x['x'] - x['enemy_x'])**2 + (x['y'] - x['enemy_y'])**2 +
(x['z'] - x['enemy_z'])**2),
axis=1)
# Cosine of the angle between own velocity and the line of sight to the enemy
data['speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['speed_x'], x['speed_y'], x['speed_z'
]),
axis=1)
# Cosine of the angle between enemy velocity and the line of sight
data['enemy_speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['enemy_speed_x'], x['enemy_speed_y'],
x['enemy_speed_z']),
axis=1)
# Cosine of the angle between the two velocity vectors
data['speed_cos'] = data.apply(
lambda x: self._caculate_speed_cos(x['speed_x'], x[
'speed_y'], x['speed_z'], x['enemy_speed_x'], x[
'enemy_speed_y'], x['enemy_speed_z']),
axis=1)
# Relative position
for f in ['z', 'speed']:
data[f'relative_{f}'] = data[f'enemy_{f}'] - data[f'{f}']
# Determine whether this is a lead pursuit
data['is_lead_chase'] = data.apply(
lambda x: self._is_lead_chase(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'],
x['enemy_z'], x['speed_x'], x['speed_y'], x['speed_z'],
x['enemy_speed_x'], x['enemy_speed_y'], x[
'enemy_speed_z'], x['speed_connect_cos'], x[
'enemy_speed_connect_cos']),
axis=1)
# Zero out labels where firing is impossible (distance > 1000 or facing away)
data['label'] = data.apply(
lambda x: 0 if x['speed_connect_cos'] < 0 or x[
'distance'] > 1000 else x['label'],
axis=1)
data.fillna(0, inplace=True)
data.dropna(inplace=True)
data.to_csv('a2a_fe.csv', index=False)
return data
if self.mode == 'online':
if self.scale == 'all':
# Compute own velocity
data = pd.concat([
data,
pd.DataFrame({
'speed_x': data['x'].diff(),
'speed_y': data['y'].diff(),
'speed_z': data['z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[['speed_x', 'speed_y', 'speed_z'
]] = data[['speed_x', 'speed_y', 'speed_z']] / 0.05
data['speed'] = data.apply(lambda x: np.sqrt(x[
'speed_x']**2 + x['speed_y']**2 + x['speed_z']**2),
axis=1)
# Compute enemy velocity
data = pd.concat([
data,
pd.DataFrame({
'enemy_speed_x': data['enemy_x'].diff(),
'enemy_speed_y': data['enemy_y'].diff(),
'enemy_speed_z': data['enemy_z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[[
'enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] = data[['enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] / 0.05
data['enemy_speed'] = data.apply(
lambda x: np.sqrt(x['enemy_speed_x']**2 + x[
'enemy_speed_y']**2 + x['enemy_speed_z']**2),
axis=1)
# Compute distance between own and enemy aircraft
data['distance'] = data.apply(lambda x: np.sqrt(
(x['x'] - x['enemy_x'])**2 + (x['y'] - x['enemy_y'])**2 +
(x['z'] - x['enemy_z'])**2),
axis=1)
# Cosine of the angle between own velocity and the line of sight to the enemy
data['speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['speed_x'], x['speed_y'], x['speed_z'
]),
axis=1)
# Cosine of the angle between enemy velocity and the line of sight
data['enemy_speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['enemy_speed_x'], x['enemy_speed_y'],
x['enemy_speed_z']),
axis=1)
# Cosine of the angle between the two velocity vectors
data['speed_cos'] = data.apply(
lambda x: self._caculate_speed_cos(x['speed_x'], x[
'speed_y'], x['speed_z'], x['enemy_speed_x'], x[
'enemy_speed_y'], x['enemy_speed_z']),
axis=1)
# Normalize own heading to [0, 360)
data['Heading'] = data['Heading'] % 360
# Relative position and velocity
for f in [
'x', 'y', 'z', 'speed_x', 'speed_y', 'speed_z', 'speed'
]:
data[f'relative_{f}'] = data[f'enemy_{f}'] - data[f'{f}']
# Zero out labels where firing is impossible (distance > 1000 or facing away)
data['label'] = data.apply(
lambda x: 0 if x['speed_connect_cos'] < 0 or x[
'distance'] > 1000 else x['label'],
axis=1)
data.fillna(0, inplace=True)
data.dropna(inplace=True)
data.to_csv('a2a_fe.csv', index=False)
return data
elif self.scale == 'light':
# Compute own velocity
data = pd.concat([
data,
pd.DataFrame({
'speed_x': data['x'].diff(),
'speed_y': data['y'].diff(),
'speed_z': data['z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[['speed_x', 'speed_y', 'speed_z'
]] = data[['speed_x', 'speed_y', 'speed_z']] / 0.05
data['speed'] = data.apply(lambda x: np.sqrt(x[
'speed_x']**2 + x['speed_y']**2 + x['speed_z']**2),
axis=1)
# Compute enemy velocity
data = pd.concat([
data,
pd.DataFrame({
'enemy_speed_x': data['enemy_x'].diff(),
'enemy_speed_y': data['enemy_y'].diff(),
'enemy_speed_z': data['enemy_z'].diff()
})
],
sort=False,
axis=1)
data.fillna(0, inplace=True)
data[[
'enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] = data[['enemy_speed_x', 'enemy_speed_y', 'enemy_speed_z'
]] / 0.05
data['enemy_speed'] = data.apply(
lambda x: np.sqrt(x['enemy_speed_x']**2 + x[
'enemy_speed_y']**2 + x['enemy_speed_z']**2),
axis=1)
# Compute distance between own and enemy aircraft
data['distance'] = data.apply(lambda x: np.sqrt(
(x['x'] - x['enemy_x'])**2 + (x['y'] - x['enemy_y'])**2 +
(x['z'] - x['enemy_z'])**2),
axis=1)
# Cosine of the angle between own velocity and the line of sight to the enemy
data['speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['speed_x'], x['speed_y'], x['speed_z'
]),
axis=1)
# Cosine of the angle between enemy velocity and the line of sight
data['enemy_speed_connect_cos'] = data.apply(
lambda x: self._caculate_speed_connect_cos(
x['x'], x['y'], x['z'], x['enemy_x'], x['enemy_y'], x[
'enemy_z'], x['enemy_speed_x'], x['enemy_speed_y'],
x['enemy_speed_z']),
axis=1)
# Cosine of the angle between the two velocity vectors
data['speed_cos'] = data.apply(
lambda x: self._caculate_speed_cos(x['speed_x'], x[
'speed_y'], x['speed_z'], x['enemy_speed_x'], x[
'enemy_speed_y'], x['enemy_speed_z']),
axis=1)
# Relative position
for f in ['y', 'speed']:
data[f'relative_{f}'] = data[f'enemy_{f}'] - data[f'{f}']
# Zero out labels where firing is impossible (distance > 1000 or facing away)
data['label'] = data.apply(
lambda x: 0 if x['speed_connect_cos'] < 0 or x[
'distance'] > 1000 else x['label'],
axis=1)
data.fillna(0, inplace=True)
data.dropna(inplace=True)
data.to_csv('a2a_fe.csv', index=False)
return data
# DCS online feature engineering
def online_FE_DCS(self, row_dict):
""" DCS online feature engineering.
Args:
row_dict(dict): a single record passed in as a dict
Returns:
DataFrame: the record with engineered features
"""
# Convert the dict to a dataframe
row = pd.DataFrame(row_dict, index=[0])
# Reuse the offline feature-engineering function
FE_row = self.FE_DCS(row)
return FE_row
# New DCS online feature engineering
def online_FE_DCS_new(self, row_dict):
""" AtoA online feature engineering.
Args:
row_dict(dict): a single record passed in as a dict
Returns:
DataFrame: the record with engineered features
"""
row = pd.DataFrame(row_dict, index=[0])
self.current_row = row
if self.pre_row is None:
FE_row = self.online_FE_DCS(self.current_row)
else:
window = pd.concat([self.pre_row, self.current_row], axis=0)
FE_row = self.online_FE_DCS(window)[-1:]
self.pre_row = self.current_row
return FE_row
def train_val_split(self, df_train, percent=0.8):
""" 数据集划分
划分数据集为训练集与测试
Args:
df_train(dataframe): 原始数据
percent(int): 切分比例
Returns:
train(dataframe): 训练集
val_data(dataframe): 验证集
"""
# Collect all ids
all_ids = df_train['id'].values.tolist()
# Deduplicate the ids
all_ids = list(set(all_ids))
# set() ordering differs between runs, so sort first to keep results reproducible
all_ids.sort()
# random.seed only takes effect once, so reset it before every random.sample call
random.seed(self.seed)
# Sample the training-set ids
train_ids = random.sample(all_ids, int(len(all_ids) * percent))
# The remaining ids form the validation set
val_ids = list(set(all_ids) - set(train_ids))
# Select training rows by id
train_data = df_train[df_train['id'].isin(train_ids)]
# Select validation rows by id
val_data = df_train[df_train['id'].isin(val_ids)]
# The data are sequential, but since samples are modelled individually they must be shuffled
train_data = train_data.sample(
frac=1, random_state=self.seed).reset_index(drop=True)
return train_data, val_data
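# Usage sketch (illustrative; `raw_df` is a hypothetical dataframe with an 'id' column):
#   fe = AtoA(mode='offline', type='dcs')
#   train_df, val_df = fe.train_val_split(raw_df, percent=0.8)
# Splitting by id keeps every engagement entirely within one of the two sets.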
def smote(self, data_):
data = data_.copy()
over = SMOTE(sampling_strategy=0.2, random_state=self.seed)
under = RandomUnderSampler(sampling_strategy=1.0,
random_state=self.seed)
steps = [('o', over), ('u', under)]
pipeline = Pipeline(steps=steps)
X, y = pipeline.fit_resample(
data[[i for i in data.columns if i not in ['label']]],
data['label'])
return pd.concat([X, y], axis=1)
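# Illustrative effect of the resampling pipeline above: with 1000 negatives and 50 positives,
# SMOTE(sampling_strategy=0.2) first synthesises positives up to ~200 (0.2 * 1000), then
# RandomUnderSampler(sampling_strategy=1.0) drops negatives until the classes are 1:1 (~200 each).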
def _feature_name(self):
""" 获取保留列名
Returns:
feature_names(list): 列名信息
"""
# 固定顺序,否则模型预测会出错
if self.type == 'df':
if self.scale == 'all':
feature_names = [
'my_x', 'my_y', 'my_z', 'my_v_x', 'my_v_y', 'my_v_z',
'my_rot_x', 'my_rot_y', 'my_rot_z', 'enemy_x', 'enemy_y',
'enemy_z', 'enemy_v_x', 'enemy_v_y', 'enemy_v_z',
'my_v_x_acc', 'enemy_v_x_acc', 'my_v_y_acc',
'enemy_v_y_acc', 'my_v_z_acc', 'enemy_v_z_acc',
'x_me_minus', 'y_me_minus', 'z_me_minus', 'v_x_me_minus',
'v_y_me_minus', 'v_z_me_minus', 'distance', 'cos',
'speedAll', 'cosValue'
]
else:
feature_names = ['cosValue', 'speedAll', 'distance']
elif self.type == 'dcs':
if self.scale == 'all':
feature_names = [
'z', 'Roll', 'Pitch', 'Yaw', 'x', 'y', 'Heading',
'enemy_z', 'enemy_x', 'enemy_y', 'speed_x', 'speed_y',
'speed_z', 'enemy_speed_x', 'enemy_speed_y',
'enemy_speed_z', 'distance', 'speed', 'speed_connect_cos',
'enemy_speed_connect_cos', 'relative_x', 'relative_z',
'relative_y', 'relative_speed_x', 'relative_speed_y',
'relative_speed_z', 'relative_speed', 'speed_cos'
]
elif self.scale == 'light':
feature_names = [
'distance', 'speed_connect_cos', 'enemy_speed_connect_cos',
'relative_y', 'speed_cos'
]
else:
feature_names = [
'z', 'Roll', 'Pitch', 'Yaw', 'x', 'y', 'Heading',
'enemy_z', 'enemy_x', 'enemy_y'
]
return feature_names
# Hold-out data
def _hold_out(self, raw_train, percent_train):
""" 获取留出法的训练数据
Args:
raw_train(dataframe): 原始数据
percent_train(int): 训练集占比
Returns:
train(dataframe): 训练集
val(dataframe): 验证集
"""
# 获取保留的列名
feature_names = self._feature_name()
# 切分训练集、验证集
train_data, val_data = self.train_val_split(raw_train,
percent=percent_train)
if self.type == 'dcs':
train_data = self.smote(train_data)
# Extract features and labels for the training and validation sets
X_train, X_val, y_train, y_val = train_data[feature_names], val_data[
feature_names], train_data['label'], val_data['label']
return X_train, X_val, y_train, y_val
# k-fold cross-validation data
def _k_fold(self, raw_train, k):
""" 获取交叉验证数据
Args:
raw_train(dataframe): 原始数据
k(int): 交叉折数
Returns:
train(dataframe): k折交叉验证的训练集
val(dataframe): 验证集
"""
# 获取保留列名
feature_names = self._feature_name()
# 根据id分组
groups = list(raw_train['id'])
# 分组交叉验证
gkf = GroupKFold(n_splits=k)
data_list = []
# 获取交叉验证数据
for train_index, val_index in gkf.split(raw_train[feature_names],
raw_train['label'],
groups=groups):
# Slice out each fold by index
X_train, y_train, X_val, y_val = raw_train.iloc[train_index][feature_names], \
raw_train.iloc[train_index]['label'], \
raw_train.iloc[val_index][feature_names], \
raw_train.iloc[val_index]['label']
# Store the fold in the list
data_list.append([X_train, X_val, y_train, y_val])
# Return the list of folds
return data_list
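# Usage sketch (illustrative): each entry of the returned list is one fold,
#   for X_train, X_val, y_train, y_val in self._k_fold(raw_train, k=5):
#       ...train and validate one model per fold...
# GroupKFold keeps all rows of a given id in the same fold, so engagements are never split.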
def _bootstrap(self, raw_train):
""" 获取提升法数据
Args:
raw_train(dataframe): 原始数据
Returns:
train(dataframe): 提升法训练集
val(dataframe): 验证集
"""
# 获取保留列名
feature_names = self._feature_name()
# Collect all ids and deduplicate
ids = pd.DataFrame(set(raw_train['id']), columns=['id'], index=None)
random.seed(self.seed)
# Sample ids with replacement
train_group_ids = ids.sample(frac=1.0,
replace=True,
random_state=self.seed)
# Ids not drawn for training form the validation set
val_group_ids = ids.loc[ids.index.difference(
train_group_ids.index)].copy()
# Create the two dataframes
train_data = pd.DataFrame()
val_data = | pd.DataFrame() | pandas.DataFrame |
import os
import unittest
from builtins import range
import matplotlib
import mock
import numpy as np
import pandas as pd
import root_numpy
from mock import MagicMock, patch, mock_open
import six
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import ROOT
from PyAnalysisTools.AnalysisTools import MLHelper as mh
from PyAnalysisTools.base import InvalidInputError
from PyAnalysisTools.base.FileHandle import FileHandle
if six.PY2:
builtin = '__builtin__'
else:
builtin = 'builtins'
cwd = os.path.dirname(__file__)
ROOT.gROOT.SetBatch(True)
class TestMLHelpers(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_print_classification(self):
model = MagicMock()
model.predict_classes = MagicMock(return_value=[1])
mh.print_classification(model, 1, [2], 3, [4])
@mock.patch.object(matplotlib.pyplot, 'savefig', lambda x: None)
def test_plot_scoring(self):
class Object(object):
pass
history = Object()
history.history = {'foo': [(100, 100), (200, 200)]}
history.history['val_foo'] = history.history['foo']
mh.plot_scoring(history, 'foo', ['foo'], 'foo')
class TestMLConfig(unittest.TestCase):
def test_ctor_default(self):
self.assertRaises(KeyError, mh.MLConfig)
def test_ctor(self):
cfg = mh.MLConfig(branch_name='foo', variable_list=[], selection=None)
self.assertEqual('foo', cfg.score_name)
self.assertEqual([], cfg.varset)
self.assertIsNone(cfg.scaler)
self.assertIsNone(cfg.scale_algo)
self.assertIsNone(cfg.selection)
def test_equality(self):
cfg = mh.MLConfig(branch_name='foo', variable_list=[], selection=None)
cfg2 = mh.MLConfig(branch_name='foo', variable_list=[], selection=None)
self.assertEqual(cfg, cfg2)
def test_inequality(self):
cfg = mh.MLConfig(branch_name='foo', variable_list=[], selection=None)
cfg2 = mh.MLConfig(branch_name='bar', variable_list=[], selection=None)
self.assertNotEqual(cfg, cfg2)
def test_inequality_scaler(self):
scaler = mh.DataScaler()
cfg = mh.MLConfig(branch_name='foo', variable_list=[], selection=None, scaler=scaler)
cfg2 = mh.MLConfig(branch_name='bar', variable_list=[], selection=None)
self.assertNotEqual(cfg, cfg2)
self.assertNotEqual(cfg2, cfg)
def test_inequality_scaler_algo(self):
scaler_def = mh.DataScaler()
scaler = mh.DataScaler('foo')
cfg = mh.MLConfig(branch_name='foo', variable_list=[], selection=None, scaler=scaler)
cfg2 = mh.MLConfig(branch_name='bar', variable_list=[], selection=None, scaler=scaler_def)
self.assertNotEqual(cfg, cfg2)
self.assertNotEqual(cfg2, cfg)
def test_inequality_type(self):
cfg = mh.MLConfig(branch_name='foo', variable_list=[], selection=None)
self.assertNotEqual(cfg, 5.)
def test_handle_ctor(self):
handle = mh.MLConfigHandle(branch_name='foo', variable_list=[], selection=None)
cfg = mh.MLConfig(branch_name='foo', variable_list=[], selection=None)
self.assertEqual(cfg, handle.config)
self.assertEqual('.', handle.output_path)
self.assertEqual('./ml_config_summary.pkl', handle.file_name)
def test_print(self):
handle = mh.MLConfig(branch_name='foo', variable_list=['foo'], selection=['bar'])
print_out = 'Attached ML branch foo was created with the following configuration \nvariables: \n\t foo\n' \
'selection: \n\t bar\nscaler: None\n'
self.assertEqual(print_out, handle.__str__())
class TestRootNumpyConverter(unittest.TestCase):
def test_ctor(self):
converter = mh.Root2NumpyConverter(['foo'])
self.assertEqual(['foo'], converter.branches)
def test_ctor_no_list(self):
converter = mh.Root2NumpyConverter('foo')
self.assertEqual(['foo'], converter.branches)
def test_merge(self):
arr1 = np.array([1, 2])
arr2 = np.array([3, 4])
arr3 = np.array([5, 6])
arr4 = np.array([7, 8])
converter = mh.Root2NumpyConverter('foo')
data, labels = converter.merge([arr1, arr2], [arr3, arr4])
np.testing.assert_array_equal(np.array([i+1 for i in range(8)]), data)
np.testing.assert_array_equal(np.array([1]*4+[0]*4), labels)
@patch.object(root_numpy, 'tree2array', lambda x, **kwargs: (x, kwargs))
def test_convert(self):
converter = mh.Root2NumpyConverter(['foo'])
data = converter.convert_to_array(None, 'sel', 1000)
self.assertIsNone(data[0])
self.assertEqual({'branches': ['foo'], 'selection': 'sel', 'start': 0, 'stop': 1000}, data[1])
class TestTrainingReader(unittest.TestCase):
def test_default_ctor(self):
reader = mh.TrainingReader()
self.assertEqual('', reader.mode)
self.assertFalse(reader.numpy_input)
@mock.patch.object(pd, 'read_json', lambda _: None)
@patch(builtin + ".open", new_callable=mock_open)
def test_ctor_json(self, _):
reader = mh.TrainingReader(input_file_list=['foo.json'])
self.assertEqual('pandas', reader.mode)
self.assertFalse(reader.numpy_input)
self.assertEqual({'foo.json': None}, reader.data)
def test_ctor_numpy_list(self):
reader = mh.TrainingReader(input_file=['foo.npy', 'bar.npy'])
self.assertEqual('', reader.mode)
self.assertTrue(reader.numpy_input)
def test_ctor_numpy(self):
reader = mh.TrainingReader(input_file='foo.npy', signal_tree_names=['sig'], bkg_tree_names=['bkg'])
self.assertEqual('', reader.mode)
self.assertFalse(reader.numpy_input)
self.assertEqual(['sig'], reader.signal_tree_names)
self.assertEqual(['bkg'], reader.bkg_tree_names)
def test_parse_tree_names(self):
reader = mh.TrainingReader(input_file='foo.npy', signal_tree_names=['sig'], bkg_tree_names=['bkg'])
sig_train, bkg_train, sig_eval, bkg_eval = reader.parse_tree_names()
self.assertEqual(['train_sig'], sig_train)
self.assertEqual(['eval_sig'], sig_eval)
self.assertEqual(['train_bkg'], bkg_train)
self.assertEqual(['eval_bkg'], bkg_eval)
@mock.patch.object(FileHandle, 'get_object_by_name', lambda _, x: x)
def test_get_trees(self):
reader = mh.TrainingReader(input_file='foo.npy', signal_tree_names=['sig'], bkg_tree_names=['bkg'])
sig_train, bkg_train, sig_eval, bkg_eval = reader.get_trees()
self.assertEqual(['train_sig'], sig_train)
self.assertEqual(['eval_sig'], sig_eval)
self.assertEqual(['train_bkg'], bkg_train)
self.assertEqual(['eval_bkg'], bkg_eval)
def test_prepare_data(self):
reader = mh.TrainingReader()
reader.mode = 'pandas'
reader.data = {'_foo': pd.DataFrame({'var1': [1., 2.]}), '_bar': | pd.DataFrame({'var1': [3., 4.]}) | pandas.DataFrame |
""" test fancy indexing & misc """
from datetime import datetime
import re
import weakref
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
)
import pandas as pd
from pandas import (
DataFrame,
Index,
NaT,
Series,
date_range,
offsets,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.api import Float64Index
from pandas.tests.indexing.common import _mklbl
from pandas.tests.indexing.test_floats import gen_obj
# ------------------------------------------------------------------------
# Indexing test cases
class TestFancy:
"""pure get/set item & fancy indexing"""
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
# invalid
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df.loc[df.index[2:5], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
# valid
df.loc[df.index[2:6], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
result = df.loc[df.index[2:6], "bar"]
expected = Series(
[2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6], name="bar"
)
tm.assert_series_equal(result, expected)
def test_setitem_ndarray_1d_2(self):
# GH5508
# dtype getting changed?
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df[2:5] = np.arange(1, 4) * 1j
def test_getitem_ndarray_3d(
self, index, frame_or_series, indexer_sli, using_array_manager
):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
msgs = []
if frame_or_series is Series and indexer_sli in [tm.setitem, tm.iloc]:
msgs.append(r"Wrong number of dimensions. values.ndim > ndim \[3 > 1\]")
if using_array_manager:
msgs.append("Passed array should be 1-dimensional")
if frame_or_series is Series or indexer_sli is tm.iloc:
msgs.append(r"Buffer has wrong number of dimensions \(expected 1, got 3\)")
if using_array_manager:
msgs.append("indexer should be 1-dimensional")
if indexer_sli is tm.loc or (
frame_or_series is Series and indexer_sli is tm.setitem
):
msgs.append("Cannot index with multidimensional key")
if frame_or_series is DataFrame and indexer_sli is tm.setitem:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, pd.IntervalIndex) and indexer_sli is tm.iloc:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, (pd.TimedeltaIndex, pd.DatetimeIndex, pd.PeriodIndex)):
msgs.append("Data must be 1-dimensional")
if len(index) == 0 or isinstance(index, pd.MultiIndex):
msgs.append("positional indexers are out-of-bounds")
msg = "|".join(msgs)
potential_errors = (IndexError, ValueError, NotImplementedError)
with pytest.raises(potential_errors, match=msg):
idxr[nd3]
def test_setitem_ndarray_3d(self, index, frame_or_series, indexer_sli):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
if indexer_sli is tm.iloc:
err = ValueError
msg = f"Cannot set values with ndim > {obj.ndim}"
else:
err = ValueError
msg = "|".join(
[
r"Buffer has wrong number of dimensions \(expected 1, got 3\)",
"Cannot set values with ndim > 1",
"Index data must be 1-dimensional",
"Data must be 1-dimensional",
"Array conditional must be same shape as self",
]
)
with pytest.raises(err, match=msg):
idxr[nd3] = 0
def test_getitem_ndarray_0d(self):
# GH#24924
key = np.array(0)
# dataframe __getitem__
df = DataFrame([[1, 2], [3, 4]])
result = df[key]
expected = Series([1, 3], name=0)
tm.assert_series_equal(result, expected)
# series __getitem__
ser = Series([1, 2])
result = ser[key]
assert result == 1
def test_inf_upcast(self):
# GH 16957
# We should be able to use np.inf as a key
# np.inf should cause an index to convert to float
# Test with np.inf in rows
df = DataFrame(columns=[0])
df.loc[1] = 1
df.loc[2] = 2
df.loc[np.inf] = 3
# make sure we can look up the value
assert df.loc[np.inf, 0] == 3
result = df.index
expected = Float64Index([1, 2, np.inf])
tm.assert_index_equal(result, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df["c"] = np.nan
assert df["c"].dtype == np.float64
df.loc[0, "c"] = "foo"
expected = DataFrame(
[{"a": 1, "b": np.nan, "c": "foo"}, {"a": 3, "b": 2, "c": np.nan}]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("val", [3.14, "wxyz"])
def test_setitem_dtype_upcast2(self, val):
# GH10280
df = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3),
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left = df.copy()
left.loc["a", "bar"] = val
right = DataFrame(
[[0, val, 2], [3, 4, 5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_integer_dtype(left["foo"])
assert is_integer_dtype(left["baz"])
def test_setitem_dtype_upcast3(self):
left = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3) / 10.0,
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left.loc["a", "bar"] = "wxyz"
right = DataFrame(
[[0, "wxyz", 0.2], [0.3, 0.4, 0.5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_float_dtype(left["foo"])
assert is_float_dtype(left["baz"])
def test_dups_fancy_indexing(self):
# GH 3455
df = tm.makeCustomDataframe(10, 3)
df.columns = ["a", "a", "b"]
result = df[["b", "a"]].columns
expected = Index(["b", "a", "a"])
tm.assert_index_equal(result, expected)
def test_dups_fancy_indexing_across_dtypes(self):
# across dtypes
df = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], columns=list("aaaaaaa"))
df.head()
str(df)
result = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]])
result.columns = list("aaaaaaa")
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
def test_dups_fancy_indexing_not_in_order(self):
# GH 3561, dups not in selected order
df = DataFrame(
{"test": [5, 7, 9, 11], "test1": [4.0, 5, 6, 7], "other": list("abcd")},
index=["A", "A", "B", "C"],
)
rows = ["C", "B"]
expected = DataFrame(
{"test": [11, 9], "test1": [7.0, 6], "other": ["d", "c"]}, index=rows
)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ["C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
# see GH5553, make sure we use the right indexer
rows = ["F", "G", "H", "C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
def test_dups_fancy_indexing_only_missing_label(self):
# List containing only missing label
dfnu = DataFrame(np.random.randn(5, 3), index=list("AABCD"))
with pytest.raises(
KeyError,
match=re.escape(
"\"None of [Index(['E'], dtype='object')] are in the [index]\""
),
):
dfnu.loc[["E"]]
# ToDo: check_index_type can be True after GH 11497
@pytest.mark.parametrize("vals", [[0, 1, 2], list("abc")])
def test_dups_fancy_indexing_missing_label(self, vals):
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": vals})
with pytest.raises(KeyError, match="not in index"):
df.loc[[0, 8, 0]]
def test_dups_fancy_indexing_non_unique(self):
# non unique with non unique selector
df = DataFrame({"test": [5, 7, 9, 11]}, index=["A", "A", "B", "C"])
with pytest.raises(KeyError, match="not in index"):
df.loc[["A", "A", "E"]]
def test_dups_fancy_indexing2(self):
# GH 5835
# dups on index and missing values
df = DataFrame(np.random.randn(5, 5), columns=["A", "B", "B", "B", "A"])
with pytest.raises(KeyError, match="not in index"):
df.loc[:, ["A", "B", "C"]]
def test_dups_fancy_indexing3(self):
# GH 6504, multi-axis indexing
df = DataFrame(
np.random.randn(9, 2), index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=["a", "b"]
)
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ["a", "b"]]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_duplicate_int_indexing(self, indexer_sl):
# GH 17347
ser = Series(range(3), index=[1, 1, 3])
expected = Series(range(2), index=[1, 1])
result = indexer_sl(ser)[[1]]
tm.assert_series_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame(
{"a": {1: "aaa", 2: "bbb", 3: "ccc"}, "b": {1: 111, 2: 222, 3: 333}}
)
# this works, new column is created correctly
df["test"] = df["a"].apply(lambda x: "_" if x == "aaa" else x)
# this does not work, ie column test is not changed
idx = df["test"] == "_"
temp = df.loc[idx, "a"].apply(lambda x: "-----" if x == "aaa" else x)
df.loc[idx, "test"] = temp
assert df.iloc[0, 2] == "-----"
def test_multitype_list_index_access(self):
# GH 10610
df = DataFrame(np.random.random((10, 5)), columns=["a"] + [20, 21, 22, 23])
with pytest.raises(KeyError, match=re.escape("'[26, -8] not in index'")):
df[[22, 26, -8]]
assert df[21].shape[0] == df.shape[0]
def test_set_index_nan(self):
# GH 3586
df = DataFrame(
{
"PRuid": {
17: "nonQC",
18: "nonQC",
19: "nonQC",
20: "10",
21: "11",
22: "12",
23: "13",
24: "24",
25: "35",
26: "46",
27: "47",
28: "48",
29: "59",
30: "10",
},
"QC": {
17: 0.0,
18: 0.0,
19: 0.0,
20: np.nan,
21: np.nan,
22: np.nan,
23: np.nan,
24: 1.0,
25: np.nan,
26: np.nan,
27: np.nan,
28: np.nan,
29: np.nan,
30: np.nan,
},
"data": {
17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006,
},
"year": {
17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986,
},
}
).reset_index()
result = (
df.set_index(["year", "PRuid", "QC"])
.reset_index()
.reindex(columns=df.columns)
)
tm.assert_frame_equal(result, df)
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame(
{
"FC": ["a", "b", "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": list(range(6)),
"col2": list(range(6, 12)),
}
)
df.iloc[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isna()
cols = ["col1", "col2"]
dft = df2 * 2
dft.iloc[3, 3] = np.nan
expected = DataFrame(
{
"FC": ["a", np.nan, "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": Series([0, 1, 4, 6, 8, 10]),
"col2": [12, 7, 16, np.nan, 20, 22],
}
)
# frame on rhs
df2.loc[mask, cols] = dft.loc[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
# coerces to float64 because values has float64 dtype
# GH 14001
expected = DataFrame(
{
"FC": ["a", np.nan, "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": [0.0, 1.0, 4.0, 6.0, 8.0, 10.0],
"col2": [12, 7, 16, np.nan, 20, 22],
}
)
df2 = df.copy()
df2.loc[mask, cols] = dft.loc[mask, cols].values
tm.assert_frame_equal(df2, expected)
def test_multi_assign_broadcasting_rhs(self):
# broadcasting on the rhs is required
df = DataFrame(
{
"A": [1, 2, 0, 0, 0],
"B": [0, 0, 0, 10, 11],
"C": [0, 0, 0, 10, 11],
"D": [3, 4, 5, 6, 7],
}
)
expected = df.copy()
mask = expected["A"] == 0
for col in ["A", "B"]:
expected.loc[mask, col] = df["D"]
df.loc[df["A"] == 0, ["A", "B"]] = df["D"]
tm.assert_frame_equal(df, expected)
# TODO(ArrayManager) setting single item with an iterable doesn't work yet
# in the "split" path
@td.skip_array_manager_not_yet_implemented
def test_setitem_list(self):
# GH 6043
# iloc with a list
df = DataFrame(index=[0, 1], columns=[0])
df.iloc[1, 0] = [1, 2, 3]
df.iloc[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.iloc[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
        # dtype should properly raise KeyError
df = DataFrame([1], Index([pd.Timestamp("2011-01-01")], dtype=object))
assert df.index._is_all_dates
with pytest.raises(KeyError, match="'2011'"):
df["2011"]
with pytest.raises(KeyError, match="'2011'"):
df.loc["2011", 0]
def test_string_slice_empty(self):
# GH 14424
df = DataFrame()
assert not df.index._is_all_dates
with pytest.raises(KeyError, match="'2011'"):
df["2011"]
with pytest.raises(KeyError, match="^0$"):
df.loc["2011", 0]
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame(
[["1", "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame(
[[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame(
[[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, "A"] = df.loc[:, "A"].astype(np.int64)
expected = DataFrame(
[[1, "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ["B", "C"]] = df.loc[:, ["B", "C"]].astype(np.int64)
expected = DataFrame(
[["1", 2, 3, ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
def test_astype_assignment_full_replacements(self):
# full replacements / no nans
df = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
df.iloc[:, 0] = df["A"].astype(np.int64)
expected = DataFrame({"A": [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
df.loc[:, "A"] = df["A"].astype(np.int64)
expected = DataFrame({"A": [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [tm.getitem, tm.loc])
def test_index_type_coercion(self, indexer):
# GH 11836
        # if we have an index type and set it with something that looks
        # to numpy like the same, but is actually not the same type
        # (e.g. setting with a float or the string '0'),
        # then we need to coerce to object
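        # Illustrative example of the coercions checked below (added note,
        # not part of the original test): starting from an integer index,
        #   s.loc[0.1] = 0   -> the index becomes a float index
        #   s.loc["0"] = 0   -> the index becomes object dtype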
# integer indexes
for s in [Series(range(5)), Series(range(5), index=range(1, 6))]:
assert s.index.is_integer()
s2 = s.copy()
indexer(s2)[0.1] = 0
assert s2.index.is_floating()
assert indexer(s2)[0.1] == 0
s2 = s.copy()
indexer(s2)[0.0] = 0
exp = s.index
if 0 not in s:
exp = Index(s.index.tolist() + [0])
tm.assert_index_equal(s2.index, exp)
s2 = s.copy()
indexer(s2)["0"] = 0
assert s2.index.is_object()
for s in [Series(range(5), index=np.arange(5.0))]:
assert s.index.is_floating()
s2 = s.copy()
indexer(s2)[0.1] = 0
assert s2.index.is_floating()
assert indexer(s2)[0.1] == 0
s2 = s.copy()
indexer(s2)[0.0] = 0
tm.assert_index_equal(s2.index, s.index)
s2 = s.copy()
indexer(s2)["0"] = 0
assert s2.index.is_object()
class TestMisc:
def test_float_index_to_mixed(self):
df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})
df["a"] = 10
expected = DataFrame({0.0: df[0.0], 1.0: df[1.0], "a": [10] * 10})
tm.assert_frame_equal(expected, df)
def test_float_index_non_scalar_assignment(self):
df = DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]}, index=[1.0, 2.0, 3.0])
df.loc[df.index[:2]] = 1
expected = DataFrame({"a": [1, 1, 3], "b": [1, 1, 5]}, index=df.index)
tm.assert_frame_equal(expected, df)
def test_loc_setitem_fullindex_views(self):
df = DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]}, index=[1.0, 2.0, 3.0])
df2 = df.copy()
df.loc[df.index] = df.loc[df.index]
tm.assert_frame_equal(df, df2)
def test_rhs_alignment(self):
# GH8258, tests that both rows & columns are aligned to what is
# assigned to. covers both uniform data-type & multi-type cases
def run_tests(df, rhs, right_loc, right_iloc):
# label, index, slice
lbl_one, idx_one, slice_one = list("bcd"), [1, 2, 3], slice(1, 4)
lbl_two, idx_two, slice_two = ["joe", "jolie"], [1, 2], slice(1, 3)
left = df.copy()
left.loc[lbl_one, lbl_two] = rhs
tm.assert_frame_equal(left, right_loc)
left = df.copy()
left.iloc[idx_one, idx_two] = rhs
tm.assert_frame_equal(left, right_iloc)
left = df.copy()
left.iloc[slice_one, slice_two] = rhs
tm.assert_frame_equal(left, right_iloc)
xs = np.arange(20).reshape(5, 4)
cols = ["jim", "joe", "jolie", "joline"]
df = DataFrame(xs, columns=cols, index=list("abcde"), dtype="int64")
        # right hand side; permute the indices and multiply by -2
rhs = -2 * df.iloc[3:0:-1, 2:0:-1]
# expected `right` result; just multiply by -2
right_iloc = df.copy()
right_iloc["joe"] = [1, 14, 10, 6, 17]
right_iloc["jolie"] = [2, 13, 9, 5, 18]
right_iloc.iloc[1:4, 1:3] *= -2
right_loc = df.copy()
right_loc.iloc[1:4, 1:3] *= -2
# run tests with uniform dtypes
run_tests(df, rhs, right_loc, right_iloc)
# make frames multi-type & re-run tests
for frame in [df, rhs, right_loc, right_iloc]:
frame["joe"] = frame["joe"].astype("float64")
frame["jolie"] = frame["jolie"].map("@{}".format)
right_iloc["joe"] = [1.0, "@-28", "@-20", "@-12", 17.0]
right_iloc["jolie"] = ["@2", -26.0, -18.0, -10.0, "@18"]
run_tests(df, rhs, right_loc, right_iloc)
def test_str_label_slicing_with_negative_step(self):
SLC = pd.IndexSlice
for idx in [_mklbl("A", 20), np.arange(20) + 100, np.linspace(100, 150, 20)]:
idx = Index(idx)
ser = Series(np.arange(20), index=idx)
tm.assert_indexing_slices_equivalent(ser, SLC[idx[9] :: -1], SLC[9::-1])
tm.assert_indexing_slices_equivalent(ser, SLC[: idx[9] : -1], SLC[:8:-1])
tm.assert_indexing_slices_equivalent(
ser, SLC[idx[13] : idx[9] : -1], SLC[13:8:-1]
)
tm.assert_indexing_slices_equivalent(
ser, SLC[idx[9] : idx[13] : -1], SLC[:0]
)
def test_slice_with_zero_step_raises(self, indexer_sl, frame_or_series):
obj = frame_or_series(np.arange(20), index=_mklbl("A", 20))
with pytest.raises(ValueError, match="slice step cannot be zero"):
indexer_sl(obj)[::0]
def test_loc_setitem_indexing_assignment_dict_already_exists(self):
index = Index([-5, 0, 5], name="z")
df = DataFrame({"x": [1, 2, 6], "y": [2, 2, 8]}, index=index)
expected = df.copy()
rhs = {"x": 9, "y": 99}
df.loc[5] = rhs
expected.loc[5] = [9, 99]
tm.assert_frame_equal(df, expected)
# GH#38335 same thing, mixed dtypes
df = DataFrame({"x": [1, 2, 6], "y": [2.0, 2.0, 8.0]}, index=index)
df.loc[5] = rhs
expected = DataFrame({"x": [1, 2, 9], "y": [2.0, 2.0, 99.0]}, index=index)
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_indexing_dtypes_on_empty(self):
# Check that .iloc returns correct dtypes GH9983
df = DataFrame({"a": [1, 2, 3], "b": ["b", "b2", "b3"]})
df2 = df.iloc[[], :]
assert df2.loc[:, "a"].dtype == np.int64
tm.assert_series_equal(df2.loc[:, "a"], df2.iloc[:, 0])
@pytest.mark.parametrize("size", [5, 999999, 1000000])
def test_loc_range_in_series_indexing(self, size):
# range can cause an indexing error
# GH 11652
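        # sizes 999999 and 1000000 presumably straddle a one-million-element
        # cutoff in the indexing engine (an assumption about why these
        # particular sizes were chosen)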
s = Series(index=range(size), dtype=np.float64)
s.loc[range(1)] = 42
tm.assert_series_equal(s.loc[range(1)], Series(42.0, index=[0]))
s.loc[range(2)] = 43
tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=[0, 1]))
def test_partial_boolean_frame_indexing(self):
# GH 17170
df = DataFrame(
np.arange(9.0).reshape(3, 3), index=list("abc"), columns=list("ABC")
)
index_df = DataFrame(1, index=list("ab"), columns=list("AB"))
result = df[index_df.notnull()]
expected = DataFrame(
np.array([[0.0, 1.0, np.nan], [3.0, 4.0, np.nan], [np.nan] * 3]),
index=list("abc"),
columns=list("ABC"),
)
tm.assert_frame_equal(result, expected)
def test_no_reference_cycle(self):
df = DataFrame({"a": [0, 1], "b": [2, 3]})
for name in ("loc", "iloc", "at", "iat"):
getattr(df, name)
wr = weakref.ref(df)
del df
assert wr() is None
def test_label_indexing_on_nan(self, nulls_fixture):
# GH 32431
df = Series([1, "{1,2}", 1, nulls_fixture])
vc = df.value_counts(dropna=False)
result1 = vc.loc[nulls_fixture]
result2 = vc[nulls_fixture]
expected = 1
assert result1 == expected
assert result2 == expected
class TestDataframeNoneCoercion:
EXPECTED_SINGLE_ROW_RESULTS = [
# For numeric series, we should coerce to NaN.
([1, 2, 3], [np.nan, 2, 3]),
([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),
# For datetime series, we should coerce to NaT.
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
# For objects, we should preserve the None value.
(["foo", "bar", "baz"], [None, "bar", "baz"]),
]
@pytest.mark.parametrize("expected", EXPECTED_SINGLE_ROW_RESULTS)
def test_coercion_with_loc(self, expected):
start_data, expected_result = expected
start_dataframe = DataFrame({"foo": start_data})
start_dataframe.loc[0, ["foo"]] = None
expected_dataframe = DataFrame({"foo": expected_result})
tm.assert_frame_equal(start_dataframe, expected_dataframe)
@pytest.mark.parametrize("expected", EXPECTED_SINGLE_ROW_RESULTS)
def test_coercion_with_setitem_and_dataframe(self, expected):
start_data, expected_result = expected
start_dataframe = DataFrame({"foo": start_data})
start_dataframe[start_dataframe["foo"] == start_dataframe["foo"][0]] = None
expected_dataframe = DataFrame({"foo": expected_result})
tm.assert_frame_equal(start_dataframe, expected_dataframe)
@pytest.mark.parametrize("expected", EXPECTED_SINGLE_ROW_RESULTS)
def test_none_coercion_loc_and_dataframe(self, expected):
start_data, expected_result = expected
start_dataframe = DataFrame({"foo": start_data})
start_dataframe.loc[start_dataframe["foo"] == start_dataframe["foo"][0]] = None
expected_dataframe = DataFrame({"foo": expected_result})
tm.assert_frame_equal(start_dataframe, expected_dataframe)
def test_none_coercion_mixed_dtypes(self):
start_dataframe = DataFrame(
{
"a": [1, 2, 3],
"b": [1.0, 2.0, 3.0],
"c": [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
"d": ["a", "b", "c"],
}
)
start_dataframe.iloc[0] = None
exp = DataFrame(
{
"a": [np.nan, 2, 3],
"b": [np.nan, 2.0, 3.0],
"c": [NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)],
"d": [None, "b", "c"],
}
)
tm.assert_frame_equal(start_dataframe, exp)
class TestDatetimelikeCoercion:
def test_setitem_dt64_string_scalar(self, tz_naive_fixture, indexer_sli):
# dispatching _can_hold_element to underlying DatetimeArray
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
ser = Series(dti)
values = ser._values
newval = "2018-01-01"
values._validate_setitem_value(newval)
indexer_sli(ser)[0] = newval
if tz is None:
# TODO(EA2D): we can make this no-copy in tz-naive case too
assert ser.dtype == dti.dtype
assert ser._values._data is values._data
else:
assert ser._values is values
@pytest.mark.parametrize("box", [list, np.array, pd.array, pd.Categorical, Index])
@pytest.mark.parametrize(
"key", [[0, 1], slice(0, 2), np.array([True, True, False])]
)
def test_setitem_dt64_string_values(self, tz_naive_fixture, indexer_sli, key, box):
        # dispatching _can_hold_element to underlying DatetimeArray
tz = tz_naive_fixture
if isinstance(key, slice) and indexer_sli is tm.loc:
key = slice(0, 1)
dti = date_range("2016-01-01", periods=3, tz=tz)
ser = Series(dti)
values = ser._values
newvals = box(["2019-01-01", "2010-01-02"])
values._validate_setitem_value(newvals)
indexer_sli(ser)[key] = newvals
if tz is None:
# TODO(EA2D): we can make this no-copy in tz-naive case too
assert ser.dtype == dti.dtype
assert ser._values._data is values._data
else:
assert ser._values is values
@pytest.mark.parametrize("scalar", ["3 Days", offsets.Hour(4)])
def test_setitem_td64_scalar(self, indexer_sli, scalar):
        # dispatching _can_hold_element to underlying TimedeltaArray
tdi = timedelta_range("1 Day", periods=3)
ser = Series(tdi)
values = ser._values
values._validate_setitem_value(scalar)
indexer_sli(ser)[0] = scalar
assert ser._values._data is values._data
@pytest.mark.parametrize("box", [list, np.array, pd.array, pd.Categorical, Index])
@pytest.mark.parametrize(
"key", [[0, 1], slice(0, 2), np.array([True, True, False])]
)
def test_setitem_td64_string_values(self, indexer_sli, key, box):
        # dispatching _can_hold_element to underlying TimedeltaArray
if isinstance(key, slice) and indexer_sli is tm.loc:
key = slice(0, 1)
tdi = timedelta_range("1 Day", periods=3)
ser = Series(tdi)
values = ser._values
newvals = box(["10 Days", "44 hours"])
values._validate_setitem_value(newvals)
indexer_sli(ser)[key] = newvals
assert ser._values._data is values._data
def test_extension_array_cross_section():
# A cross-section of a homogeneous EA should be an EA
df = DataFrame(
{
"A": pd.array([1, 2], dtype="Int64"),
"B": pd.array([3, 4], dtype="Int64"),
},
index=["a", "b"],
)
expected = Series( | pd.array([1, 3], dtype="Int64") | pandas.array |
"""
test date_range, bdate_range construction from the convenience range functions
"""
from datetime import datetime, time, timedelta
import numpy as np
import pytest
import pytz
from pytz import timezone
from pandas._libs.tslibs import timezones
from pandas._libs.tslibs.offsets import BDay, CDay, DateOffset, MonthEnd, prefix_mapping
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DatetimeIndex, Timestamp, bdate_range, date_range, offsets
import pandas._testing as tm
from pandas.core.arrays.datetimes import generate_range
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestTimestampEquivDateRange:
# Older tests in TestTimeSeries constructed their `stamp` objects
# using `date_range` instead of the `Timestamp` constructor.
# TestTimestampEquivDateRange checks that these are equivalent in the
# pertinent cases.
def test_date_range_timestamp_equiv(self):
rng = date_range("20090415", "20090519", tz="US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_dateutil(self):
rng = date_range("20090415", "20090519", tz="dateutil/US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="dateutil/US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_explicit_pytz(self):
rng = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=pytz.timezone("US/Eastern"), freq="D")
assert ts == stamp
@td.skip_if_windows_python_3
def test_date_range_timestamp_equiv_explicit_dateutil(self):
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
rng = date_range("20090415", "20090519", tz=gettz("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=gettz("US/Eastern"), freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_from_datetime_instance(self):
datetime_instance = datetime(2014, 3, 4)
# build a timestamp with a frequency, since then it supports
# addition/subtraction of integers
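        # e.g. a freq="D" Timestamp can be shifted to the next day with
        # `ts + 1` in these pandas versions (illustrative, not asserted here)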
timestamp_instance = date_range(datetime_instance, periods=1, freq="D")[0]
ts = Timestamp(datetime_instance, freq="D")
assert ts == timestamp_instance
def test_date_range_timestamp_equiv_preserve_frequency(self):
timestamp_instance = date_range("2014-03-05", periods=1, freq="D")[0]
ts = Timestamp("2014-03-05", freq="D")
assert timestamp_instance == ts
class TestDateRanges:
def test_date_range_nat(self):
# GH#11587
msg = "Neither `start` nor `end` can be NaT"
with pytest.raises(ValueError, match=msg):
date_range(start="2016-01-01", end=pd.NaT, freq="D")
with pytest.raises(ValueError, match=msg):
| date_range(start=pd.NaT, end="2016-01-01", freq="D") | pandas.date_range |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# You can run this test by first running `nPython.exe` (with mono or otherwise):
# $ ./nPython.exe ReportChartTests.py
import numpy as np
import pandas as pd
from datetime import datetime
from ReportCharts import ReportCharts
charts = ReportCharts()
## Test GetReturnsPerTrade
backtest = list(np.random.normal(0, 1, 1000))
live = list(np.random.normal(0.5, 1, 400))
result = charts.GetReturnsPerTrade([], [])
result = charts.GetReturnsPerTrade(backtest, [])
result = charts.GetReturnsPerTrade(backtest, live)
## Test GetCumulativeReturnsPlot
time = [ | pd.Timestamp(x) | pandas.Timestamp |
# -*- coding: utf-8 -*-
from datetime import timedelta, time
import numpy as np
from pandas import (DatetimeIndex, Float64Index, Index, Int64Index,
NaT, Period, PeriodIndex, Series, Timedelta,
TimedeltaIndex, date_range, period_range,
timedelta_range, notnull)
import pandas.util.testing as tm
import pandas as pd
from pandas.lib import Timestamp
from .common import Base
class DatetimeLike(Base):
def test_shift_identity(self):
idx = self.create_index()
self.assert_index_equal(idx, idx.shift(0))
def test_str(self):
# test the string repr
idx = self.create_index()
idx.name = 'foo'
self.assertFalse("length=%s" % len(idx) in str(idx))
self.assertTrue("'foo'" in str(idx))
self.assertTrue(idx.__class__.__name__ in str(idx))
if hasattr(idx, 'tz'):
if idx.tz is not None:
self.assertTrue(idx.tz in str(idx))
if hasattr(idx, 'freq'):
self.assertTrue("freq='%s'" % idx.freqstr in str(idx))
def test_view(self):
super(DatetimeLike, self).test_view()
i = self.create_index()
i_view = i.view('i8')
result = self._holder(i)
tm.assert_index_equal(result, i)
i_view = i.view(self._holder)
result = self._holder(i)
tm.assert_index_equal(result, i_view)
class TestDatetimeIndex(DatetimeLike, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makeDateIndex(10))
self.setup_indices()
def create_index(self):
return date_range('20130101', periods=5)
def test_shift(self):
# test shift for datetimeIndex and non datetimeIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05',
'2013-01-06'], freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(-1)
expected = DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
'2013-01-03', '2013-01-04'],
freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D')
expected = DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
'2013-01-10',
'2013-01-11'], freq='D')
self.assert_index_equal(result, expected)
def test_construction_with_alt(self):
i = pd.date_range('20130101', periods=5, freq='H', tz='US/Eastern')
i2 = DatetimeIndex(i, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
self.assert_index_equal(i2, expected)
# incompat tz/dtype
self.assertRaises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_pickle_compat_construction(self):
pass
def test_construction_index_with_mixed_timezones(self):
# GH 11488
# no tz results in DatetimeIndex
result = Index(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
# passing tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 19:00'),
Timestamp('2011-01-03 00:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
def test_construction_index_with_mixed_timezones_with_NaT(self):
# GH 11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
# passing tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 19:00'),
pd.NaT, Timestamp('2011-01-03 00:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
        # different tz: the tz-naive entries are coerced and the result is a
        # tz-aware DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 05:00'),
Timestamp('2011-01-02 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
        # mismatched timezones raise TypeError/ValueError
with tm.assertRaises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
with tm.assertRaises(TypeError):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with tm.assertRaises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
def test_astype(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timestamp('2016-05-16')] + [NaT] * 3, dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([1463356800000000000] +
[-9223372036854775808] * 3, dtype=np.int64)
tm.assert_index_equal(result, expected)
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_index_equal(result, Index(rng.asi8))
self.assert_numpy_array_equal(result.values, rng.asi8)
def test_astype_with_tz(self):
# with tz
rng = date_range('1/1/2000', periods=10, tz='US/Eastern')
result = rng.astype('datetime64[ns]')
expected = (date_range('1/1/2000', periods=10,
tz='US/Eastern')
.tz_convert('UTC').tz_localize(None))
tm.assert_index_equal(result, expected)
# BUG#10442 : testing astype(str) is correct for Series/DatetimeIndex
result = pd.Series(pd.date_range('2012-01-01', periods=3)).astype(str)
expected = pd.Series(
['2012-01-01', '2012-01-02', '2012-01-03'], dtype=object)
tm.assert_series_equal(result, expected)
result = Series(pd.date_range('2012-01-01', periods=3,
tz='US/Eastern')).astype(str)
expected = Series(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
dtype=object)
tm.assert_series_equal(result, expected)
def test_astype_str_compat(self):
# GH 13149, GH 13209
        # verify that we are returning NaT as a string (and not unicode)
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(str)
expected = Index(['2016-05-16', 'NaT', 'NaT', 'NaT'], dtype=object)
tm.assert_index_equal(result, expected)
def test_astype_str(self):
# test astype string - #10442
result = date_range('2012-01-01', periods=4,
name='test_name').astype(str)
expected = Index(['2012-01-01', '2012-01-02', '2012-01-03',
'2012-01-04'], name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with tz and name
result = date_range('2012-01-01', periods=3, name='test_name',
tz='US/Eastern').astype(str)
expected = Index(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
        # test astype string with freq='H' and name
result = date_range('1/1/2011', periods=3, freq='H',
name='test_name').astype(str)
expected = Index(['2011-01-01 00:00:00', '2011-01-01 01:00:00',
'2011-01-01 02:00:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
        # test astype string with freq='H' and timezone
result = date_range('3/6/2012 00:00', periods=2, freq='H',
tz='Europe/London', name='test_name').astype(str)
expected = Index(['2012-03-06 00:00:00+00:00',
'2012-03-06 01:00:00+00:00'],
dtype=object, name='test_name')
tm.assert_index_equal(result, expected)
def test_astype_datetime64(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype('datetime64[ns]')
tm.assert_index_equal(result, idx)
self.assertFalse(result is idx)
result = idx.astype('datetime64[ns]', copy=False)
tm.assert_index_equal(result, idx)
self.assertTrue(result is idx)
idx_tz = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN], tz='EST')
result = idx_tz.astype('datetime64[ns]')
expected = DatetimeIndex(['2016-05-16 05:00:00', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]')
tm.assert_index_equal(result, expected)
def test_astype_raises(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
self.assertRaises(ValueError, idx.astype, float)
self.assertRaises(ValueError, idx.astype, 'timedelta64')
self.assertRaises(ValueError, idx.astype, 'timedelta64[ns]')
self.assertRaises(ValueError, idx.astype, 'datetime64')
self.assertRaises(ValueError, idx.astype, 'datetime64[D]')
def test_where_other(self):
# other is ndarray or Index
i = pd.date_range('20130101', periods=3, tz='US/Eastern')
for arr in [np.nan, pd.NaT]:
result = i.where(notnull(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notnull(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notnull(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_where_tz(self):
i = pd.date_range('20130101', periods=3, tz='US/Eastern')
result = i.where(notnull(i))
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notnull(i2))
expected = i2
tm.assert_index_equal(result, expected)
def test_get_loc(self):
idx = pd.date_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
self.assertEqual(idx.get_loc(idx[1], method), 1)
self.assertEqual(idx.get_loc(idx[1].to_pydatetime(), method), 1)
self.assertEqual(idx.get_loc(str(idx[1]), method), 1)
if method is not None:
self.assertEqual(idx.get_loc(idx[1], method,
tolerance=pd.Timedelta('0 days')),
1)
self.assertEqual(idx.get_loc('2000-01-01', method='nearest'), 0)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest'), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance='1 day'), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance=pd.Timedelta('1D')), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance=np.timedelta64(1, 'D')), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance=timedelta(1)), 1)
with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo')
with tm.assertRaises(KeyError):
idx.get_loc('2000-01-01T03', method='nearest', tolerance='2 hours')
self.assertEqual(idx.get_loc('2000', method='nearest'), slice(0, 3))
self.assertEqual(idx.get_loc('2000-01', method='nearest'), slice(0, 3))
self.assertEqual(idx.get_loc('1999', method='nearest'), 0)
self.assertEqual(idx.get_loc('2001', method='nearest'), 2)
with tm.assertRaises(KeyError):
idx.get_loc('1999', method='pad')
with tm.assertRaises(KeyError):
idx.get_loc('2001', method='backfill')
with tm.assertRaises(KeyError):
idx.get_loc('foobar')
with tm.assertRaises(TypeError):
idx.get_loc(slice(2))
idx = pd.to_datetime(['2000-01-01', '2000-01-04'])
self.assertEqual(idx.get_loc('2000-01-02', method='nearest'), 0)
self.assertEqual(idx.get_loc('2000-01-03', method='nearest'), 1)
self.assertEqual(idx.get_loc('2000-01', method='nearest'), slice(0, 2))
# time indexing
idx = pd.date_range('2000-01-01', periods=24, freq='H')
tm.assert_numpy_array_equal(idx.get_loc(time(12)),
np.array([12], dtype=np.int64))
tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)),
np.array([], dtype=np.int64))
with tm.assertRaises(NotImplementedError):
idx.get_loc(time(12, 30), method='pad')
def test_get_indexer(self):
idx = pd.date_range('2000-01-01', periods=3)
tm.assert_numpy_array_equal(idx.get_indexer(idx), np.array([0, 1, 2]))
target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours',
'1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1]))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2]))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1]))
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour')),
np.array([0, -1, 1]))
with tm.assertRaises(ValueError):
idx.get_indexer(idx[[0]], method='nearest', tolerance='foo')
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
unpickled = self.round_trip_pickle(index)
self.assert_index_equal(index, unpickled)
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern')
self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern')
def test_time_loc(self): # GH8667
from datetime import time
from pandas.index import _SIZE_CUTOFF
ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
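        # one size just below and one just above _SIZE_CUTOFF, presumably so
        # both index-engine lookup paths get exercised (assumption about intent)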
key = time(15, 11, 30)
start = key.hour * 3600 + key.minute * 60 + key.second
step = 24 * 3600
for n in ns:
idx = pd.date_range('2014-11-26', periods=n, freq='S')
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
tm.assert_numpy_array_equal(ts.index.get_loc(key), i)
tm.assert_series_equal(ts[key], ts.iloc[i])
left, right = ts.copy(), ts.copy()
left[key] *= -10
right.iloc[i] *= -10
tm.assert_series_equal(left, right)
def test_time_overflow_for_32bit_machines(self):
# GH8943. On some machines NumPy defaults to np.int32 (for example,
# 32-bit Linux machines). In the function _generate_regular_range
# found in tseries/index.py, `periods` gets multiplied by `strides`
# (which has value 1e9) and since the max value for np.int32 is ~2e9,
# and since those machines won't promote np.int32 to np.int64, we get
# overflow.
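        # Rough arithmetic behind the overflow (added note): 1000 periods
        # * 1e9 ns stride = 1e12, which exceeds the np.int32 maximum of
        # ~2.147e9 but fits easily in np.int64.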
periods = np.int_(1000)
idx1 = pd.date_range(start='2000', periods=periods, freq='S')
self.assertEqual(len(idx1), periods)
idx2 = pd.date_range(end='2000', periods=periods, freq='S')
self.assertEqual(len(idx2), periods)
def test_intersection(self):
first = self.index
second = self.index[5:]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case)
self.assertTrue(tm.equalContents(result, second))
third = Index(['a', 'b', 'c'])
result = first.intersection(third)
expected = pd.Index([], dtype=object)
self.assert_index_equal(result, expected)
def test_union(self):
first = self.index[:5]
second = self.index[5:]
everything = self.index
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.union(case)
self.assertTrue(tm.equalContents(result, everything))
def test_nat(self):
self.assertIs(DatetimeIndex([np.nan])[0], pd.NaT)
def test_ufunc_coercions(self):
idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
delta = np.timedelta64(1, 'D')
for result in [idx + delta, np.add(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
for result in [idx - delta, np.subtract(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
np.timedelta64(3, 'D')])
for result in [idx + delta, np.add(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
freq='3D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '3D')
for result in [idx - delta, np.subtract(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
freq='D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, 'D')
def test_fillna_datetime64(self):
# GH 11343
for tz in ['US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01 09:00', pd.NaT,
'2011-01-01 11:00'])
exp = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'])
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp)
# tz mismatch
exp = pd.Index([pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 10:00', tz=tz),
pd.Timestamp('2011-01-01 11:00')], dtype=object)
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp)
# object
exp = pd.Index([pd.Timestamp('2011-01-01 09:00'), 'x',
pd.Timestamp('2011-01-01 11:00')], dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
idx = pd.DatetimeIndex(
['2011-01-01 09:00', pd.NaT, '2011-01-01 11:00'], tz=tz)
exp = pd.DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], tz=tz)
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp)
exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz),
pd.Timestamp('2011-01-01 10:00'),
pd.Timestamp('2011-01-01 11:00', tz=tz)],
dtype=object)
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp)
# object
exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz),
'x',
pd.Timestamp('2011-01-01 11:00', tz=tz)],
dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
class TestPeriodIndex(DatetimeLike, tm.TestCase):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makePeriodIndex(10))
self.setup_indices()
def create_index(self):
return period_range('20130101', periods=5, freq='D')
def test_astype(self):
# GH 13149, GH 13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
result = idx.astype(object)
expected = Index([Period('2016-05-16', freq='D')] +
[Period(NaT, freq='D')] * 3, dtype='object')
# Hack because of lack of support for Period null checking (GH12759)
tm.assert_index_equal(result[:1], expected[:1])
result_arr = np.asarray([p.ordinal for p in result], dtype=np.int64)
expected_arr = np.asarray([p.ordinal for p in expected],
dtype=np.int64)
tm.assert_numpy_array_equal(result_arr, expected_arr)
# TODO: When GH12759 is resolved, change the above hack to:
# tm.assert_index_equal(result, expected) # now, it raises.
result = idx.astype(int)
expected = Int64Index([16937] + [-9223372036854775808] * 3,
dtype=np.int64)
tm.assert_index_equal(result, expected)
idx = period_range('1990', '2009', freq='A')
result = idx.astype('i8')
self.assert_index_equal(result, Index(idx.asi8))
self.assert_numpy_array_equal(result.values, idx.values)
def test_astype_raises(self):
# GH 13149, GH 13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
self.assertRaises(ValueError, idx.astype, str)
self.assertRaises(ValueError, idx.astype, float)
self.assertRaises(ValueError, idx.astype, 'timedelta64')
self.assertRaises(ValueError, idx.astype, 'timedelta64[ns]')
self.assertRaises(ValueError, idx.astype, 'datetime64')
self.assertRaises(ValueError, idx.astype, 'datetime64[ns]')
def test_shift(self):
# test shift for PeriodIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = PeriodIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05', '2013-01-06'], freq='D')
self.assert_index_equal(result, expected)
def test_pickle_compat_construction(self):
pass
def test_get_loc(self):
idx = pd.period_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
self.assertEqual(idx.get_loc(idx[1], method), 1)
self.assertEqual(
idx.get_loc(idx[1].asfreq('H', how='start'), method), 1)
self.assertEqual(idx.get_loc(idx[1].to_timestamp(), method), 1)
self.assertEqual(
idx.get_loc(idx[1].to_timestamp().to_pydatetime(), method), 1)
self.assertEqual(idx.get_loc(str(idx[1]), method), 1)
idx = pd.period_range('2000-01-01', periods=5)[::2]
self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
tolerance='1 day'), 1)
self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
tolerance=pd.Timedelta('1D')), 1)
self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
tolerance=np.timedelta64(1, 'D')), 1)
self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
tolerance=timedelta(1)), 1)
with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
idx.get_loc('2000-01-10', method='nearest', tolerance='foo')
msg = 'Input has different freq from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(ValueError, msg):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 hour')
with tm.assertRaises(KeyError):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 day')
def test_where(self):
i = self.create_index()
result = i.where(notnull(i))
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notnull(i2))
expected = i2
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = self.create_index()
for arr in [np.nan, pd.NaT]:
result = i.where(notnull(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notnull(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notnull(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_get_indexer(self):
idx = pd.period_range('2000-01-01', periods=3).asfreq('H', how='start')
tm.assert_numpy_array_equal(idx.get_indexer(idx),
np.array([0, 1, 2], dtype=np.int_))
target = pd.PeriodIndex(['1999-12-31T23', '2000-01-01T12',
'2000-01-02T01'], freq='H')
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.int_))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.int_))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.int_))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest',
tolerance='1 hour'),
np.array([0, -1, 1], dtype=np.int_))
msg = 'Input has different freq from PeriodIndex\\(freq=H\\)'
with self.assertRaisesRegexp(ValueError, msg):
idx.get_indexer(target, 'nearest', tolerance='1 minute')
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest',
tolerance='1 day'),
np.array([0, 1, 1], dtype=np.int_))
def test_repeat(self):
# GH10183
idx = pd.period_range('2000-01-01', periods=3, freq='D')
res = idx.repeat(3)
exp = PeriodIndex(idx.values.repeat(3), freq='D')
self.assert_index_equal(res, exp)
self.assertEqual(res.freqstr, 'D')
def test_period_index_indexer(self):
# GH4125
idx = pd.period_range('2002-01', '2003-12', freq='M')
df = pd.DataFrame(pd.np.random.randn(24, 10), index=idx)
self.assert_frame_equal(df, df.ix[idx])
self.assert_frame_equal(df, df.ix[list(idx)])
self.assert_frame_equal(df, df.loc[list(idx)])
self.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]])
self.assert_frame_equal(df, df.loc[list(idx)])
def test_fillna_period(self):
# GH 11343
idx = pd.PeriodIndex(
['2011-01-01 09:00', pd.NaT, '2011-01-01 11:00'], freq='H')
exp = pd.PeriodIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H')
self.assert_index_equal(
idx.fillna(pd.Period('2011-01-01 10:00', freq='H')), exp)
exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'), 'x',
pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
with tm.assertRaisesRegexp(
ValueError,
'Input has different freq=D from PeriodIndex\\(freq=H\\)'):
idx.fillna(pd.Period('2011-01-01', freq='D'))
def test_no_millisecond_field(self):
with self.assertRaises(AttributeError):
DatetimeIndex.millisecond
with self.assertRaises(AttributeError):
DatetimeIndex([]).millisecond
class TestTimedeltaIndex(DatetimeLike, tm.TestCase):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makeTimedeltaIndex(10))
self.setup_indices()
def create_index(self):
return pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
def test_shift(self):
# test shift for TimedeltaIndex
        # GH8083
drange = self.create_index()
result = drange.shift(1)
expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00',
'4 days 01:00:00', '5 days 01:00:00'],
freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D 1s')
expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
'8 days 01:00:03', '9 days 01:00:03',
'10 days 01:00:03'], freq='D')
self.assert_index_equal(result, expected)
def test_astype(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timedelta('1 days 03:46:40')] + [pd.NaT] * 3,
dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([100000000000000] + [-9223372036854775808] * 3,
dtype=np.int64)
tm.assert_index_equal(result, expected)
rng = timedelta_range('1 days', periods=10)
result = rng.astype('i8')
self.assert_index_equal(result, Index(rng.asi8))
self.assert_numpy_array_equal(rng.asi8, result.values)
def test_astype_timedelta64(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN])
result = idx.astype('timedelta64')
expected = Float64Index([1e+14] + [np.NaN] * 3, dtype='float64')
tm.assert_index_equal(result, expected)
result = idx.astype('timedelta64[ns]')
tm.assert_index_equal(result, idx)
self.assertFalse(result is idx)
result = idx.astype('timedelta64[ns]', copy=False)
tm.assert_index_equal(result, idx)
self.assertTrue(result is idx)
def test_astype_raises(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN])
self.assertRaises(ValueError, idx.astype, float)
self.assertRaises(ValueError, idx.astype, str)
self.assertRaises(ValueError, idx.astype, 'datetime64')
self.assertRaises(ValueError, idx.astype, 'datetime64[ns]')
def test_get_loc(self):
idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
for method in [None, 'pad', 'backfill', 'nearest']:
self.assertEqual(idx.get_loc(idx[1], method), 1)
self.assertEqual(idx.get_loc(idx[1].to_pytimedelta(), method), 1)
self.assertEqual(idx.get_loc(str(idx[1]), method), 1)
self.assertEqual(
idx.get_loc(idx[1], 'pad', tolerance=pd.Timedelta(0)), 1)
self.assertEqual(
idx.get_loc(idx[1], 'pad', tolerance=np.timedelta64(0, 's')), 1)
self.assertEqual(idx.get_loc(idx[1], 'pad', tolerance=timedelta(0)), 1)
with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
idx.get_loc(idx[1], method='nearest', tolerance='foo')
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
self.assertEqual(idx.get_loc('1 day 1 hour', method), loc)
def test_get_indexer(self):
idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
tm.assert_numpy_array_equal(idx.get_indexer(idx),
np.array([0, 1, 2], dtype=np.int_))
target = pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.int_))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.int_))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.int_))
res = idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour'))
tm.assert_numpy_array_equal(res, np.array([0, -1, 1], dtype=np.int_))
def test_numeric_compat(self):
idx = self._holder(np.arange(5, dtype='int64'))
didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5, dtype='int64')
tm.assert_index_equal(result,
self._holder(np.arange(5, dtype='int64') * 5))
result = idx * np.arange(5, dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5, dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5, dtype='float64') + 0.1)
tm.assert_index_equal(result, self._holder(np.arange(
5, dtype='float64') * (np.arange(5, dtype='float64') + 0.1)))
# invalid
self.assertRaises(TypeError, lambda: idx * idx)
self.assertRaises(ValueError, lambda: idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda: idx * np.array([1, 2]))
def test_pickle_compat_construction(self):
pass
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
tm.assertIsInstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '4H')
for result in [idx / 2, np.divide(idx, 2)]:
tm.assertIsInstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, 'H')
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
tm.assertIsInstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '-2H')
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
tm.assertIsInstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, None)
def test_fillna_timedelta(self):
# GH 11343
idx = pd.TimedeltaIndex(['1 day', pd.NaT, '3 day'])
exp = pd.TimedeltaIndex(['1 day', '2 day', '3 day'])
self.assert_index_equal(idx.fillna(pd.Timedelta('2 day')), exp)
exp = pd.TimedeltaIndex(['1 day', '3 hour', '3 day'])
self.assert_index_equal(idx.fillna(pd.Timedelta('3 hour')), exp)
exp = pd.Index(
[pd.Timedelta('1 day'), 'x', pd.Timedelta('3 day')], dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
import pandas as pd
import datetime as dt
import string
import time
## Loading the data
df = pd.read_csv("apache.log", sep=" ", names=['host', 'delete', 'logname', 'user', 'time', 'request', 'response', 'bytes', 'url', 'browserLog', 'browser', 'networkClass' ])
df.drop('delete', axis=1, inplace=True)
## Preprocessing
def networkClass(ip):
ip2 = int(ip.split(".")[0])
if ip2 >= 0 and ip2 <= 127:
return "Classe A"
if ip2 >= 128 and ip2 <= 191:
return "Classe B"
if ip2 >= 192 and ip2 <= 223:
return "Classe C"
if ip2 >= 224 and ip2 <= 239:
return "Classe D"
if ip2 >= 240 and ip2 <= 255:
return "Classe E"
def sepDate(timestamp):
parsed = time.strptime(timestamp, "[%Y-%m-%dT%H:%M:%SZ]")
return str(parsed[0]) + "-" + str(parsed[1]) + "-" + str(parsed[2])
def analyseResponse(resp):
if int(resp) >= 100 and int(resp) <= 199:
return "Informational"
if int(resp) >= 200 and int(resp) <= 299:
return "Sucess"
if int(resp) >= 300 and int(resp) <= 399:
return "Redirection"
if int(resp) >= 400 and int(resp) <= 499:
return "Client Error"
if int(resp) >= 500 and int(resp) <= 599:
return "Server Error"
df['browser'] = df['browserLog'].apply(lambda br: br.split(" ")[0])
df['networkClass'] = df['host'].apply(networkClass)
df['hour'] = df['time'].apply(lambda d: time.strptime(d, "[%Y-%m-%dT%H:%M:%SZ]")[3])
df['minute'] = df['time'].apply(lambda d: time.strptime(d, "[%Y-%m-%dT%H:%M:%SZ]")[4])
df['second'] = df['time'].apply(lambda d: time.strptime(d, "[%Y-%m-%dT%H:%M:%SZ]")[5])
df['date'] = df['time'].apply(sepDate)
df['endpoint'] = df['request'].apply(lambda end: end.split("/")[1])
df['statusResponse'] = df['response'].apply(analyseResponse)
## Data analysis
# DataFrame to store the results
dfResult = pd.DataFrame()
# Splitting the data by date
dates = df['date'].value_counts().index.tolist()
for date in dates:
# Subset for the current date
subset_date = df[df['date'] == date]
# Challenge 1: the 5 logins that made the most requests
desafio_01 = subset_date['user'].value_counts().head(5).index.tolist()
# Challenge 2: the 10 most used browsers
desafio_02 = subset_date['browser'].value_counts().head(10).index.tolist()
# Challenge 3: the class C network addresses with the highest number of requests
subset_class = subset_date[subset_date['networkClass'] == 'Classe C']
desafio_03 = subset_class['host'].value_counts().head(5).index.tolist()
# Challenge 4: the hour with the most accesses in the day
desafio_04 = subset_date['hour'].value_counts().head(1).index.tolist()
# Challenge 5: the hour with the highest byte consumption
groupHour = subset_date.groupby(['hour'])
desafio_05 = groupHour['bytes'].sum().sort_values(ascending=False).index.tolist()[0]
# Challenge 6: the endpoint with the highest byte consumption
groupEndpoint = subset_date.groupby(['endpoint'])
desafio_06 = groupEndpoint['bytes'].sum().sort_values(ascending=False).index.tolist()[0]
# Challenge 7: the number of bytes per minute
# Note: here I used the total number of minutes between the first and the last log entry
finalIndex = subset_date.shape[0]
finalIndex = finalIndex - 1
hour_ini = subset_date['hour'].iloc[0]
minu_ini = subset_date['minute'].iloc[0]
hour_fin = subset_date['hour'].iloc[finalIndex]
minu_fin = subset_date['minute'].iloc[finalIndex]
totalMinutes = 0
if hour_ini < hour_fin:
totalMinutes = (60 - minu_ini) + ((hour_fin - (hour_ini + 1)) * 60) + minu_fin
else:
totalMinutes = minu_fin - minu_ini
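# Worked example of the formula above (illustrative times only): first log at 00:05 and
# last log at 02:15 gives totalMinutes = (60 - 5) + ((2 - 1) * 60) + 15 = 130.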
desafio_07 = subset_date['bytes'].sum() / totalMinutes
# Challenge 8: the number of bytes per hour
# Note: here I used the day's total bytes divided by the total number of hours
totalHours = totalMinutes / 60
desafio_08 = subset_date['bytes'].sum() / totalHours
# Challenge 9: the number of users per minute
desafio_09 = subset_date.shape[0] / totalMinutes
# Challenge 10: the number of users per hour
desafio_10 = subset_date.shape[0] / totalHours
# Challenge 11: the number of requests that returned errors, grouped by error code
subset_erro = subset_date[subset_date['statusResponse'] == 'Client Error']
desafio_11 = subset_erro.shape[0]
desafio_11_group = subset_erro.groupby('response').size().sort_values(ascending=False).to_dict()
# Challenge 12: the number of successful requests
desafio_12 = subset_date[subset_date['statusResponse'] == "Success"].shape[0]
# Challenge 13: the number of redirected requests
desafio_13 = subset_date[subset_date['statusResponse'] == "Redirection"].shape[0]
## Saving the result
obj = {'date': date,
'05_login_mais' : [list(desafio_01)],
'10_browser_mais': [list(desafio_02)],
'05_classe_c': [list(desafio_03)],
'hora_mais_acesso': [desafio_04[0]],
'hora_maior_bytes': [desafio_05],
'endpoint_mais_bytes': [desafio_06],
'bytes_minuto' : [desafio_07],
'bytes_hora': [desafio_08],
'usuarios_minuto': [desafio_09],
'usuarios_hora': [desafio_10],
'qtd_erro_cliente': [desafio_11],
'qtd_erro_agrup': [desafio_11_group],
'qtd_sucesso': [desafio_12],
'qtd_redirecionadas': [desafio_13]
}
dfData = pd.DataFrame(data=obj, index=[0])
dfResult = pd.concat([dfResult, dfData])
# coding: utf-8
# In[19]:
from keras.models import model_from_json
import os
import cv2
import glob
import h5py
import pandas as pd
from sklearn.metrics import mean_absolute_error
import scipy.io as io
from PIL import Image
import numpy as np
# In[20]:
def load_model():
json_file = open('models/Model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights("weights/model_A_weights.h5")
return loaded_model
def create_img(path):
im = Image.open(path).convert('RGB')
im = np.array(im)
im = im/255.0
im[:,:,0]=(im[:,:,0]-0.485)/0.229
im[:,:,1]=(im[:,:,1]-0.456)/0.224
im[:,:,2]=(im[:,:,2]-0.406)/0.225
im = np.expand_dims(im,axis = 0)
return im
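# Minimal usage sketch of the two helpers above (the image path is hypothetical):
# model = load_model()
# img = create_img('data/part_A_final/test_data/images/IMG_1.jpg')
# estimated_count = np.sum(model.predict(img))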
# In[21]:
root = 'data'
# In[22]:
part_A_train = os.path.join(root,'part_A_final/train_data','images')
part_A_test = os.path.join(root,'part_A_final/test_data','images')
part_B_train = os.path.join(root,'part_B_final/train_data','images')
part_B_test = os.path.join(root,'part_B_final/test_data','images')
path_sets = [part_A_test]
# In[23]:
img_paths = []
for path in path_sets:
for img_path in glob.glob(os.path.join(path, '*.jpg')):
img_paths.append(img_path)
print(len(img_paths))
# In[8]:
model = load_model()
name = []
y_true = []
y_pred = []
for image in img_paths:
name.append(image)
gt = h5py.File(image.replace('.jpg', '.h5').replace('images', 'ground'), 'r')
groundtruth = np.asarray(gt['density'])
num1 = np.sum(groundtruth)
y_true.append(np.sum(num1))
img = create_img(image)
num = np.sum(model.predict(img))
y_pred.append(np.sum(num))
data = pd.DataFrame({'name': name, 'y_pred': y_pred, 'y_true': y_true})
import warnings
import logging
import pandas as pd
from functools import partial
from collections import defaultdict
from dae.utils.helpers import str2bool
from dae.variants.attributes import Role, Sex, Status
from dae.backends.raw.loader import CLILoader, CLIArgument
from dae.pedigrees.family import FamiliesData, Person, PEDIGREE_COLUMN_NAMES
from dae.pedigrees.family_role_builder import FamilyRoleBuilder
from dae.pedigrees.layout import Layout
logger = logging.getLogger(__name__)
PED_COLUMNS_REQUIRED = (
PEDIGREE_COLUMN_NAMES["family"],
PEDIGREE_COLUMN_NAMES["person"],
PEDIGREE_COLUMN_NAMES["mother"],
PEDIGREE_COLUMN_NAMES["father"],
PEDIGREE_COLUMN_NAMES["sex"],
PEDIGREE_COLUMN_NAMES["status"],
)
class FamiliesLoader(CLILoader):
def __init__(self, families_filename, **params):
super().__init__(params=params)
self.filename = families_filename
# TODO FIXME Params should be able to accept namedtuple instances
# self.params["ped_sep"] = ped_sep
self.file_format = self.params.get("ped_file_format", "pedigree")
@staticmethod
def load_pedigree_file(pedigree_filename, pedigree_format=None):
# default to a fresh dict to avoid mutating a shared default argument
if pedigree_format is None:
pedigree_format = {}
pedigree_format["ped_no_role"] = str2bool(
pedigree_format.get("ped_no_role", False)
)
pedigree_format["ped_no_header"] = str2bool(
pedigree_format.get("ped_no_header", False)
)
ped_df = FamiliesLoader.flexible_pedigree_read(
pedigree_filename, **pedigree_format
)
families = FamiliesData.from_pedigree_df(ped_df)
FamiliesLoader._build_families_layouts(families, pedigree_format)
FamiliesLoader._build_families_roles(families, pedigree_format)
return families
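# Minimal usage sketch (the file name is hypothetical):
# families = FamiliesLoader.load_pedigree_file(
#     "families.ped", pedigree_format={"ped_layout_mode": "generate"})
# for family in families.values():
#     print(family.family_id)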
@staticmethod
def _build_families_layouts(families, pedigree_format):
ped_layout_mode = pedigree_format.get("ped_layout_mode", "load")
if ped_layout_mode == "generate":
for family in families.values():
logger.debug(
f"building layout for family: {family.family_id}; "
f"{family}")
layouts = Layout.from_family(family)
for layout in layouts:
layout.apply_to_family(family)
elif ped_layout_mode == "load":
pass
else:
raise ValueError(
f"unexpected `--ped-layout-mode` option value "
f"`{ped_layout_mode}`"
)
@staticmethod
def _build_families_roles(families, pedigree_format):
has_unknown_roles = any(
[
p.role is None # or p.role == Role.unknown
for p in families.persons.values()
]
)
if has_unknown_roles or pedigree_format.get("ped_no_role"):
for family in families.values():
logger.debug(f"building family roles: {family.family_id}")
role_build = FamilyRoleBuilder(family)
role_build.build_roles()
families._ped_df = None
# @staticmethod
# def load_simple_families_file(families_filename):
# ped_df = FamiliesLoader.load_simple_family_file(families_filename)
# return FamiliesData.from_pedigree_df(ped_df)
def load(self):
if self.file_format == "simple":
return self.load_simple_families_file(self.filename)
else:
assert self.file_format == "pedigree"
return self.load_pedigree_file(
self.filename, pedigree_format=self.params
)
@classmethod
def _arguments(cls):
arguments = []
arguments.append(CLIArgument(
"families",
value_type=str,
metavar="<families filename>",
help_text="families filename in pedigree or simple family format",
))
arguments.append(CLIArgument(
"--ped-family",
default_value="familyId",
help_text="specify the name of the column in the pedigree"
" file that holds the ID of the family the person belongs to"
" [default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-person",
default_value="personId",
help_text="specify the name of the column in the pedigree"
" file that holds the person's ID [default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-mom",
default_value="momId",
help_text="specify the name of the column in the pedigree"
" file that holds the ID of the person's mother"
" [default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-dad",
default_value="dadId",
help_text="specify the name of the column in the pedigree"
" file that holds the ID of the person's father"
" [default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-sex",
default_value="sex",
help_text="specify the name of the column in the pedigree"
" file that holds the sex of the person [default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-status",
default_value="status",
help_text="specify the name of the column in the pedigree"
" file that holds the status of the person"
" [default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-role",
default_value="role",
help_text="specify the name of the column in the pedigree"
" file that holds the role of the person"
" [default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-no-role",
action="store_true",
default_value=False,
help_text="indicates that the provided pedigree file has no role "
"column. "
"If this argument is provided, the import tool will guess the "
"roles "
'of individuals and write them in a "role" column.',
))
arguments.append(CLIArgument(
"--ped-proband",
default_value=None,
help_text="specify the name of the column in the pedigree"
" file that specifies persons with role `proband`;"
" this columns is used only when"
" option `--ped-no-role` is specified. [default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-no-header",
action="store_true",
default_value=False,
help_text="indicates that the provided pedigree"
" file has no header. The pedigree column arguments"
" will accept indices if this argument is given."
" [default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-file-format",
default_value="pedigree",
help_text="Families file format. It should `pedigree` or `simple`"
"for simple family format [default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-layout-mode",
default_value="load",
help_text="Layout mode specifies how pedigrees "
"drawing of each family is handled."
" Available options are `generate` and `load`. When "
"layout mode option is set to generate the loader"
"tryes to generate a layout for the family pedigree. "
"When `load` is specified, the loader tries to load the layout "
"from the layout column of the pedigree. "
"[default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-sep",
default_value="\t",
raw=True,
help_text="Families file field separator [default: `\\t`]",
))
return arguments
@classmethod
def parse_cli_arguments(cls, argv):
filename = argv.families
super().parse_cli_arguments(argv, use_defaults=False)
ped_ped_args = [
"ped_family",
"ped_person",
"ped_mom",
"ped_dad",
"ped_sex",
"ped_status",
"ped_role",
"ped_file_format",
"ped_sep",
"ped_proband",
"ped_layout_mode",
]
columns = set(
[
"ped_family",
"ped_person",
"ped_mom",
"ped_dad",
"ped_sex",
"ped_status",
"ped_role",
"ped_proband",
]
)
assert argv.ped_file_format in ("simple", "pedigree")
assert argv.ped_layout_mode in ("generate", "load")
res = {}
res["ped_no_header"] = str2bool(argv.ped_no_header)
res["ped_no_role"] = str2bool(argv.ped_no_role)
for col in ped_ped_args:
ped_value = getattr(argv, col)
if not res["ped_no_header"] or col not in columns:
res[col] = ped_value
elif ped_value is not None and col in columns:
res[col] = int(ped_value)
return filename, res
@staticmethod
def produce_header_from_indices(
ped_family=None,
ped_person=None,
ped_mom=None,
ped_dad=None,
ped_sex=None,
ped_status=None,
ped_role=None,
ped_proband=None,
ped_layout=None,
ped_generated=None,
ped_not_sequenced=None,
ped_sample_id=None,
):
header = (
(ped_family, PEDIGREE_COLUMN_NAMES["family"]),
(ped_person, PEDIGREE_COLUMN_NAMES["person"]),
(ped_mom, PEDIGREE_COLUMN_NAMES["mother"]),
(ped_dad, PEDIGREE_COLUMN_NAMES["father"]),
(ped_sex, PEDIGREE_COLUMN_NAMES["sex"]),
(ped_status, PEDIGREE_COLUMN_NAMES["status"]),
(ped_role, PEDIGREE_COLUMN_NAMES["role"]),
(ped_proband, PEDIGREE_COLUMN_NAMES["proband"]),
(ped_layout, PEDIGREE_COLUMN_NAMES["layout"]),
(ped_generated, PEDIGREE_COLUMN_NAMES["generated"]),
(ped_not_sequenced, PEDIGREE_COLUMN_NAMES["not_sequenced"]),
(ped_sample_id, PEDIGREE_COLUMN_NAMES["sample id"]),
)
header = tuple(filter(lambda col: type(col[0]) is int, header))
for col in header:
assert type(col[0]) is int, col[0]
header = tuple(sorted(header, key=lambda col: col[0]))
return zip(*header)
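# Illustrative example (hypothetical column positions in a header-less file):
# produce_header_from_indices(ped_family=0, ped_person=1, ped_sex=4) yields the
# selected positions zipped with their standard names, e.g. (0, 1, 4) paired with
# ("familyId", "personId", "sex"), assuming those are the corresponding values in
# PEDIGREE_COLUMN_NAMES.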
@staticmethod
def flexible_pedigree_read(
pedigree_filepath,
ped_sep="\t",
ped_no_header=False,
ped_family="familyId",
ped_person="personId",
ped_mom="momId",
ped_dad="dadId",
ped_sex="sex",
ped_status="status",
ped_role="role",
ped_proband="proband",
ped_layout="layout",
ped_generated="generated",
ped_not_sequenced="not_sequenced",
ped_sample_id="sampleId",
ped_no_role=False,
**kwargs,
):
if type(ped_no_role) == str:
ped_no_role = str2bool(ped_no_role)
if type(ped_no_header) == str:
ped_no_header = str2bool(ped_no_header)
read_csv_func = partial(
pd.read_csv,
sep=ped_sep,
index_col=False,
skipinitialspace=True,
converters={
ped_role: Role.from_name,
ped_sex: Sex.from_name,
ped_status: Status.from_name,
ped_generated: lambda v: str2bool(v),
ped_not_sequenced: lambda v: str2bool(v),
ped_proband: lambda v: str2bool(v),
},
dtype=str,
comment="#",
encoding="utf-8",
)
with warnings.catch_warnings(record=True) as ws:
warnings.filterwarnings(
"ignore",
category=pd.errors.ParserWarning,
message="Both a converter and dtype were specified",
)
if ped_no_header:
_, file_header = FamiliesLoader.produce_header_from_indices(
ped_family=ped_family,
ped_person=ped_person,
ped_mom=ped_mom,
ped_dad=ped_dad,
ped_sex=ped_sex,
ped_status=ped_status,
ped_role=ped_role,
ped_proband=ped_proband,
ped_layout=ped_layout,
ped_generated=ped_generated,
ped_not_sequenced=ped_not_sequenced,
ped_sample_id=ped_sample_id,
)
ped_family = PEDIGREE_COLUMN_NAMES["family"]
ped_person = PEDIGREE_COLUMN_NAMES["person"]
ped_mom = PEDIGREE_COLUMN_NAMES["mother"]
ped_dad = PEDIGREE_COLUMN_NAMES["father"]
ped_sex = PEDIGREE_COLUMN_NAMES["sex"]
ped_status = PEDIGREE_COLUMN_NAMES["status"]
ped_role = PEDIGREE_COLUMN_NAMES["role"]
ped_proband = PEDIGREE_COLUMN_NAMES["proband"]
ped_layout = PEDIGREE_COLUMN_NAMES["layout"]
ped_generated = PEDIGREE_COLUMN_NAMES["generated"]
ped_not_sequenced = PEDIGREE_COLUMN_NAMES["not_sequenced"]
ped_sample_id = PEDIGREE_COLUMN_NAMES["sample id"]
ped_df = read_csv_func(
pedigree_filepath, header=None, names=file_header
)
else:
ped_df = read_csv_func(pedigree_filepath)
for w in ws:
warnings.showwarning(w.message, w.category, w.filename, w.lineno)
if ped_sample_id in ped_df:
if ped_generated in ped_df or ped_not_sequenced in ped_df:
def fill_sample_id(r):
if not pd.isna(r.sampleId):
return r.sampleId
else:
if r.generated or r.not_sequenced:
return None
else:
return r.personId
else:
def fill_sample_id(r):
if not pd.isna(r.sampleId):
return r.sampleId
else:
return r.personId
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from ampligraph.datasets import load_from_csv
from ampligraph.discovery import find_clusters
from ampligraph.evaluation import train_test_split_no_unseen
from ampligraph.utils import restore_model
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA, SparsePCA, KernelPCA, FastICA
import mpld3
import plotly.graph_objects as go
import plotly.express as px
X = load_from_csv('data', 'Opcua-all.txt', sep='\t')
# Train test split
X_train, X_test = train_test_split_no_unseen(X, test_size=1000)
# Restore the model
restored_model = restore_model(model_name_path='export/opcua_HolE.pkl')
# Get the subject entities and their corresponding embeddings
triples_df = pd.DataFrame(X, columns=['s', 'p', 'o'])
uniques = triples_df.s.unique()
uniques_embeddings = dict(zip(uniques, restored_model.get_embeddings(uniques)))
uniques_embeddings_array = np.array([i for i in uniques_embeddings.values()])
# Find clusters of embeddings using KMeans
kmeans = KMeans(n_clusters=6, n_init=100, max_iter=500)
clusters = find_clusters(uniques, restored_model, kmeans, mode='entity')
# Project embeddings into 2D space via FastICA
embeddings_2d = FastICA(n_components=2).fit_transform(uniques_embeddings_array)
plot_df = pd.DataFrame({"uniques": uniques,
"clusters": | pd.Series(clusters) | pandas.Series |
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64'),
columns=wrapper.columns
)
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
ma = mapped_array_nosort
ma_grouped = mapped_array_nosort_grouped
else:
ma = mapped_array
ma_grouped = mapped_array_grouped
np.testing.assert_array_equal(
ma['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
ma['a'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
np.testing.assert_array_equal(
ma['b'].id_arr,
np.array([3, 4, 5])
)
np.testing.assert_array_equal(
ma['b'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'a']].id_arr,
np.array([0, 1, 2, 0, 1, 2])
)
np.testing.assert_array_equal(
ma[['a', 'a']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'b']].id_arr,
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
ma[['a', 'b']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = ma.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped['g1'].wrapper.ndim == 2
assert ma_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert ma_grouped['g2'].wrapper.ndim == 2
assert ma_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped[['g1']].wrapper.ndim == 2
assert ma_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert ma_grouped[['g1', 'g2']].wrapper.ndim == 2
assert ma_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_magic(self):
a = vbt.MappedArray(
wrapper,
records_arr['some_field1'],
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
a_inv = vbt.MappedArray(
wrapper,
records_arr['some_field1'][::-1],
records_arr['col'][::-1],
id_arr=records_arr['id'][::-1],
idx_arr=records_arr['idx'][::-1]
)
b = records_arr['some_field2']
a_bool = vbt.MappedArray(
wrapper,
records_arr['some_field1'] > np.mean(records_arr['some_field1']),
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
b_bool = records_arr['some_field2'] > np.mean(records_arr['some_field2'])
assert a ** a == a ** 2
with pytest.raises(Exception):
_ = a * a_inv
# binary ops
# comparison ops
np.testing.assert_array_equal((a == b).values, a.values == b)
np.testing.assert_array_equal((a != b).values, a.values != b)
np.testing.assert_array_equal((a < b).values, a.values < b)
np.testing.assert_array_equal((a > b).values, a.values > b)
np.testing.assert_array_equal((a <= b).values, a.values <= b)
np.testing.assert_array_equal((a >= b).values, a.values >= b)
# arithmetic ops
np.testing.assert_array_equal((a + b).values, a.values + b)
np.testing.assert_array_equal((a - b).values, a.values - b)
np.testing.assert_array_equal((a * b).values, a.values * b)
np.testing.assert_array_equal((a ** b).values, a.values ** b)
np.testing.assert_array_equal((a % b).values, a.values % b)
np.testing.assert_array_equal((a // b).values, a.values // b)
np.testing.assert_array_equal((a / b).values, a.values / b)
# __r*__ is only called if the left object does not have an __*__ method
np.testing.assert_array_equal((10 + a).values, 10 + a.values)
np.testing.assert_array_equal((10 - a).values, 10 - a.values)
np.testing.assert_array_equal((10 * a).values, 10 * a.values)
np.testing.assert_array_equal((10 ** a).values, 10 ** a.values)
np.testing.assert_array_equal((10 % a).values, 10 % a.values)
np.testing.assert_array_equal((10 // a).values, 10 // a.values)
np.testing.assert_array_equal((10 / a).values, 10 / a.values)
# mask ops
np.testing.assert_array_equal((a_bool & b_bool).values, a_bool.values & b_bool)
np.testing.assert_array_equal((a_bool | b_bool).values, a_bool.values | b_bool)
np.testing.assert_array_equal((a_bool ^ b_bool).values, a_bool.values ^ b_bool)
np.testing.assert_array_equal((True & a_bool).values, True & a_bool.values)
np.testing.assert_array_equal((True | a_bool).values, True | a_bool.values)
np.testing.assert_array_equal((True ^ a_bool).values, True ^ a_bool.values)
# unary ops
np.testing.assert_array_equal((-a).values, -a.values)
np.testing.assert_array_equal((+a).values, +a.values)
np.testing.assert_array_equal((abs(-a)).values, abs((-a.values)))
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Mean', 'Std', 'Min', 'Median', 'Max', 'Min Index', 'Max Index'
], dtype='object')
pd.testing.assert_series_equal(
mapped_array.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
2.25, 11.777777777777779, 0.859116756396542, 11.0, 11.666666666666666, 12.666666666666666
],
index=stats_index[:-2],
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
3, 11.0, 1.0, 10.0, 11.0, 12.0, 'x', 'z'
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
6, 12.166666666666666, 1.4719601443879746, 10.0, 12.5, 14.0, 'x', 'y'
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
def test_stats_mapping(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Value Counts: test_10.0',
'Value Counts: test_11.0', 'Value Counts: test_12.0',
'Value Counts: test_13.0', 'Value Counts: test_14.0'
], dtype='object')
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
2.25, 0.5, 0.5, 0.5, 0.5, 0.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='a'),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
3, 1, 1, 1, 0, 0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
6, 1, 1, 1, 2, 1
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
mapped_array.stats(settings=dict(mapping=mapping))
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mp_mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 9)
pd.testing.assert_index_equal(stats_df.index, mp_mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# base.py ############# #
class TestRecords:
def test_config(self, tmp_path):
assert vbt.Records.loads(records['a'].dumps()) == records['a']
assert vbt.Records.loads(records.dumps()) == records
records.save(tmp_path / 'records')
assert vbt.Records.load(tmp_path / 'records') == records
def test_records(self):
pd.testing.assert_frame_equal(
records.records,
pd.DataFrame.from_records(records_arr)
)
def test_recarray(self):
np.testing.assert_array_equal(records['a'].recarray.some_field1, records['a'].values['some_field1'])
np.testing.assert_array_equal(records.recarray.some_field1, records.values['some_field1'])
def test_records_readable(self):
pd.testing.assert_frame_equal(
records.records_readable,
pd.DataFrame([
[0, 'a', 'x', 10.0, 21.0], [1, 'a', 'y', 11.0, 20.0], [2, 'a', 'z', 12.0, 19.0],
[3, 'b', 'x', 13.0, 18.0], [4, 'b', 'y', 14.0, 17.0], [5, 'b', 'z', 13.0, 18.0],
[6, 'c', 'x', 12.0, 19.0], [7, 'c', 'y', 11.0, 20.0], [8, 'c', 'z', 10.0, 21.0]
], columns=pd.Index(['Id', 'Column', 'Timestamp', 'some_field1', 'some_field2'], dtype='object'))
)
def test_is_sorted(self):
assert records.is_sorted()
assert records.is_sorted(incl_id=True)
assert not records_nosort.is_sorted()
assert not records_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert records.sort().is_sorted()
assert records.sort().is_sorted(incl_id=True)
assert records.sort(incl_id=True).is_sorted(incl_id=True)
assert records_nosort.sort().is_sorted()
assert records_nosort.sort().is_sorted(incl_id=True)
assert records_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = records['a'].values['some_field1'] >= records['a'].values['some_field1'].mean()
record_arrays_close(
records['a'].apply_mask(mask_a).values,
np.array([
(1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
mask = records.values['some_field1'] >= records.values['some_field1'].mean()
filtered = records.apply_mask(mask)
record_arrays_close(
filtered.values,
np.array([
(2, 0, 2, 12., 19.), (3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.),
(5, 1, 2, 13., 18.), (6, 2, 0, 12., 19.)
], dtype=example_dt)
)
assert records_grouped.apply_mask(mask).wrapper == records_grouped.wrapper
def test_map_field(self):
np.testing.assert_array_equal(
records['a'].map_field('some_field1').values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
records.map_field('some_field1').values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
assert records_grouped.map_field('some_field1').wrapper == \
records.map_field('some_field1', group_by=group_by).wrapper
assert records_grouped.map_field('some_field1', group_by=False).wrapper.grouper.group_by is None
def test_map(self):
@njit
def map_func_nb(record):
return record['some_field1'] + record['some_field2']
np.testing.assert_array_equal(
records['a'].map(map_func_nb).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map(map_func_nb).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map(map_func_nb).wrapper == \
records.map(map_func_nb, group_by=group_by).wrapper
assert records_grouped.map(map_func_nb, group_by=False).wrapper.grouper.group_by is None
def test_map_array(self):
arr = records_arr['some_field1'] + records_arr['some_field2']
np.testing.assert_array_equal(
records['a'].map_array(arr[:3]).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map_array(arr).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map_array(arr).wrapper == \
records.map_array(arr, group_by=group_by).wrapper
assert records_grouped.map_array(arr, group_by=False).wrapper.grouper.group_by is None
def test_apply(self):
@njit
def cumsum_apply_nb(records):
return np.cumsum(records['some_field1'])
np.testing.assert_array_equal(
records['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
records.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert records_grouped.apply(cumsum_apply_nb).wrapper == \
records.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert records_grouped.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_count(self):
assert records['a'].count() == 3
pd.testing.assert_series_equal(
records.count(),
pd.Series(
np.array([3, 3, 3, 0]),
index=wrapper.columns
).rename('count')
)
assert records_grouped['g1'].count() == 6
pd.testing.assert_series_equal(
records_grouped.count(),
pd.Series(
np.array([6, 3]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('count')
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
r = records_nosort
r_grouped = records_nosort_grouped
else:
r = records
r_grouped = records_grouped
record_arrays_close(
r['a'].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
pd.testing.assert_index_equal(
r['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
record_arrays_close(
r[['a', 'a']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(0, 1, 0, 10., 21.), (1, 1, 1, 11., 20.), (2, 1, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
record_arrays_close(
r[['a', 'b']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.), (5, 1, 2, 13., 18.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = r.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped['g1'].wrapper.ndim == 2
assert r_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert r_grouped['g2'].wrapper.ndim == 2
assert r_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped[['g1']].wrapper.ndim == 2
assert r_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert r_grouped[['g1', 'g2']].wrapper.ndim == 2
assert r_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_filtering(self):
filtered_records = vbt.Records(wrapper, records_arr[[0, -1]])
record_arrays_close(
filtered_records.values,
np.array([(0, 0, 0, 10., 21.), (8, 2, 2, 10., 21.)], dtype=example_dt)
)
# a
record_arrays_close(
filtered_records['a'].values,
np.array([(0, 0, 0, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['a'].map_field('some_field1').id_arr,
np.array([0])
)
assert filtered_records['a'].map_field('some_field1').min() == 10.
assert filtered_records['a'].count() == 1.
# b
record_arrays_close(
filtered_records['b'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['b'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['b'].map_field('some_field1').min())
assert filtered_records['b'].count() == 0.
# c
record_arrays_close(
filtered_records['c'].values,
np.array([(8, 0, 2, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['c'].map_field('some_field1').id_arr,
np.array([8])
)
assert filtered_records['c'].map_field('some_field1').min() == 10.
assert filtered_records['c'].count() == 1.
# d
record_arrays_close(
filtered_records['d'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['d'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['d'].map_field('some_field1').min())
assert filtered_records['d'].count() == 0.
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count'
], dtype='object')
pd.testing.assert_series_equal(
records.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 2.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
records.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 3
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
records.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 6
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c')
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records.stats(column='g2', group_by=group_by)
)
stats_df = records.stats(agg_func=None)
assert stats_df.shape == (4, 4)
pd.testing.assert_index_equal(stats_df.index, records.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# ranges.py ############# #
ts = pd.DataFrame({
'a': [1, -1, 3, -1, 5, -1],
'b': [-1, -1, -1, 4, 5, 6],
'c': [1, 2, 3, -1, -1, -1],
'd': [-1, -1, -1, -1, -1, -1]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
ranges = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days'))
ranges_grouped = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
class TestRanges:
def test_mapped_fields(self):
for name in range_dt.names:
np.testing.assert_array_equal(
getattr(ranges, name).values,
ranges.values[name]
)
def test_from_ts(self):
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 2, 3, 1), (2, 0, 4, 5, 1), (3, 1, 3, 5, 0), (4, 2, 0, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper.freq == day_dt
pd.testing.assert_index_equal(
ranges_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = ranges.records_readable
np.testing.assert_array_equal(
records_readable['Range Id'].values,
np.array([
0, 1, 2, 3, 4
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-01T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Closed', 'Closed', 'Closed', 'Open', 'Closed'
])
)
def test_to_mask(self):
pd.testing.assert_series_equal(
ranges['a'].to_mask(),
ts['a'] != -1
)
pd.testing.assert_frame_equal(
ranges.to_mask(),
ts != -1
)
pd.testing.assert_frame_equal(
ranges_grouped.to_mask(),
pd.DataFrame(
[
[True, True],
[False, True],
[True, True],
[True, False],
[True, False],
[True, False]
],
index=ts.index,
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_duration(self):
np.testing.assert_array_equal(
ranges['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_equal(
ranges.duration.values,
np.array([1, 1, 1, 3, 3])
)
def test_avg_duration(self):
assert ranges['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.avg_duration(),
pd.Series(
np.array([129600000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert ranges['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.max_duration(),
pd.Series(
np.array([259200000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert ranges['a'].coverage() == 0.5
pd.testing.assert_series_equal(
ranges.coverage(),
pd.Series(
np.array([0.5, 0.5, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(),
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage()
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True),
pd.Series(
np.array([1.0, 1.0, 1.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True, normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
ranges_grouped.replace(records_arr=np.repeat(ranges_grouped.values, 2)).coverage()
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage', 'Overlap Coverage',
'Total Records', 'Duration: Min', 'Duration: Median', 'Duration: Max',
'Duration: Mean', 'Duration: Std'
], dtype='object')
pd.testing.assert_series_equal(
ranges.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 1.25, pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 3, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('5 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), 4, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('1 days 12:00:00'), pd.Timedelta('1 days 00:00:00')
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c')
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges.stats(column='g2', group_by=group_by)
)
stats_df = ranges.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, ranges.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# drawdowns.py ############# #
ts2 = pd.DataFrame({
'a': [2, 1, 3, 1, 4, 1],
'b': [1, 2, 1, 3, 1, 4],
'c': [1, 2, 3, 2, 1, 2],
'd': [1, 2, 3, 4, 5, 6]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
drawdowns = vbt.Drawdowns.from_ts(ts2, wrapper_kwargs=dict(freq='1 days'))
drawdowns_grouped = vbt.Drawdowns.from_ts(ts2, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
class TestDrawdowns:
def test_mapped_fields(self):
for name in drawdown_dt.names:
np.testing.assert_array_equal(
getattr(drawdowns, name).values,
drawdowns.values[name]
)
def test_ts(self):
pd.testing.assert_frame_equal(
drawdowns.ts,
ts2
)
pd.testing.assert_series_equal(
drawdowns['a'].ts,
ts2['a']
)
pd.testing.assert_frame_equal(
drawdowns_grouped['g1'].ts,
ts2[['a', 'b']]
)
assert drawdowns.replace(ts=None)['a'].ts is None
def test_from_ts(self):
record_arrays_close(
drawdowns.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1),
(2, 0, 4, 5, 5, 5, 4.0, 1.0, 1.0, 0), (3, 1, 1, 2, 2, 3, 2.0, 1.0, 3.0, 1),
(4, 1, 3, 4, 4, 5, 3.0, 1.0, 4.0, 1), (5, 2, 2, 3, 4, 5, 3.0, 1.0, 2.0, 0)
], dtype=drawdown_dt)
)
assert drawdowns.wrapper.freq == day_dt
pd.testing.assert_index_equal(
drawdowns_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = drawdowns.records_readable
np.testing.assert_array_equal(
records_readable['Drawdown Id'].values,
np.array([
0, 1, 2, 3, 4, 5
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Peak Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-04T00:00:00.000000000', '2020-01-03T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Valley Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-05T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-03T00:00:00.000000000', '2020-01-05T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Peak Value'].values,
np.array([
2., 3., 4., 2., 3., 3.
])
)
np.testing.assert_array_equal(
records_readable['Valley Value'].values,
np.array([
1., 1., 1., 1., 1., 1.
])
)
np.testing.assert_array_equal(
records_readable['End Value'].values,
np.array([
3., 4., 1., 3., 4., 2.
])
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Recovered', 'Recovered', 'Active', 'Recovered', 'Recovered', 'Active'
])
)
def test_drawdown(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].drawdown.values,
np.array([-0.5, -0.66666667, -0.75])
)
np.testing.assert_array_almost_equal(
drawdowns.drawdown.values,
np.array([-0.5, -0.66666667, -0.75, -0.5, -0.66666667, -0.66666667])
)
pd.testing.assert_frame_equal(
drawdowns.drawdown.to_pd(),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[-0.5, np.nan, np.nan, np.nan],
[np.nan, -0.5, np.nan, np.nan],
[-0.66666669, np.nan, np.nan, np.nan],
[-0.75, -0.66666669, -0.66666669, np.nan]
]),
index=ts2.index,
columns=ts2.columns
)
)
def test_avg_drawdown(self):
assert drawdowns['a'].avg_drawdown() == -0.6388888888888888
pd.testing.assert_series_equal(
drawdowns.avg_drawdown(),
pd.Series(
np.array([-0.63888889, -0.58333333, -0.66666667, np.nan]),
index=wrapper.columns
).rename('avg_drawdown')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_drawdown(),
pd.Series(
np.array([-0.6166666666666666, -0.6666666666666666]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_drawdown')
)
def test_max_drawdown(self):
assert drawdowns['a'].max_drawdown() == -0.75
pd.testing.assert_series_equal(
drawdowns.max_drawdown(),
pd.Series(
np.array([-0.75, -0.66666667, -0.66666667, np.nan]),
index=wrapper.columns
).rename('max_drawdown')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_drawdown(),
pd.Series(
np.array([-0.75, -0.6666666666666666]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_drawdown')
)
def test_recovery_return(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_return.values,
np.array([2., 3., 0.])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_return.values,
np.array([2., 3., 0., 2., 3., 1.])
)
pd.testing.assert_frame_equal(
drawdowns.recovery_return.to_pd(),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[2.0, np.nan, np.nan, np.nan],
[np.nan, 2.0, np.nan, np.nan],
[3.0, np.nan, np.nan, np.nan],
[0.0, 3.0, 1.0, np.nan]
]),
index=ts2.index,
columns=ts2.columns
)
)
def test_avg_recovery_return(self):
assert drawdowns['a'].avg_recovery_return() == 1.6666666666666667
pd.testing.assert_series_equal(
drawdowns.avg_recovery_return(),
pd.Series(
np.array([1.6666666666666667, 2.5, 1.0, np.nan]),
index=wrapper.columns
).rename('avg_recovery_return')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_recovery_return(),
pd.Series(
np.array([2.0, 1.0]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_recovery_return')
)
def test_max_recovery_return(self):
assert drawdowns['a'].max_recovery_return() == 3.0
pd.testing.assert_series_equal(
drawdowns.max_recovery_return(),
pd.Series(
np.array([3.0, 3.0, 1.0, np.nan]),
index=wrapper.columns
).rename('max_recovery_return')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_recovery_return(),
pd.Series(
np.array([3.0, 1.0]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_recovery_return')
)
def test_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_almost_equal(
drawdowns.duration.values,
np.array([1, 1, 1, 1, 1, 3])
)
def test_avg_duration(self):
assert drawdowns['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.avg_duration(),
pd.Series(
np.array([86400000000000, 86400000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert drawdowns['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.max_duration(),
pd.Series(
np.array([86400000000000, 86400000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert drawdowns['a'].coverage() == 0.5
pd.testing.assert_series_equal(
drawdowns.coverage(),
pd.Series(
np.array([0.5, 0.3333333333333333, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
drawdowns_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
def test_decline_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].decline_duration.values,
np.array([1., 1., 1.])
)
np.testing.assert_array_almost_equal(
drawdowns.decline_duration.values,
np.array([1., 1., 1., 1., 1., 2.])
)
def test_recovery_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_duration.values,
np.array([1, 1, 0])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_duration.values,
np.array([1, 1, 0, 1, 1, 1])
)
def test_recovery_duration_ratio(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_duration_ratio.values,
np.array([1., 1., 0.])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_duration_ratio.values,
np.array([1., 1., 0., 1., 1., 0.5])
)
def test_active_records(self):
assert isinstance(drawdowns.active, vbt.Drawdowns)
assert drawdowns.active.wrapper == drawdowns.wrapper
record_arrays_close(
drawdowns['a'].active.values,
np.array([
(2, 0, 4, 5, 5, 5, 4., 1., 1., 0)
], dtype=drawdown_dt)
)
record_arrays_close(
drawdowns['a'].active.values,
drawdowns.active['a'].values
)
record_arrays_close(
drawdowns.active.values,
np.array([
(2, 0, 4, 5, 5, 5, 4.0, 1.0, 1.0, 0), (5, 2, 2, 3, 4, 5, 3.0, 1.0, 2.0, 0)
], dtype=drawdown_dt)
)
def test_recovered_records(self):
assert isinstance(drawdowns.recovered, vbt.Drawdowns)
assert drawdowns.recovered.wrapper == drawdowns.wrapper
record_arrays_close(
drawdowns['a'].recovered.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1)
], dtype=drawdown_dt)
)
record_arrays_close(
drawdowns['a'].recovered.values,
drawdowns.recovered['a'].values
)
record_arrays_close(
drawdowns.recovered.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1),
(3, 1, 1, 2, 2, 3, 2.0, 1.0, 3.0, 1), (4, 1, 3, 4, 4, 5, 3.0, 1.0, 4.0, 1)
], dtype=drawdown_dt)
)
def test_active_drawdown(self):
assert drawdowns['a'].active_drawdown() == -0.75
pd.testing.assert_series_equal(
drawdowns.active_drawdown(),
pd.Series(
np.array([-0.75, np.nan, -0.3333333333333333, np.nan]),
index=wrapper.columns
).rename('active_drawdown')
)
with pytest.raises(Exception):
drawdowns_grouped.active_drawdown()
def test_active_duration(self):
assert drawdowns['a'].active_duration() == np.timedelta64(86400000000000)
pd.testing.assert_series_equal(
drawdowns.active_duration(),
pd.Series(
np.array([86400000000000, 'NaT', 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('active_duration')
)
with pytest.raises(Exception):
drawdowns_grouped.active_duration()
def test_active_recovery(self):
assert drawdowns['a'].active_recovery() == 0.
pd.testing.assert_series_equal(
drawdowns.active_recovery(),
pd.Series(
np.array([0., np.nan, 0.5, np.nan]),
index=wrapper.columns
).rename('active_recovery')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery()
def test_active_recovery_return(self):
assert drawdowns['a'].active_recovery_return() == 0.
pd.testing.assert_series_equal(
drawdowns.active_recovery_return(),
pd.Series(
np.array([0., np.nan, 1., np.nan]),
index=wrapper.columns
).rename('active_recovery_return')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery_return()
def test_active_recovery_duration(self):
assert drawdowns['a'].active_recovery_duration() == pd.Timedelta('0 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.active_recovery_duration(),
pd.Series(
np.array([0, 'NaT', 86400000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('active_recovery_duration')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery_duration()
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage [%]', 'Total Records',
'Total Recovered Drawdowns', 'Total Active Drawdowns',
'Active Drawdown [%]', 'Active Duration', 'Active Recovery [%]',
'Active Recovery Return [%]', 'Active Recovery Duration',
'Max Drawdown [%]', 'Avg Drawdown [%]', 'Max Drawdown Duration',
'Avg Drawdown Duration', 'Max Recovery Return [%]',
'Avg Recovery Return [%]', 'Max Recovery Duration',
'Avg Recovery Duration', 'Avg Recovery Duration Ratio'
], dtype='object')
pd.testing.assert_series_equal(
drawdowns.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 44.444444444444436, 1.5, 1.0, 0.5,
54.166666666666664, pd.Timedelta('2 days 00:00:00'), 25.0, 50.0,
pd.Timedelta('0 days 12:00:00'), 66.66666666666666, 58.33333333333333,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(settings=dict(incl_active=True)),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 44.444444444444436, 1.5, 1.0, 0.5,
54.166666666666664, pd.Timedelta('2 days 00:00:00'), 25.0, 50.0,
pd.Timedelta('0 days 12:00:00'), 69.44444444444444, 62.962962962962955,
pd.Timedelta('1 days 16:00:00'), pd.Timedelta('1 days 16:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 50.0, 3, 2, 1, 75.0, pd.Timedelta('1 days 00:00:00'),
0.0, 0.0, pd.Timedelta('0 days 00:00:00'), 66.66666666666666, 58.33333333333333,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 41.66666666666667, 5, 4, 1, 66.66666666666666,
58.33333333333333, pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
300.0, 250.0, pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=pd.Index([
'Start', 'End', 'Period', 'Coverage [%]', 'Total Records',
'Total Recovered Drawdowns', 'Total Active Drawdowns',
'Max Drawdown [%]', 'Avg Drawdown [%]', 'Max Drawdown Duration',
'Avg Drawdown Duration', 'Max Recovery Return [%]',
'Avg Recovery Return [%]', 'Max Recovery Duration',
'Avg Recovery Duration', 'Avg Recovery Duration Ratio'
], dtype='object'),
name='g1'
)
)
pd.testing.assert_series_equal(
drawdowns['c'].stats(),
drawdowns.stats(column='c')
)
pd.testing.assert_series_equal(
drawdowns['c'].stats(),
drawdowns.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
drawdowns_grouped['g2'].stats(),
drawdowns_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
drawdowns_grouped['g2'].stats(),
drawdowns.stats(column='g2', group_by=group_by)
)
stats_df = drawdowns.stats(agg_func=None)
assert stats_df.shape == (4, 21)
pd.testing.assert_index_equal(stats_df.index, drawdowns.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# orders.py ############# #
close = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6),
datetime(2020, 1, 7),
datetime(2020, 1, 8)
]).vbt.tile(4, keys=['a', 'b', 'c', 'd'])
size = np.full(close.shape, np.nan, dtype=np.float_)
size[:, 0] = [1, 0.1, -1, -0.1, np.nan, 1, -1, 2]
size[:, 1] = [-1, -0.1, 1, 0.1, np.nan, -1, 1, -2]
size[:, 2] = [1, 0.1, -1, -0.1, np.nan, 1, -2, 2]
orders = vbt.Portfolio.from_orders(close, size, fees=0.01, freq='1 days').orders
orders_grouped = orders.regroup(group_by)
class TestOrders:
def test_mapped_fields(self):
for name in order_dt.names:
np.testing.assert_array_equal(
getattr(orders, name).values,
orders.values[name]
)
def test_close(self):
pd.testing.assert_frame_equal(
orders.close,
close
)
pd.testing.assert_series_equal(
orders['a'].close,
close['a']
)
pd.testing.assert_frame_equal(
orders_grouped['g1'].close,
close[['a', 'b']]
)
assert orders.replace(close=None)['a'].close is None
def test_records_readable(self):
records_readable = orders.records_readable
np.testing.assert_array_equal(
records_readable['Order Id'].values,
np.array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20
])
)
np.testing.assert_array_equal(
records_readable['Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-07T00:00:00.000000000',
'2020-01-08T00:00:00.000000000', '2020-01-01T00:00:00.000000000',
'2020-01-02T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-04T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-01T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-07T00:00:00.000000000',
'2020-01-08T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'b', 'b',
'b', 'c', 'c', 'c', 'c', 'c', 'c', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Size'].values,
np.array([
1.0, 0.1, 1.0, 0.1, 1.0, 1.0, 2.0, 1.0, 0.1, 1.0, 0.1, 1.0, 1.0,
2.0, 1.0, 0.1, 1.0, 0.1, 1.0, 2.0, 2.0
])
)
np.testing.assert_array_equal(
records_readable['Price'].values,
np.array([
1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 8.0, 1.0, 2.0, 3.0, 4.0, 6.0, 7.0,
8.0, 1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 8.0
])
)
np.testing.assert_array_equal(
records_readable['Fees'].values,
np.array([
0.01, 0.002, 0.03, 0.004, 0.06, 0.07, 0.16, 0.01, 0.002, 0.03,
0.004, 0.06, 0.07, 0.16, 0.01, 0.002, 0.03, 0.004, 0.06, 0.14,
0.16
])
)
np.testing.assert_array_equal(
records_readable['Side'].values,
np.array([
'Buy', 'Buy', 'Sell', 'Sell', 'Buy', 'Sell', 'Buy', 'Sell', 'Sell',
'Buy', 'Buy', 'Sell', 'Buy', 'Sell', 'Buy', 'Buy', 'Sell', 'Sell',
'Buy', 'Sell', 'Buy'
])
)
def test_buy_records(self):
assert isinstance(orders.buy, vbt.Orders)
assert orders.buy.wrapper == orders.wrapper
record_arrays_close(
orders['a'].buy.values,
np.array([
(0, 0, 0, 1., 1., 0.01, 0), (1, 0, 1, 0.1, 2., 0.002, 0),
(4, 0, 5, 1., 6., 0.06, 0), (6, 0, 7, 2., 8., 0.16, 0)
], dtype=order_dt)
)
record_arrays_close(
orders['a'].buy.values,
orders.buy['a'].values
)
record_arrays_close(
orders.buy.values,
np.array([
(0, 0, 0, 1., 1., 0.01, 0), (1, 0, 1, 0.1, 2., 0.002, 0),
(4, 0, 5, 1., 6., 0.06, 0), (6, 0, 7, 2., 8., 0.16, 0),
(9, 1, 2, 1., 3., 0.03, 0), (10, 1, 3, 0.1, 4., 0.004, 0),
(12, 1, 6, 1., 7., 0.07, 0), (14, 2, 0, 1., 1., 0.01, 0),
(15, 2, 1, 0.1, 2., 0.002, 0), (18, 2, 5, 1., 6., 0.06, 0),
(20, 2, 7, 2., 8., 0.16, 0)
], dtype=order_dt)
)
def test_sell_records(self):
assert isinstance(orders.sell, vbt.Orders)
assert orders.sell.wrapper == orders.wrapper
record_arrays_close(
orders['a'].sell.values,
np.array([
(2, 0, 2, 1., 3., 0.03, 1), (3, 0, 3, 0.1, 4., 0.004, 1),
(5, 0, 6, 1., 7., 0.07, 1)
], dtype=order_dt)
)
record_arrays_close(
orders['a'].sell.values,
orders.sell['a'].values
)
record_arrays_close(
orders.sell.values,
np.array([
(2, 0, 2, 1., 3., 0.03, 1), (3, 0, 3, 0.1, 4., 0.004, 1),
(5, 0, 6, 1., 7., 0.07, 1), (7, 1, 0, 1., 1., 0.01, 1),
(8, 1, 1, 0.1, 2., 0.002, 1), (11, 1, 5, 1., 6., 0.06, 1),
(13, 1, 7, 2., 8., 0.16, 1), (16, 2, 2, 1., 3., 0.03, 1),
(17, 2, 3, 0.1, 4., 0.004, 1), (19, 2, 6, 2., 7., 0.14, 1)
], dtype=order_dt)
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Total Records', 'Total Buy Orders', 'Total Sell Orders',
'Min Size', 'Max Size', 'Avg Size', 'Avg Buy Size', 'Avg Sell Size',
'Avg Buy Price', 'Avg Sell Price', 'Total Fees', 'Min Fees', 'Max Fees',
'Avg Fees', 'Avg Buy Fees', 'Avg Sell Fees'
], dtype='object')
pd.testing.assert_series_equal(
orders.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), 5.25, 2.75, 2.5, 0.10000000000000002, 2.0,
0.9333333333333335, 0.9166666666666666, 0.9194444444444446, 4.388888888888889,
4.527777777777779, 0.26949999999999996, 0.002, 0.16, 0.051333333333333335,
0.050222222222222224, 0.050222222222222224
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
orders.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), 7, 4, 3, 0.1, 2.0, 0.8857142857142858,
1.025, 0.7000000000000001, 4.25, 4.666666666666667, 0.33599999999999997,
0.002, 0.16, 0.047999999999999994, 0.057999999999999996, 0.03466666666666667
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
orders.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), 14, 7, 7, 0.1, 2.0, 0.8857142857142858,
0.8857142857142856, 0.8857142857142858, 4.428571428571429, 4.428571428571429,
0.672, 0.002, 0.16, 0.048, 0.048, 0.047999999999999994
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
orders['c'].stats(),
orders.stats(column='c')
)
pd.testing.assert_series_equal(
orders['c'].stats(),
orders.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
orders_grouped['g2'].stats(),
orders_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
orders_grouped['g2'].stats(),
orders.stats(column='g2', group_by=group_by)
)
stats_df = orders.stats(agg_func=None)
assert stats_df.shape == (4, 19)
pd.testing.assert_index_equal(stats_df.index, orders.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# trades.py ############# #
exit_trades = vbt.ExitTrades.from_orders(orders)
exit_trades_grouped = vbt.ExitTrades.from_orders(orders_grouped)
class TestExitTrades:
def test_mapped_fields(self):
for name in trade_dt.names:
if name == 'return':
np.testing.assert_array_equal(
getattr(exit_trades, 'returns').values,
exit_trades.values[name]
)
else:
np.testing.assert_array_equal(
getattr(exit_trades, name).values,
exit_trades.values[name]
)
def test_close(self):
pd.testing.assert_frame_equal(
exit_trades.close,
close
)
pd.testing.assert_series_equal(
exit_trades['a'].close,
close['a']
)
pd.testing.assert_frame_equal(
exit_trades_grouped['g1'].close,
close[['a', 'b']]
)
assert exit_trades.replace(close=None)['a'].close is None
def test_records_arr(self):
record_arrays_close(
exit_trades.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2),
(4, 1, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, -1.95, -1.7875, 1, 1, 3),
(5, 1, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, -0.296, -2.71333333, 1, 1, 3),
(6, 1, 1., 5, 6., 0.06, 6, 7., 0.07, -1.13, -0.18833333, 1, 1, 4),
(7, 1, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 1, 0, 5),
(8, 2, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 6),
(9, 2, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 6),
(10, 2, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 7),
(11, 2, 1., 6, 7., 0.07, 7, 8., 0.08, -1.15, -0.16428571, 1, 1, 8),
(12, 2, 1., 7, 8., 0.08, 7, 8., 0., -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
reversed_col_orders = orders.replace(records_arr=np.concatenate((
orders.values[orders.values['col'] == 2],
orders.values[orders.values['col'] == 1],
orders.values[orders.values['col'] == 0]
)))
record_arrays_close(
vbt.ExitTrades.from_orders(reversed_col_orders).values,
exit_trades.values
)
def test_records_readable(self):
records_readable = exit_trades.records_readable
np.testing.assert_array_equal(
records_readable['Exit Trade Id'].values,
np.array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'c', 'c', 'c', 'c', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Size'].values,
np.array([
1.0, 0.10000000000000009, 1.0, 2.0, 1.0, 0.10000000000000009, 1.0,
2.0, 1.0, 0.10000000000000009, 1.0, 1.0, 1.0
])
)
np.testing.assert_array_equal(
records_readable['Entry Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-01T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-01T00:00:00.000000000', '2020-01-01T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-01T00:00:00.000000000', '2020-01-01T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-07T00:00:00.000000000',
'2020-01-08T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Avg Entry Price'].values,
np.array([
1.0909090909090908, 1.0909090909090908, 6.0, 8.0,
1.0909090909090908, 1.0909090909090908, 6.0, 8.0,
1.0909090909090908, 1.0909090909090908, 6.0, 7.0, 8.0
])
)
np.testing.assert_array_equal(
records_readable['Entry Fees'].values,
np.array([
0.010909090909090908, 0.0010909090909090918, 0.06, 0.16,
0.010909090909090908, 0.0010909090909090918, 0.06, 0.16,
0.010909090909090908, 0.0010909090909090918, 0.06, 0.07, 0.08
])
)
np.testing.assert_array_equal(
records_readable['Exit Timestamp'].values,
np.array([
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-08T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Avg Exit Price'].values,
np.array([
3.0, 4.0, 7.0, 8.0, 3.0, 4.0, 7.0, 8.0, 3.0, 4.0, 7.0, 8.0, 8.0
])
)
np.testing.assert_array_equal(
records_readable['Exit Fees'].values,
np.array([
0.03, 0.004, 0.07, 0.0, 0.03, 0.004, 0.07, 0.0, 0.03, 0.004, 0.07, 0.08, 0.0
])
)
np.testing.assert_array_equal(
records_readable['PnL'].values,
np.array([
1.8681818181818182, 0.2858181818181821, 0.8699999999999999, -0.16,
-1.9500000000000002, -0.29600000000000026, -1.1300000000000001,
-0.16, 1.8681818181818182, 0.2858181818181821, 0.8699999999999999,
-1.1500000000000001, -0.08
])
)
np.testing.assert_array_equal(
records_readable['Return'].values,
np.array([
1.7125000000000001, 2.62, 0.145, -0.01, -1.7875000000000003,
-2.7133333333333334, -0.18833333333333335, -0.01,
1.7125000000000001, 2.62, 0.145, -0.1642857142857143, -0.01
])
)
np.testing.assert_array_equal(
records_readable['Direction'].values,
np.array([
'Long', 'Long', 'Long', 'Long', 'Short', 'Short', 'Short',
'Short', 'Long', 'Long', 'Long', 'Short', 'Long'
])
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Closed', 'Closed', 'Closed', 'Open', 'Closed', 'Closed', 'Closed',
'Open', 'Closed', 'Closed', 'Closed', 'Closed', 'Open'
])
)
np.testing.assert_array_equal(
records_readable['Position Id'].values,
np.array([
0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9
])
)
def test_duration(self):
np.testing.assert_array_almost_equal(
exit_trades['a'].duration.values,
np.array([2, 3, 1, 1])
)
np.testing.assert_array_almost_equal(
exit_trades.duration.values,
np.array([2, 3, 1, 1, 2, 3, 1, 1, 2, 3, 1, 1, 1])
)
def test_winning_records(self):
assert isinstance(exit_trades.winning, vbt.ExitTrades)
assert exit_trades.winning.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].winning.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].winning.values,
exit_trades.winning['a'].values
)
record_arrays_close(
exit_trades.winning.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(8, 2, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 6),
(9, 2, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 6),
(10, 2, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 7)
], dtype=trade_dt)
)
def test_losing_records(self):
assert isinstance(exit_trades.losing, vbt.ExitTrades)
assert exit_trades.losing.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].losing.values,
np.array([
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].losing.values,
exit_trades.losing['a'].values
)
record_arrays_close(
exit_trades.losing.values,
np.array([
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2),
(4, 1, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, -1.95, -1.7875, 1, 1, 3),
(5, 1, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, -0.296, -2.71333333, 1, 1, 3),
(6, 1, 1., 5, 6., 0.06, 6, 7., 0.07, -1.13, -0.18833333, 1, 1, 4),
(7, 1, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 1, 0, 5),
(11, 2, 1., 6, 7., 0.07, 7, 8., 0.08, -1.15, -0.16428571, 1, 1, 8),
(12, 2, 1., 7, 8., 0.08, 7, 8., 0., -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
def test_win_rate(self):
assert exit_trades['a'].win_rate() == 0.75
pd.testing.assert_series_equal(
exit_trades.win_rate(),
pd.Series(
np.array([0.75, 0., 0.6, np.nan]),
index=close.columns
).rename('win_rate')
)
pd.testing.assert_series_equal(
exit_trades_grouped.win_rate(),
pd.Series(
np.array([0.375, 0.6]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('win_rate')
)
def test_winning_streak(self):
np.testing.assert_array_almost_equal(
exit_trades['a'].winning_streak.values,
np.array([1, 2, 3, 0])
)
np.testing.assert_array_almost_equal(
exit_trades.winning_streak.values,
np.array([1, 2, 3, 0, 0, 0, 0, 0, 1, 2, 3, 0, 0])
)
def test_losing_streak(self):
np.testing.assert_array_almost_equal(
exit_trades['a'].losing_streak.values,
np.array([0, 0, 0, 1])
)
np.testing.assert_array_almost_equal(
exit_trades.losing_streak.values,
np.array([0, 0, 0, 1, 1, 2, 3, 4, 0, 0, 0, 1, 2])
)
def test_profit_factor(self):
assert exit_trades['a'].profit_factor() == 18.9
pd.testing.assert_series_equal(
exit_trades.profit_factor(),
pd.Series(
np.array([18.9, 0., 2.45853659, np.nan]),
index=ts2.columns
).rename('profit_factor')
)
pd.testing.assert_series_equal(
exit_trades_grouped.profit_factor(),
pd.Series(
np.array([0.81818182, 2.45853659]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('profit_factor')
)
def test_expectancy(self):
assert exit_trades['a'].expectancy() == 0.716
pd.testing.assert_series_equal(
exit_trades.expectancy(),
pd.Series(
np.array([0.716, -0.884, 0.3588, np.nan]),
index=ts2.columns
).rename('expectancy')
)
pd.testing.assert_series_equal(
exit_trades_grouped.expectancy(),
pd.Series(
np.array([-0.084, 0.3588]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('expectancy')
)
def test_sqn(self):
assert exit_trades['a'].sqn() == 1.634155521947584
pd.testing.assert_series_equal(
exit_trades.sqn(),
pd.Series(
np.array([1.63415552, -2.13007307, 0.71660403, np.nan]),
index=ts2.columns
).rename('sqn')
)
pd.testing.assert_series_equal(
exit_trades_grouped.sqn(),
pd.Series(
np.array([-0.20404671, 0.71660403]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('sqn')
)
def test_long_records(self):
assert isinstance(exit_trades.long, vbt.ExitTrades)
assert exit_trades.long.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].long.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].long.values,
exit_trades.long['a'].values
)
record_arrays_close(
exit_trades.long.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2),
(8, 2, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 6),
(9, 2, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 6),
(10, 2, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 7),
(12, 2, 1., 7, 8., 0.08, 7, 8., 0., -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
def test_short_records(self):
assert isinstance(exit_trades.short, vbt.ExitTrades)
assert exit_trades.short.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].short.values,
np.array([], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].short.values,
exit_trades.short['a'].values
)
record_arrays_close(
exit_trades.short.values,
np.array([
(4, 1, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, -1.95, -1.7875, 1, 1, 3),
(5, 1, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, -0.296, -2.71333333, 1, 1, 3),
(6, 1, 1., 5, 6., 0.06, 6, 7., 0.07, -1.13, -0.18833333, 1, 1, 4),
(7, 1, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 1, 0, 5),
(11, 2, 1., 6, 7., 0.07, 7, 8., 0.08, -1.15, -0.16428571, 1, 1, 8)
], dtype=trade_dt)
)
def test_open_records(self):
assert isinstance(exit_trades.open, vbt.ExitTrades)
assert exit_trades.open.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].open.values,
np.array([
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].open.values,
exit_trades.open['a'].values
)
record_arrays_close(
exit_trades.open.values,
np.array([
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2),
(7, 1, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 1, 0, 5),
(12, 2, 1., 7, 8., 0.08, 7, 8., 0., -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
def test_closed_records(self):
assert isinstance(exit_trades.closed, vbt.ExitTrades)
assert exit_trades.closed.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].closed.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].closed.values,
exit_trades.closed['a'].values
)
record_arrays_close(
exit_trades.closed.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(4, 1, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, -1.95, -1.7875, 1, 1, 3),
(5, 1, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, -0.296, -2.71333333, 1, 1, 3),
(6, 1, 1., 5, 6., 0.06, 6, 7., 0.07, -1.13, -0.18833333, 1, 1, 4),
(8, 2, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 6),
(9, 2, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 6),
(10, 2, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 7),
(11, 2, 1., 6, 7., 0.07, 7, 8., 0.08, -1.15, -0.16428571, 1, 1, 8)
], dtype=trade_dt)
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'First Trade Start', 'Last Trade End',
'Coverage', 'Overlap Coverage', 'Total Records', 'Total Long Trades',
'Total Short Trades', 'Total Closed Trades', 'Total Open Trades',
'Open Trade PnL', 'Win Rate [%]', 'Max Win Streak', 'Max Loss Streak',
'Best Trade [%]', 'Worst Trade [%]', 'Avg Winning Trade [%]',
'Avg Losing Trade [%]', 'Avg Winning Trade Duration',
'Avg Losing Trade Duration', 'Profit Factor', 'Expectancy', 'SQN'
], dtype='object')
pd.testing.assert_series_equal(
exit_trades.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-08 00:00:00'), pd.Timedelta('5 days 08:00:00'),
pd.Timedelta('2 days 00:00:00'), 3.25, 2.0, 1.25, 2.5, 0.75, -0.1,
58.333333333333336, 2.0, 1.3333333333333333, 168.38888888888889,
-91.08730158730158, 149.25, -86.3670634920635, pd.Timedelta('2 days 00:00:00'),
pd.Timedelta('1 days 12:00:00'), np.inf, 0.11705555555555548, 0.18931590012681135
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
exit_trades.stats(settings=dict(incl_open=True)),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-08 00:00:00'), pd.Timedelta('5 days 08:00:00'),
pd.Timedelta('2 days 00:00:00'), 3.25, 2.0, 1.25, 2.5, 0.75, -0.1,
58.333333333333336, 2.0, 2.3333333333333335, 174.33333333333334,
-96.25396825396825, 149.25, -42.39781746031746, pd.Timedelta('2 days 00:00:00'),
pd.Timedelta('1 days 06:00:00'), 7.11951219512195, 0.06359999999999993, 0.07356215977397455
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
exit_trades.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-08 00:00:00'), pd.Timedelta('5 days 00:00:00'),
pd.Timedelta('2 days 00:00:00'), 4, 4, 0, 3, 1, -0.16, 100.0, 3, 0,
262.0, 14.499999999999998, 149.25, np.nan, pd.Timedelta('2 days 00:00:00'),
pd.NaT, np.inf, 1.008, 2.181955050824476
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
exit_trades.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-08 00:00:00'), pd.Timedelta('5 days 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 8, 4, 4, 6, 2, -0.32, 50.0, 3, 3, 262.0,
-271.3333333333333, 149.25, -156.30555555555557, pd.Timedelta('2 days 00:00:00'),
pd.Timedelta('2 days 00:00:00'), 0.895734597156398, -0.058666666666666756, -0.10439051512510047
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_index_equal(
exit_trades.stats(tags='trades').index,
pd.Index([
'First Trade Start', 'Last Trade End', 'Total Long Trades',
'Total Short Trades', 'Total Closed Trades', 'Total Open Trades',
'Open Trade PnL', 'Win Rate [%]', 'Max Win Streak', 'Max Loss Streak',
'Best Trade [%]', 'Worst Trade [%]', 'Avg Winning Trade [%]',
'Avg Losing Trade [%]', 'Avg Winning Trade Duration',
'Avg Losing Trade Duration', 'Profit Factor', 'Expectancy', 'SQN'
], dtype='object')
)
pd.testing.assert_series_equal(
exit_trades['c'].stats(),
exit_trades.stats(column='c')
)
pd.testing.assert_series_equal(
exit_trades['c'].stats(),
exit_trades.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
exit_trades_grouped['g2'].stats(),
exit_trades_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
exit_trades_grouped['g2'].stats(),
exit_trades.stats(column='g2', group_by=group_by)
)
stats_df = exit_trades.stats(agg_func=None)
assert stats_df.shape == (4, 25)
        pd.testing.assert_index_equal(stats_df.index, exit_trades.wrapper.columns)
        pd.testing.assert_index_equal(stats_df.columns, stats_index)
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#PF_SF Scenario
##Set parameters
#Parameters for primary forest
initAGB = 233 #source: van Beijma et al. (2018)
initAGB_min = 233-72
initAGB_max = 233 + 72
#parameters for secondary forest. Source: Busch et al. (2019)
coeff_MF_nonpl = 11.47
coeff_DF_nonpl = 11.24
coeff_GL_nonpl = 9.42
coeff_MF_pl = 17.2
tf = 201
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
df1 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_S1')
df3 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_E')
t = range(0,tf,1)
c_firewood_energy_S1 = df1['Firewood_other_energy_use'].values
c_firewood_energy_E = df3['Firewood_other_energy_use'].values
#print(c_firewood_energy_S1)
#print(c_firewood_energy_E)
#%%
#Step (2_2): C loss from the harvesting/clear cut as wood pellets
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_E')
c_pellets_E = dfE['Wood_pellets'].values
#%%
#Step (3): Aboveground biomass (AGB) decomposition
#S1
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_S1')
tf = 201
t = np.arange(tf)
def decomp_S1(t,remainAGB_S1):
return (1-(1-np.exp(-a*t))**b)*remainAGB_S1
#set zero matrix
output_decomp_S1 = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S1 in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_S1[i:,i] = decomp_S1(t[:len(t)-i],remain_part_S1)
print(output_decomp_S1[:,:4])
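#Illustrative sketch (added for clarity, not part of the original workflow): each
#column i of 'output_decomp_S1' holds the decay curve of the remaining-AGB value
#reported for year i, shifted so that it only starts decomposing from year i onward.
#A toy example with 3 cohorts and hypothetical remaining-AGB values shows the shifting:
toy_t = np.arange(5)
toy_remain = np.array([100.0, 50.0, 25.0]) #hypothetical tC values
toy_out = np.zeros((len(toy_t), len(toy_remain)))
for j, toy_part in enumerate(toy_remain):
    #rows before year j stay zero; from year j onward the cohort follows the decay curve
    toy_out[j:, j] = decomp_S1(toy_t[:len(toy_t)-j], toy_part)
print(toy_out)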
#find the yearly emissions from decomposition by calculating the differences between consecutive elements in each column of 'output_decomp_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between consecutive elements
subs_matrix_S1 = np.zeros((len(t)-1,len(df['C_remainAGB'].values)))
i = 0
while i < tf:
subs_matrix_S1[:,i] = np.diff(output_decomp_S1[:,i])
i = i + 1
print(subs_matrix_S1[:,:4])
print(len(subs_matrix_S1))
#the positive jumps in the differences mark the year a cohort enters the matrix rather than an actual emission,
#so we replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S1 = subs_matrix_S1.clip(max=0)
print(subs_matrix_S1[:,:4])
#make the results as absolute values
subs_matrix_S1 = abs(subs_matrix_S1)
print(subs_matrix_S1[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S1 = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S1)
subs_matrix_S1 = np.vstack((zero_matrix_S1, subs_matrix_S1))
print(subs_matrix_S1[:,:4])
#sum all columns of the subs_matrix into a single column vector of total yearly emissions
matrix_tot_S1 = (tf,1)
decomp_tot_S1 = np.zeros(matrix_tot_S1)
i = 0
while i < tf:
decomp_tot_S1[:,0] = decomp_tot_S1[:,0] + subs_matrix_S1[:,i]
i = i + 1
print(decomp_tot_S1[:,0])
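#Illustrative check (added for clarity, not part of the original workflow): the
#diff/clip/abs/vstack pipeline above converts the cumulative remaining carbon of
#every cohort into yearly emissions. On a single toy decay curve the recovered
#yearly emissions must add up to the total carbon lost over the period:
toy_curve = decomp_S1(np.arange(6), 100.0) #hypothetical 100 tC cohort
toy_yearly = np.abs(np.diff(toy_curve).clip(max=0)) #yearly emissions for years 1..5
print(toy_yearly)
print(np.isclose(toy_yearly.sum(), toy_curve[0] - toy_curve[-1])) #expected: True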
#E
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_E')
tf = 201
t = np.arange(tf)
def decomp_E_trial(t,remainAGB_E):
return (1-(1-np.exp(-a*t))**b)*remainAGB_E
#set zero matrix
output_decomp_E = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_E in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_E[i:,i] = decomp_E_trial(t[:len(t)-i],remain_part_E)
print(output_decomp_E[:,:4])
#find the yearly emissions from decomposition by calculating the differences between consecutive elements in each column of 'output_decomp_E'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between consecutive elements
subs_matrix_E = np.zeros((len(t)-1,len(df['C_remainAGB'].values)))
i = 0
while i < tf:
subs_matrix_E[:,i] = np.diff(output_decomp_E[:,i])
i = i + 1
print(subs_matrix_E[:,:4])
print(len(subs_matrix_E))
#the positive jumps in the differences mark the year a cohort enters the matrix rather than an actual emission,
#so we replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_E = subs_matrix_E.clip(max=0)
print(subs_matrix_E[:,:4])
#make the results as absolute values
subs_matrix_E = abs(subs_matrix_E)
print(subs_matrix_E[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_E_trial = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_E_trial)
subs_matrix_E = np.vstack((zero_matrix_E_trial, subs_matrix_E))
print(subs_matrix_E[:,:4])
#sum all columns of the subs_matrix into a single column vector of total yearly emissions
matrix_tot_E = (tf,1)
decomp_tot_E = np.zeros(matrix_tot_E)
i = 0
while i < tf:
decomp_tot_E[:,0] = decomp_tot_E[:,0] + subs_matrix_E[:,i]
i = i + 1
print(decomp_tot_E[:,0])
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_S1,label='S1')
plt.plot(t,decomp_tot_E,label='E')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
type(decomp_tot_E[:,0])
#%%
#Step (4): Dynamic stock model of in-use wood materials
#HWP from primary forest, 35 year-old building materials lifetime
from dynamic_stock_model import DynamicStockModel
df1 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_S1')
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_E')
#product lifetime
#building materials
B = 35
TestDSM1 = DynamicStockModel(t = df1['Year'].values, i = df1['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSME = DynamicStockModel(t = dfE['Year'].values, i = dfE['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
CheckStr1, ExitFlag1 = TestDSM1.dimension_check()
CheckStrE, ExitFlagE = TestDSME.dimension_check()
Stock_by_cohort1, ExitFlag1 = TestDSM1.compute_s_c_inflow_driven()
Stock_by_cohortE, ExitFlagE = TestDSME.compute_s_c_inflow_driven()
S1, ExitFlag1 = TestDSM1.compute_stock_total()
SE, ExitFlagE = TestDSME.compute_stock_total()
O_C1, ExitFlag1 = TestDSM1.compute_o_c_from_s_c()
O_CE, ExitFlagE = TestDSME.compute_o_c_from_s_c()
O1, ExitFlag1 = TestDSM1.compute_outflow_total()
OE, ExitFlagE = TestDSME.compute_outflow_total()
DS1, ExitFlag1 = TestDSM1.compute_stock_change()
DSE, ExitFlagE = TestDSME.compute_stock_change()
Bal1, ExitFlag1 = TestDSM1.check_stock_balance()
BalE, ExitFlagE = TestDSME.check_stock_balance()
#print output flow
print(TestDSM1.o)
print(TestDSME.o)
#%%
#Step (5): Biomass growth
t = range(0,tf,1)
#calculate the biomass and carbon content of moist forest
def Cgrowth_1(t):
return (44/12*1000*coeff_MF_nonpl*(np.sqrt(t)))
flat_list_moist = Cgrowth_1(t)
#calculate the biomass and carbon content of dry forest
def Cgrowth_2(t):
return (44/12*1000*coeff_DF_nonpl*(np.sqrt(t)))
flat_list_dry = Cgrowth_2(t)
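#note: 44/12 converts a mass of carbon to the corresponding mass of CO2 (molar masses 44 vs 12 g/mol);
#the factor 1000 and the growth coefficients coeff_MF_nonpl / coeff_DF_nonpl are assumed to be defined earlier in the full script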
#plotting
plt.plot (t,flat_list_moist, label = 'Moist Forest, non-plantation')
plt.plot (t,flat_list_dry, label = 'Dry forest, non-plantation')
plt.xlim([0, 200])
plt.xlabel('Year')
plt.ylabel('Carbon stock (tC/ha)')
plt.title('')
plt.legend(loc='upper left')
plt.savefig('C:\\Work\\Programming\\C_removal_fig.png', dpi=300)
plt.show()
###Yearly Sequestration
###Moist Forest
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_moist'(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_moist = [p - q for q, p in zip(flat_list_moist, flat_list_moist[1:])]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_moist.insert(0,var)
#make 'flat_list_moist' elements negative numbers to denote sequestration
flat_list_moist = [ -x for x in flat_list_moist]
print(flat_list_moist)
#Dry forest
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_dry'(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_dry = [p - q for q, p in zip(flat_list_dry, flat_list_dry[1:])]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_dry.insert(0,var)
#make 'flat_list_dry' elements negative numbers to denote sequestration
flat_list_dry = [ -x for x in flat_list_dry]
print(flat_list_dry)
#%%
#Step(6): post-harvest processing of wood
#post-harvest wood processing
df1 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_S1')
df3 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_E')
t = range(0,tf,1)
PH_Emissions_HWP1_S1 = df1['PH_Emissions_HWP'].values
PH_Emissions_HWP1_E = df3['PH_Emissions_HWP'].values
#%%
#Step (7_1): landfill gas decomposition (CH4)
#CH4 decomposition
hl = 20 #half-life (years)
k = (np.log(2))/hl
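#first-order decay: with hl = 20 years, k = ln(2)/20 ≈ 0.0347 per year, so the fraction remaining after t years is exp(-k*t)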
#S1
df1_CH4 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_S1')
tf = 201
t = np.arange(tf)
def decomp_CH4_S1(t,Landfill_decomp_CH4_S1):
return (1-(1-np.exp(-k*t)))*Landfill_decomp_CH4_S1
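#note: (1-(1-np.exp(-k*t))) simplifies to np.exp(-k*t), i.e. the fraction of the landfill carbon still remaining after t years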
#set zero matrix
output_decomp_CH4_S1 = np.zeros((len(t),len(df1_CH4['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S1 in enumerate(df1_CH4['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_S1[i:,i] = decomp_CH4_S1(t[:len(t)-i],remain_part_CH4_S1)
print(output_decomp_CH4_S1[:,:4])
#find the yearly emissions from decomposition by taking the differences between consecutive elements in each column of 'output_decomp_CH4_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
subs_matrix_CH4_S1 = np.zeros((len(t)-1,len(df1_CH4['Landfill_decomp_CH4'].values)))
i = 0
while i < tf:
subs_matrix_CH4_S1[:,i] = np.diff(output_decomp_CH4_S1[:,i])
i = i + 1
print(subs_matrix_CH4_S1[:,:4])
print(len(subs_matrix_CH4_S1))
#np.diff gives negative values for the amount decomposed in each year; the only positive values are the spurious jumps where a column
#switches from its leading zeros to the decay series, so replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S1 = subs_matrix_CH4_S1.clip(max=0)
print(subs_matrix_CH4_S1[:,:4])
#make the results as absolute values
subs_matrix_CH4_S1 = abs(subs_matrix_CH4_S1)
print(subs_matrix_CH4_S1[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S1 = np.zeros((len(t)-200,len(df1_CH4['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S1)
subs_matrix_CH4_S1 = np.vstack((zero_matrix_CH4_S1, subs_matrix_CH4_S1))
print(subs_matrix_CH4_S1[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_S1 = (tf,1)
decomp_tot_CH4_S1 = np.zeros(matrix_tot_CH4_S1)
i = 0
while i < tf:
decomp_tot_CH4_S1[:,0] = decomp_tot_CH4_S1[:,0] + subs_matrix_CH4_S1[:,i]
i = i + 1
print(decomp_tot_CH4_S1[:,0])
#E
dfE_CH4 = | pd.read_excel('C:\\Work\\Programming\\Practice\\PF_SF_EC.xlsx', 'PF_SF_E') | pandas.read_excel |
import csv
import datetime
import random
from operator import itemgetter
import lightgbm as lgb
import numpy as np
import pandas as pd
from catboost import CatBoostClassifier, CatBoostRegressor
from sklearn.ensemble import (
AdaBoostClassifier,
AdaBoostRegressor,
BaggingClassifier,
BaggingRegressor,
RandomForestClassifier,
RandomForestRegressor,
)
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import (
ElasticNet,
LogisticRegression,
SGDClassifier,
SGDRegressor,
)
from sklearn.model_selection import GridSearchCV
from sklearn.multioutput import MultiOutputClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from xgboost import XGBClassifier, XGBRegressor
class TrainModel:
def __init__(self):
self.classifier_param_list = [
{
"model": [DecisionTreeClassifier()],
"model__min_samples_split": [0.25, 0.5, 1.0],
"model__max_depth": [5, 10, 15],
},
{
"model": [RandomForestClassifier()],
"model__min_samples_split": [0.25, 0.5, 1.0],
"model__max_depth": [5, 10, 15],
},
{
"model": [MLPClassifier()],
"model__activation": ["identity", "logistic", "tanh", "relu"],
"model__alpha": [0.001, 0.01, 0.1],
},
{
"model": [LogisticRegression(fit_intercept=False)],
"model__C": [1, 5, 10],
},
{
"model": [BaggingClassifier()],
"model__n_estimators": [5, 10, 15],
"model__max_features": [0.25, 0.5, 1.0],
},
{
"model": [AdaBoostClassifier()],
"model__n_estimators": [5, 10, 15],
"model__learning_rate": [0.001, 0.01, 0.1],
},
{
"model": [XGBClassifier()],
"model__n_estimators": [5, 10, 15],
"model__learning_rate": [0.001, 0.01, 0.1],
},
{
"model": [lgb.LGBMClassifier()],
"model__learning_rate": [0.01, 0.001],
},
{
"model": [CatBoostClassifier()],
"model__learning_rate": [0.01, 0.001],
},
]
self.regressor_param_list = [
{
"model": [DecisionTreeRegressor()],
"model__min_samples_split": [0.25, 0.5, 1.0],
"model__max_depth": [5, 10, 15],
},
{
"model": [RandomForestRegressor()],
"model__min_samples_split": [0.25, 0.5, 1.0],
"model__max_depth": [5, 10, 15],
},
{
"model": [MLPRegressor()],
"model__activation": ["identity", "logistic", "tanh", "relu"],
"model__alpha": [0.001, 0.01, 0.1],
},
{
"model": [ElasticNet(fit_intercept=False)],
"model__alpha": [0.001, 0.01, 0.1],
"model__l1_ratio": [0.25, 0.5, 1.0],
},
{
"model": [BaggingRegressor()],
"model__n_estimators": [5, 10, 15],
"model__max_features": [0.25, 0.5, 1.0],
},
{
"model": [AdaBoostRegressor()],
"model__n_estimators": [5, 10, 15],
"model__learning_rate": [0.001, 0.01, 0.1],
},
{
"model": [XGBRegressor()],
"model__n_estimators": [5, 10, 15],
"model__learning_rate": [0.001, 0.01, 0.1],
},
{
"model": [lgb.LGBMRegressor()],
"model__learning_rate": [0.01, 0.001],
},
{
"model": [CatBoostRegressor()],
"model__learning_rate": [0.01, 0.001],
},
]
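# NOTE: the "model__<param>" keys follow scikit-learn's Pipeline convention of
# <step name>__<parameter>; each grid entry also swaps out the "model" step itself.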
def train_test_split_by_ids(self, df, id_col, target_col, prop_train):
"""
Split df into train and test sets by id so that all rows sharing an id
end up in the same split.
Parameters
--------
df: DataFrame
id_col: str
Name of the column whose values identify the groups to split on.
target_col: str
Name of the target column.
prop_train: float
Proportion of unique ids (not rows) assigned to the training set.
Returns
--------
X_train: DataFrame
X_test: DataFrame
y_train: DataFrame
y_test: DataFrame
"""
ids = list(set(df[id_col].values))
random.shuffle(ids)
len_ids = len(ids)
number_to_select = int(len_ids * prop_train)
X_train_ids = pd.DataFrame(ids[:number_to_select], columns=[id_col])
X_test_ids = pd.DataFrame(ids[number_to_select:], columns=[id_col])
X_train = pd.merge(df, X_train_ids, how="inner")
X_test = pd.merge(df, X_test_ids, how="inner")
y_train = X_train[target_col]
y_test = X_test[target_col]
return X_train, X_test, y_train, y_test
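# Illustrative usage (hypothetical DataFrame and column names, not part of the original code):
#   tm = TrainModel()
#   X_train, X_test, y_train, y_test = tm.train_test_split_by_ids(
#       df, id_col="customer_id", target_col="churned", prop_train=0.8
#   )
# Splitting on ids keeps every row for a given id in the same split, which avoids
# leakage when an id appears in multiple rows.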
def model_testing(
self,
X_train,
y_train,
model_type,
tie_breaker_scoring_method,
save_to_csv=True,
file_name="model_results",
multiclass=False,
):
"""
Grid searches over a list of commonly used models and hyperparameters and reports the best-scoring combinations.
Parameters
--------
X_train: DataFrame
y_train: DataFrame
model_type: str
'classification' or 'regression'
tie_breaker_scoring_method: str
For classification: "precision", "recall", or "f1"
For regression: "neg_root_mean_squared_error", "neg_median_absolute_error", or "r2"
save_to_csv: bool
file_name: str
multiclass: bool
Returns
--------
best_params: dict
"""
if model_type == "classification":
model = Pipeline(
[
("model", LogisticRegression()),
]
)
# Only some models/scoring work with multiclass
if multiclass:
param_list = self.classifier_param_list[:3]
lst_scoring_methods = [
"recall_weighted",
"precision_weighted",
"f1_weighted",
]
else:
# the full classifier grid is available for binary targets
param_list = self.classifier_param_list
lst_scoring_methods = ["recall", "precision", "f1"]
else:
model = Pipeline(
[
("model", ElasticNet()),
]
)
lst_scoring_methods = [
"neg_root_mean_squared_error",
"neg_median_absolute_error",
"r2",
]
param_list = self.regressor_param_list
g = GridSearchCV(
model,
param_list,
cv=3,
n_jobs=-2,
verbose=2,
scoring=lst_scoring_methods,
refit=tie_breaker_scoring_method,
)
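# With multiple scoring metrics, GridSearchCV needs `refit` to name the single
# metric used to pick the best model (which is then refit on the full training
# data); the caller-supplied tie-breaker metric plays that role here.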
g.fit(X_train, y_train)
if model_type == "classification":
if multiclass:
all_scores = list(
zip(
g.cv_results_["params"],
g.cv_results_["mean_test_recall_weighted"],
g.cv_results_["mean_test_precision_weighted"],
g.cv_results_["mean_test_f1_weighted"],
)
)
all_scores.sort(key=lambda x: x[1], reverse=True)
formatted_scores = [
(
"Params: {}".format(x[0]),
"Mean Recall Weighted: {0:.4f}".format(x[1]),
"Mean Precision Weighted: {0:.4f}".format(x[2]),
"Mean F1 Weighted: {0:.4f}".format(x[3]),
)
for x in all_scores
]
else:
all_scores = list(
zip(
g.cv_results_["params"],
g.cv_results_["mean_test_recall"],
g.cv_results_["mean_test_precision"],
g.cv_results_["mean_test_f1"],
)
)
all_scores.sort(key=lambda x: x[1], reverse=True)
formatted_scores = [
(
"Params: {}".format(x[0]),
"Mean Recall: {0:.4f}".format(x[1]),
"Mean Precision: {0:.4f}".format(x[2]),
"Mean F1 Score: {0:.4f}".format(x[3]),
)
for x in all_scores
]
else:
all_scores = list(
zip(
g.cv_results_["params"],
g.cv_results_["mean_test_neg_root_mean_squared_error"],
g.cv_results_["mean_test_neg_median_absolute_error"],
g.cv_results_["mean_test_r2"],
)
)
all_scores.sort(key=lambda x: x[1], reverse=True)
formatted_scores = [
(
"Params: {}".format(x[0]),
"Mean Negative Root Mean Squared Errror: {0:.4f}".format(x[1]),
"Mean Negative Median Absolute Error: {0:.4f}".format(x[2]),
"Mean R2: {0:.4f}".format(x[3]),
)
for x in all_scores
]
# Cleaner printing
print("\n\n")
print(
"*** Best Parameters Using {} | Tie Breaker: {} | {} ***".format(
lst_scoring_methods,
tie_breaker_scoring_method,
datetime.datetime.today().strftime("%Y-%m-%d %H:%M"),
)
)
[
print("{}\n{}\n{}\n{}\n\n".format(x[0], x[1], x[2], x[3]))
for x in formatted_scores[:30]
]
if save_to_csv:
lst_dict = []
for model in all_scores[:30]:
d = dict()
for k, v in zip(
list(model[0].keys()) + lst_scoring_methods,
list(model[0].values()) + [x for x in model[1:]],
):
d[k] = v
lst_dict.append(d)
dateTimeObj = datetime.datetime.now()
timestampStr = dateTimeObj.strftime("%m-%d-%Y (%H:%M:%S.%f)")
temp_df = | pd.DataFrame(lst_dict) | pandas.DataFrame |
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
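# The factory functions below build DataSet subclasses whose `num_announcements`
# attribute tells the estimates loaders how many quarters out to surface
# (1 = the immediately next/previous quarter, 2 = one quarter further out, and so on).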
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
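# Illustrative call (hypothetical values): with sids=[0, 10] and
# tuples=[(0, 100.0, pd.Timestamp("2015-01-07")), (10, np.NaN, start_date)],
# sid 0's estimate of 100.0 is forward-filled from 2015-01-07 through end_date,
# sid 10 stays NaN, and the result is indexed by (at_date, knowledge_date).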
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_load_one_day()
Tests that loading multiple estimate columns for a single day produces
the expected output for the loader under test.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests the expected output of the previous-quarter loader for a single day.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests the expected output of the next-quarter loader for a single day.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that requests estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
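# e.g. (with the fixture dates above) for start_date 2015-01-09 this is 5:
# the sessions 2015-01-05 through 2015-01-09 inclusive.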
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-21"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111, pd.Timestamp("2015-01-22")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 221, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-02-09")
]
# We never get estimates for sid 10 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-09"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-20"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-01-22")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 310, pd.Timestamp("2015-01-09")),
(10, 311, pd.Timestamp("2015-01-15")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-11")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp("2015-01-14")
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-09"),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp("2015-01-20"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
],
"estimate": [130.0, 131.0, 230.0, 231.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30,
}
)
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
# split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-15")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [140.0, 240.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40,
}
)
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-12")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [150.0, 250.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50,
}
)
return pd.concat(
[
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
]
)
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
# split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": (-1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100),
"effective_date": (
pd.Timestamp("2014-01-01"), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp("2015-01-07"),
# Split before Q1 event
pd.Timestamp("2015-01-09"),
# Split before Q1 event
pd.Timestamp("2015-01-13"),
# Split before Q1 event
pd.Timestamp("2015-01-15"),
# Split before Q1 event
pd.Timestamp("2015-01-18"),
# Split after Q1 event and before Q2 event
pd.Timestamp("2015-01-30"),
# Filter out - this is after our date index
pd.Timestamp("2016-01-01"),
),
}
)
sid_10_splits = pd.DataFrame(
{
SID_FIELD_NAME: 10,
"ratio": (0.2, 0.3),
"effective_date": (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp("2015-01-07"),
# Apply a single split before Q1 event.
pd.Timestamp("2015-01-20"),
),
}
)
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame(
{
SID_FIELD_NAME: 20,
"ratio": (
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
pd.Timestamp("2015-01-30"),
),
}
)
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame(
{
SID_FIELD_NAME: 30,
"ratio": (8, 9, 10, 11, 12),
"effective_date": (
# Split before the event and before the
# split-asof-date.
pd.Timestamp("2015-01-07"),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp("2015-01-09"),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
),
}
)
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame(
{
SID_FIELD_NAME: 40,
"ratio": (13, 14),
"effective_date": (
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-22"),
),
}
)
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame(
{
SID_FIELD_NAME: 50,
"ratio": (15, 16),
"effective_date": (
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
),
}
)
return pd.concat(
[
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
]
)
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-12")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-01-21")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-01-29")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-30", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-02-10")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 240.0 * 13 * 14, pd.Timestamp("2015-02-10")),
(50, 250.0, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11 * 12, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-02-10")),
(30, 131 * 11 * 12, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-02-10")),
(50, 150.0, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 1 / 4, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120 * 5 / 3, cls.window_test_start_date),
(20, 121 * 5 / 3, pd.Timestamp("2015-01-07")),
(30, 130 * 1 / 10, cls.window_test_start_date),
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-09"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 1 / 4, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120 * 5 / 3, cls.window_test_start_date),
(20, 121 * 5 / 3, pd.Timestamp("2015-01-07")),
(30, 230 * 1 / 10, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-12"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0 * 1 / 16, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 5, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120 * 0.7, cls.window_test_start_date),
(20, 121 * 0.7, pd.Timestamp("2015-01-07")),
(30, 230 * 11, cls.window_test_start_date),
(40, 240, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 5 * 6, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110 * 0.3, pd.Timestamp("2015-01-09")),
(10, 111 * 0.3, pd.Timestamp("2015-01-12")),
(20, 120 * 0.7 * 0.8, cls.window_test_start_date),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-07")),
(30, 230 * 11 * 12, cls.window_test_start_date),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 240 * 13, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, 110 * 0.3, pd.Timestamp("2015-01-09")),
(10, 111 * 0.3, pd.Timestamp("2015-01-12")),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(40, 240 * 13, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-21"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, 110 * 0.3, pd.Timestamp("2015-01-09")),
(10, 111 * 0.3, pd.Timestamp("2015-01-12")),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-22"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, 310 * 0.3, pd.Timestamp("2015-01-09")),
(10, 311 * 0.3, pd.Timestamp("2015-01-15")),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-01-29")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6 * 7, pd.Timestamp("2015-01-12")),
(10, 310 * 0.3, pd.Timestamp("2015-01-09")),
(10, 311 * 0.3, pd.Timestamp("2015-01-15")),
(20, 220 * 0.7 * 0.8 * 0.9, cls.window_test_start_date),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-01-30", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6 * 7, | pd.Timestamp("2015-01-12") | pandas.Timestamp |
"""
Module for header classes and metadata interpreters. This includes interpreting data file headers or dedicated files
to describe data.
"""
from os.path import basename
import pandas as pd
import pytz
from .data import SiteData
from .db import get_table_attributes
from .interpretation import *
from .projection import add_geom, reproject_point_in_dict
from .string_management import *
from .utilities import assign_default_kwargs, get_logger, read_n_lines
def read_InSar_annotation(ann_file):
"""
.ann files describe the InSAR data. Use this function to read all that
information in and return it as a dictionary.
Expected format:
`DEM Original Pixel spacing (arcsec) = 1`
Where this is interpreted as:
`key (units) = [value]`
Then stored in the dictionary as:
`data[key] = {'value':value, 'units':units}`
Values that are numeric and contain a decimal point are converted to
floats; otherwise numeric values are cast to integers. Everything else is
left as strings.
Args:
ann_file: path to a UAVSAR annotation (.ann) file
Returns:
data: Dictionary containing a dictionary for each entry with keys
for value, units and comments
"""
with open(ann_file) as fp:
lines = fp.readlines()
data = {}
# loop through the data and parse
for line in lines:
# Filter out all comments and remove any line returns
info = line.strip().split(';')
comment = info[-1].strip().lower()
info = info[0]
# ignore empty strings
if info and "=" in info:
d = info.split('=')
name, value = d[0], d[1]
# Clean up tabs, spaces and line returns
key = name.split('(')[0].strip().lower()
units = get_encapsulated(name, '()')
if not units:
units = None
else:
units = units[0]
value = value.strip()
# Cast values that can be converted to numbers
if value.strip('-').replace('.', '').isnumeric():
if '.' in value:
value = float(value)
else:
value = int(value)
# Assign each entry as a dictionary with value and units
data[key] = {'value': value, 'units': units, 'comment': comment}
# Convert times to datetimes
for pass_num in ['1', '2']:
for timing in ['start', 'stop']:
key = '{} time of acquisition for pass {}'.format(timing, pass_num)
dt = pd.to_datetime(data[key]['value'])
dt = dt.astimezone(pytz.timezone('US/Mountain'))
data[key]['value'] = dt
return data
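# Illustrative usage sketch (not part of the original module). The file name and
# key below are hypothetical and only assume the `key (units) = value` convention
# documented in the docstring above.
# ann = read_InSar_annotation('uavsar_scene.ann')   # hypothetical path
# spacing = ann['dem original pixel spacing']       # hypothetical key
# spacing['value'], spacing['units']                # -> e.g. (1, 'arcsec')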
class SMPMeasurementLog(object):
"""
Opens and processes the log that describes the SMP measurements. This file
contains notes on all the measurements taken.
This class builds a dataframe from this file. It also reorganizes the
file contents to be more standardized with our database.
Some of this includes merging information in the comments.
File should have the headers:
Date,
Pit ID
SMP instrument #
Fname sufix
Orientation
Snow depth
Flag
Observer
Comments
Attributes:
observer_map: Dictionary mapping name initials to full verbose names
orientation_map: Dictionary mapping the measurement locations relative
to the pit
header: Dictionary containing other header information regarding the
details of measurements
df: Dataframe containing rows of details describing each measurement
"""
def __init__(self, filename):
self.log = get_logger(__name__)
self.header, self.df = self._read(filename)
# Cardinal map to interpret the orientation
self.cardinal_map = {'N': 'North', 'NE': 'Northeast', 'E': 'East',
'SE': 'Southeast', 'S': 'South', 'SW': 'Southwest',
'W': 'West', 'NW': 'Northwest', 'C': 'Center'}
def _read(self, filename):
"""
Read the CSV file that contains SMP log information. Also reads in the
header and creates a few attributes from that information:
1. observer_map
2. orientation_map
"""
self.log.info('Reading SMP file log header')
header_pos = 9
header = read_n_lines(filename, header_pos + 1)
self.observer_map = self._build_observers(header)
# parse/rename column names
line = header[header_pos]
str_cols = [standardize_key(col)
for col in line.lower().split(',') if col.strip()]
# Assume columns are populated left to right so if we have empty ones
# they are assumed at the end
n_cols = len(str_cols)
str_cols = remap_data_names(str_cols, DataHeader.rename)
dtype = {k: str for k in str_cols}
df = pd.read_csv(filename, header=header_pos, names=str_cols,
usecols=range(n_cols), encoding='latin',
parse_dates=[0], dtype=dtype)
# Ensure all values are 4 digits. It seems some were accidentally not.
df['fname_sufix'] = df['fname_sufix'].apply(lambda v: v.zfill(4))
df = self.interpret_dataframe(df)
return header, df
def interpret_dataframe(self, df):
"""
Using various info collected from the dataframe header, modify the
dataframe entries to be more verbose and standardized for the database.
Args:
df: pandas.Dataframe
Returns:
new_df: pandas.Dataframe with modifications
"""
# Apply observer map
df = self.interpret_observers(df)
# Apply orientation map
# Pit ID is actually the Site ID here at least in comparison to the
df['site_id'] = df['pit_id'].copy()
return df
def _build_observers(self, header):
"""
Interprets the header of the SMP file log, which contains a map of the
observers' names. This creates a dictionary mapping the observers'
initials to their full string names.
"""
# Map for observer names and their initials
observer_map = {}
for line in header:
ll = line.lower()
# Create a name map for the observers and their initials
if 'observer' in ll:
data = [d.strip() for d in line.split(':')[-1].split(',')]
data = [d for d in data if d]
for d in data:
info = [clean_str(s).strip(')') for s in d.split('(')]
name = info[0]
initials = info[1]
observer_map[initials] = name
break
return observer_map
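# Example of the header convention this parser assumes (hypothetical line; the
# names and initials are made up purely for illustration):
#   "Observer: Jane Doe (JD), John Roe (JR)"
# would yield observer_map == {'JD': 'Jane Doe', 'JR': 'John Roe'}.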
def interpret_observers(self, df):
"""
Rename all the observers with initials in the observer_map which is
interpreted from the header
Args:
df: dataframe containing a column observer
Return:
new_df: df with the observers column replaced with more verbose
names
"""
new_df = df.copy()
new_df['surveyors'] = \
new_df['surveyors'].apply(lambda x: self.observer_map[x])
return new_df
def interpret_sample_strategy(self, df):
"""
Look through all the measurements posted by site and attempt to
determine the sampling strategy
Args:
df: Dataframe containing all the data from the log
Returns:
new_df: Same dataframe with a new column containing the sampling
strategy
"""
pits = pd.unique(df['pit_id'])
for p in pits:
ind = df['pit_id'] == p
temp = df.loc[ind]
orientations = | pd.unique(temp['orientation']) | pandas.unique |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
#TEST 01
#trying to write to csv file
#training the above code
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
import csv
import cv2  # importing opencv
import os
#training the model with above generated csv file
#import test061_training00
data= | pd.read_csv("dr_features_output_main.csv") | pandas.read_csv |
import unittest
import os
import tempfile
from collections import namedtuple
from blotter import blotter
from pandas.util.testing import assert_frame_equal, assert_series_equal, \
assert_dict_equal
import pandas as pd
import numpy as np
class TestBlotter(unittest.TestCase):
def setUp(self):
cdir = os.path.dirname(__file__)
self.prices = os.path.join(cdir, 'data/prices')
self.rates = os.path.join(cdir, 'data/rates/daily_interest_rates.csv')
self.log = os.path.join(cdir, 'data/events.log')
self.meta_log = os.path.join(cdir, 'data/meta_data.log')
def tearDown(self):
pass
def assertEventsEqual(self, evs1, evs2):
if len(evs1) != len(evs2):
raise(ValueError("Event lists length mismatch"))
for ev1, ev2 in zip(evs1, evs2):
self.assertEqual(ev1.type, ev2.type)
assert_dict_equal(ev1.data, ev2.data)
def assertEventTypes(self, evs1, evs2):
msg = "Event lists length mismatch\n\nLeft:\n%s \nRight:\n%s"
left_msg = ""
for ev in evs1:
left_msg += str(ev) + "\n"
right_msg = ""
for ev in evs2:
right_msg += ev.type + "\n"
msg = msg % (left_msg, right_msg)
if len(evs1) != len(evs2):
raise(ValueError(msg))
for ev1, ev2 in zip(evs1, evs2):
if ev1.type is not ev2.type:
raise(ValueError(msg))
def assertDictDataFrameEqual(self, dict1, dict2):
self.assertEqual(dict1.keys(), dict2.keys())
for key in dict1.keys():
try:
assert_frame_equal(dict1[key], dict2[key])
except AssertionError as e:
e.args = (("\nfor key %s\n" % key) + e.args[0],)
raise e
def make_blotter(self):
blt = blotter.Blotter(self.prices, self.rates)
return blt
def test_get_actions(self):
actions = [(pd.Timedelta("16h"), "PNL"),
(pd.Timedelta("16h"), "INTEREST")]
old_ts = pd.Timestamp("2017-01-04T10:30")
new_ts = pd.Timestamp("2017-01-06T10:30")
ac_ts = blotter.Blotter._get_actions(old_ts, new_ts, actions)
idx = pd.DatetimeIndex([pd.Timestamp("2017-01-04T16:00"),
pd.Timestamp("2017-01-04T16:00"),
pd.Timestamp("2017-01-05T16:00"),
pd.Timestamp("2017-01-05T16:00")])
ac_ts_ex = pd.Series(["PNL", "INTEREST", "PNL", "INTEREST"], index=idx)
assert_series_equal(ac_ts, ac_ts_ex)
def test_get_actions_weekend_filter(self):
actions = [(pd.Timedelta("16h"), "PNL"),
(pd.Timedelta("16h"), "INTEREST")]
old_ts = pd.Timestamp("2017-01-06T10:30")
new_ts = pd.Timestamp("2017-01-09T16:30")
ac_ts = blotter.Blotter._get_actions(old_ts, new_ts, actions)
idx = pd.DatetimeIndex([pd.Timestamp("2017-01-06T16:00"),
pd.Timestamp("2017-01-06T16:00"),
pd.Timestamp("2017-01-09T16:00"),
pd.Timestamp("2017-01-09T16:00")])
ac_ts_ex = pd.Series(["PNL", "INTEREST", "PNL", "INTEREST"], index=idx)
assert_series_equal(ac_ts, ac_ts_ex)
def test_trade_undefined_instrument(self):
blt = self.make_blotter()
ts = pd.Timestamp('2016-12-10T08:30:00')
instr = 'CLZ6'
qty = 1
price = 48.56
def make_trade():
blt._trade(ts, instr, qty, price)
self.assertRaises(KeyError, make_trade)
def test_get_meta_data(self):
blt = blotter.Blotter(self.prices, self.rates, base_ccy="USD")
# currency of instrument defaults to base ccy of blotter when not given
blt.define_generic("CL", margin=0.1, multiplier=100, commission=2.5,
isFX=False)
meta = namedtuple('metadata', ['ccy', 'margin', 'multiplier',
'commission', 'isFX'])
metadata_exp = meta("USD", 0.1, 100, 2.5, False)
metadata = blt._gnrc_meta["CL"]
self.assertEqual(metadata, metadata_exp)
def test_get_holdings_empty(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
hlds = blt.get_holdings_value(ts)
assert_series_equal(hlds, pd.Series())
def test_get_holdings_value_no_fx_conversion(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
qty = 1
price = 0
blt.define_generic("SXM", "ZAR", 0.1, 1, 2.5)
blt.map_instrument("SXM", "SXMZ15")
blt._trade(ts, 'SXMZ15', qty, price)
def no_fx():
return blt.get_holdings_value(ts)
self.assertRaises(KeyError, no_fx)
def test_get_holdings_timestamp_before(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-05T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-04T00:00:00')
def get_holdings():
blt.get_holdings_value(ts)
self.assertRaises(ValueError, get_holdings)
def test_get_holdings_base_ccy(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
hlds = blt.get_holdings_value(ts)
hlds_exp = pd.Series([2082.73 * 100], index=['ESZ15'])
assert_series_equal(hlds, hlds_exp)
def test_get_holds_AUD_instr_AUDUSD_fxrate(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'APZ15'
qty = 1
price = 5200
blt.define_generic("AP", "AUD", 0.1, 1, 2.5)
blt.map_instrument("AP", "APZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
hlds = blt.get_holdings_value(ts)
hlds_exp = pd.Series([5283 * 0.73457], index=['APZ15'])
assert_series_equal(hlds, hlds_exp)
def test_get_holds_CAD_instr_USDCAD_fxrate(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'SXMZ15'
qty = 1
price = 802.52
blt.define_generic("SXM", "CAD", 0.1, 1, 2.5)
blt.map_instrument("SXM", "SXMZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
hlds = blt.get_holdings_value(ts)
hlds_exp = pd.Series([795.95 / 1.3183], index=['SXMZ15'])
assert_series_equal(hlds, hlds_exp)
def test_get_instruments_empty(self):
blt = self.make_blotter()
blt.connect_market_data()
instrs = blt.get_instruments()
assert_series_equal(instrs, pd.Series())
def test_get_instruments_multiplier(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price)
instrs = blt.get_instruments()
instrs_exp = pd.Series([qty], index=['ESZ15'])
assert_series_equal(instrs, instrs_exp)
def test_get_instruments_two_ccy(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr1 = 'ESZ15'
instr2 = 'CLZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt.define_generic("CL", "CAD", 0.1, 1, 2.5)
blt.map_instrument("CL", "CLZ15")
blt._trade(ts, instr1, qty, price)
blt._trade(ts, instr2, qty, price)
instrs = blt.get_instruments()
instrs_exp = pd.Series([qty, qty], index=['CLZ15', 'ESZ15'])
assert_series_equal(instrs, instrs_exp)
def test_get_trades_one_future_base_to_base(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
mid_price = 2080.75
blt.define_generic("ES", "USD", 0.1, 50, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price, mid_price)
trades = blt.get_trades()
cols = ['instrument', 'quantity', 'multiplier', 'price', 'ntc_price',
'ccy', 'fx_to_base']
exp_trades = pd.DataFrame([[instr, 1, 50, price, mid_price,
"USD", 1.0]], index=[ts], columns=cols)
exp_trades.index.name = 'timestamp'
assert_frame_equal(trades, exp_trades)
def test_get_trades_one_future_with_mid_price_fx(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
mid_price = 2080.75
blt.define_generic("ES", "CAD", 0.1, 50, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price, mid_price)
trades = blt.get_trades()
cols = ['instrument', 'quantity', 'multiplier', 'price', 'ntc_price',
'ccy', 'fx_to_base']
exp_trades = pd.DataFrame([[instr, 1, 50, price, mid_price, "CAD",
1 / 1.3125]], index=[ts], columns=cols)
exp_trades.index.name = 'timestamp'
assert_frame_equal(trades, exp_trades)
def test_get_trades_two_futures(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price1 = 2081
mid_price1 = 2080.75
price2 = 2083
mid_price2 = 2082.75
blt.define_generic("ES", "USD", 0.1, 50, 2.5)
blt.map_instrument("ES", "ESZ15")
blt.map_instrument("ES", "ESF16")
blt._trade(ts, instr, qty, price1, mid_price1)
blt._trade(ts, instr, qty, price2, mid_price2)
trades = blt.get_trades()
cols = ['instrument', 'quantity', 'multiplier', 'price', 'ntc_price',
'ccy', 'fx_to_base']
data = [[instr, 1, 50, price1, mid_price1, "USD", 1.0],
[instr, 1, 50, price2, mid_price2, "USD", 1.0]]
exp_trades = pd.DataFrame(data, index=[ts, ts], columns=cols)
exp_trades.index.name = 'timestamp'
assert_frame_equal(trades, exp_trades)
def test_create_unknown_event(self):
blt = self.make_blotter()
ts = pd.Timestamp('2015-08-03T00:00:00')
def create_unknown():
return blt.create_events(ts, "NotAllowed")
self.assertRaises(NotImplementedError, create_unknown)
def test_dispatch_unknown_event(self):
blt = self.make_blotter()
ev = blotter._Event("NotAnEvent",
{"timestamp": pd.Timestamp('2015-01-01')})
def dispatch_unknown():
blt.dispatch_events([ev])
self.assertRaises(NotImplementedError, dispatch_unknown)
def test_create_interest_event(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-03T00:00:00')
blt._holdings.update_cash(ts, "AUD", 1000000)
blt._holdings.update_cash(ts, "JPY", 1000000)
ts = | pd.Timestamp('2015-08-04T00:00:00') | pandas.Timestamp |
from time import time
from keras import Sequential
import numpy as np
import pandas as pd
from keras.layers import Embedding, LSTM, Dense
from keras.preprocessing import sequence
from matplotlib import pyplot
from gensim.models import Word2Vec
from sklearn.decomposition import PCA
from sklearn.metrics import mean_squared_error, accuracy_score
from sklearn.model_selection import KFold
MODEL_PATH = '../../data/binary/word2Vec.bin'
# TRAINING_DATA_PATH = '../../data/csv/model/train.csv'
# TEST_DATA_PATH = '../../data/csv/model/test.csv'
# TRAINING_DATA_PATH = '../../data/csv/model/trainFAKEDATA.csv'
TRAINING_DATA_PATH = '../../data/csv/model/trainPolitical.csv'
TEST_DATA_PATH = '../../data/csv/model/testPolitical.csv'
# TRAINING_DATA_PATH = '../../data/csv/model/trainAbortion.csv'
# TEST_DATA_PATH = '../../data/csv/model/testAbortion.csv'
# TRAINING_DATA_PATH = '../../data/csv/model/trainModern.csv'
# TEST_DATA_PATH = '../../data/csv/model/testModern.csv'
DEBATE_DATA_PATH = '../../data/csv/processed/debate_sentences_part1.csv'
RESULT_PATH = '../../results/csv/lstmParameters(topic modelling).csv'
RESULT_FINAL_PATH = '../../results/csv/lstmResults.csv'
# RESULT_DEBATE_FINAL_PATH = '../../results/csv/lstmDebate.csv'
RESULT_DEBATE_FINAL_PATH = '../../results/csv/lstmDebate2.csv'
DIFFERENCES_PATH = '../../results/csv/worstMistakes.csv'
def saveWord2VecModel(savePath, model):
model.save(savePath)
def loadWord2VecModel(loadPath):
return Word2Vec.load(loadPath)
def visualizeWordEmbeddings(wordEmbeddingMatrix, model):
# Taken from https://machinelearningmastery.com/develop-word-embeddings-python-gensim/
pca = PCA(n_components=2)
fittedPCA = pca.fit_transform(wordEmbeddingMatrix)
pyplot.scatter(fittedPCA[:, 0], fittedPCA[:, 1])
words = list(model.wv.vocab)
for i, word in enumerate(words):
pyplot.annotate(word, xy=(fittedPCA[i, 0], fittedPCA[i, 1]))
pyplot.show()
def encodeSentences(labelEncoder, sentences):
return list(map(labelEncoder.transform, sentences))
def mapSentenceToIndex(sentence):
return [(wordIndex[word] if word in wordIndex else 0) for word in sentence]
def encodeSentences2(sentences, words):
return list(map(mapSentenceToIndex, sentences))
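# Minimal sketch of the index encoding above (assumes a toy wordIndex; the real
# one is built later from the training vocabulary). Unknown words map to 0.
# wordIndex = {'good': 1, 'debate': 2}                  # hypothetical
# mapSentenceToIndex(['good', 'debate', 'tonight'])     # -> [1, 2, 0]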
def splitSentences(sentencesRaw):
sentences = []
for sentence in sentencesRaw:
sentences.append(sentence.split(" "))
return sentences
def addUniqueWordsToWords(sentencesTrain):
global maxSentenceLengthActual
for sentence in sentencesTrain:
if len(sentence) > maxSentenceLengthActual:
maxSentenceLengthActual = len(sentence)
for word in sentence:
if word not in words:
words.add(word)
def createWordIndex():
wordIndex = {}
for index, word in enumerate(words):
wordIndex[word] = index
return wordIndex
def mapToBinary(x):
if x >= 0:
return 1
else:
return 0
print('')
print('Loading Training Data')
start = time()
dataTrain = pd.read_csv(TRAINING_DATA_PATH)
dataTest = pd.read_csv(TEST_DATA_PATH)
dataDebate = | pd.read_csv(DEBATE_DATA_PATH) | pandas.read_csv |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
class RedRio:
def __init__(self,codigo = None,**kwargs):
self.info = pd.Series()
self.codigo = codigo
self.info.slug = None
self.fecha = '2006-06-06 06:06'
self.workspace = '/media/'
self.seccion = pd.DataFrame(columns = [u'vertical', u'x', u'y', u'v01', u'v02', u'v03', u'v04',
u'v05', u'v06', u'v07', u'v08', u'v09', u'vsup'])
self.parametros = "id_aforo,fecha,ancho_superficial,caudal_medio,velocidad_media,perimetro,area_total,profundidad_media,radio_hidraulico"
self.aforo = pd.Series(index = [u'fecha', u'ancho_superficial', u'caudal_medio',
u'velocidad_media',u'perimetro', u'area_total',
u'profundidad_media', u'radio_hidraulico',u'levantamiento'])
self.levantamiento = pd.DataFrame(columns = ['vertical','x','y'])
self.alturas = pd.DataFrame(index=pd.date_range(start = pd.to_datetime('2018').strftime('%Y-%m-%d 06:00'),periods=13,freq='H'),columns = ['profundidad','offset','lamina','caudal'])
self.alturas.index = map(lambda x:x.strftime('%H:00'),self.alturas.index)
@property
def caudales(self):
pass
@property
def folder_path(self):
return self.workspace+pd.to_datetime(self.fecha).strftime('%Y%m%d')+'/'+self.info.slug+'/'
def insert_vel(self,vertical,v02,v04,v08):
self.seccion.loc[vertical,'v02'] = v02
self.seccion.loc[vertical,'v04'] = v04
self.seccion.loc[vertical,'v08'] = v08
def velocidad_media_dovela(self):
columns = [u'vertical', u'x', u'y', u'v01', u'v02', u'v03',
u'v04', u'v05', u'v06', u'v07', u'v08', u'v09', u'vsup']
dfs = self.seccion[columns].copy()
self.seccion['vm'] = np.NaN
vm = []
for index in dfs.index:
vm.append(round(self.estima_velocidad_media_vertical(dfs.loc[index].dropna()),3))
self.seccion['vm'] = vm
def area_dovela(self):
self.seccion['area'] = self.get_area(self.seccion['x'].abs().values,self.seccion['y'].abs().values)
def estima_velocidad_media_vertical(self,vertical,factor=0.0,v_index=0.8):
vertical = vertical[vertical.index!='vm']
index = list(vertical.index)
if index == ['vertical','x','y']:
if vertical['x'] == 0.0:
vm = factor * self.seccion.loc[vertical.name+1,'vm']
else:
vm = factor * self.seccion.loc[vertical.name-1,'vm']
elif (index == ['vertical','x','y','vsup']) or (index == ['vertical','x','y','v08']):
try:
vm = v_index*vertical['vsup']
except:
vm = v_index*vertical['v08']
elif (index == ['vertical','x','y','v04']) or (index == ['vertical','x','y','v04','vsup']):
vm = vertical['v04']
elif (index == ['vertical','x','y','v04','v08']) or (index == ['vertical','x','y','v04','v08','vsup']) or (index == ['vertical','x','y','v02','v04']):
vm = vertical['v04']
elif index == ['vertical','x','y','v08','vsup']:
vm = v_index*vertical['vsup']
elif (index == ['vertical','x','y','v02','v04','v08']) or (index == ['vertical','x','y','v02','v04','v08','vsup']):
vm = (2*vertical['v04']+vertical['v08']+vertical['v02'])/4.0
elif (index == ['vertical','x','y','v02','v08']):
vm = (vertical['v02']+vertical['v08'])/2.0
return vm
def perimetro(self):
x,y = (self.seccion['x'].values,self.seccion['y'].values)
def perimeter(x,y):
p = []
for i in range(len(x)-1):
p.append(round(float(np.sqrt(abs(x[i]-x[i+1])**2.0+abs(y[i]-y[i+1])**2.0)),3))
return [0]+p
self.seccion['perimetro'] = perimeter(self.seccion['x'].values,self.seccion['y'].values)
def get_area(self,x,y):
'''Computes the subsection area for each of the
verticals, using the mid-section method.
Input:
x = Distance from the left bank, type = numpy array
y = Depth
Output:
area = Area of each subsection
'''
# area computation
d = np.absolute(np.diff(x))/2.
b = x[:-1]+d
area = np.diff(b)*y[1:-1]
area = np.insert(area, 0, d[0]*y[0])
area = np.append(area,d[-1]*y[-1])
area = np.absolute(area)
# return the rounded subsection areas
return np.round(area,3)
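# Worked sketch of the mid-section rule implemented above (illustrative numbers):
# x = [0, 1, 2, 3], y = [0, 1.2, 1.0, 0]
# half-distances d = [0.5, 0.5, 0.5]; midpoints b = [0.5, 1.5, 2.5]
# interior areas  = diff(b) * y[1:-1] = [1.2, 1.0]
# edge areas      = d[0]*y[0] = 0.0 and d[-1]*y[-1] = 0.0
# so self.get_area(x, y) -> array([0. , 1.2, 1. , 0. ])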
def read_excel_format(self,file):
df = pd.read_excel(file)
df = df.loc[df['x'].dropna().index]
df['vertical'] = range(1,df.index.size+1)
df['y'] = df['y'].abs()*-1
df.columns = map(lambda x:x.lower(),df.columns)
self.seccion = df[self.seccion.columns]
df = pd.read_excel(file,sheetname=1)
self.aforo.fecha = df.iloc[1].values[1].strftime('%Y-%m-%d')+df.iloc[2].values[1].strftime(' %H:%M')
self.aforo['x_sensor'] = df.iloc[4].values[1]
self.aforo['lamina'] = df.iloc[5].values[1]
df = pd.read_excel(file,sheetname=2)
self.levantamiento = df[df.columns[1:]]
self.levantamiento.columns = ['x','y']
self.levantamiento.index.name = 'vertical'
self.aforo.levantamiento = True
def plot_bars(self,s,filepath=None,bar_fontsize=14,decimales=2,xfactor =1.005,yfactor=1.01,ax=None):
if ax is None:
plt.figure(figsize=(20,6))
s.plot(kind='bar',ax=ax)
ax.set_ylim(s.min()*0.01,s.max()*1.01)
for container in ax.containers:
plt.setp(container, width=0.8)
for p in ax.patches:
ax.annotate(str(round(p.get_height(),decimales)),
(p.get_x() * xfactor, p.get_height() * yfactor),
fontsize = bar_fontsize)
for j in ['top','right']:
ax.spines[j].set_edgecolor('white')
ax.set_ylabel(r'$Caudal\ [m^3/s]$')
if filepath:
plt.savefig(filepath,bbox_inches='tight')
def plot_levantamientos(self):
for id_aforo in self.levantamientos:
self.plot_section(self.get_levantamiento(id_aforo),x_sensor=2,level=0.0)
plt.title("%s : %s,%s"%(self.info.slug,self.codigo,id_aforo))
def procesa_aforo(self):
self.velocidad_media_dovela()
self.area_dovela()
self.seccion['caudal'] = np.round(np.array(self.seccion.vm*self.seccion.area),3)
self.perimetro()
self.aforo.caudal_medio = round(self.seccion.caudal.sum(),3)
self.aforo.area_total = round(self.seccion.area.sum(),3)
self.aforo.velocidad_media = round(self.aforo.caudal_medio/self.aforo.area_total,3)
self.aforo.ancho_superficial = self.seccion['x'].abs().max()-self.seccion['x'].abs().min()
self.aforo.perimetro = round(self.seccion.perimetro.sum(),3)
self.aforo.profundidad_media = round(self.seccion['y'].abs()[self.seccion['y'].abs()>0.0].mean(),3)
self.aforo.radio_hidraulico = round(self.aforo.area_total/self.aforo.perimetro,3)
self.fecha = self.aforo.fecha
def ajusta_levantamiento(self):
cond = (self.levantamiento['x']<self.aforo.x_sensor).values
flag = cond[0]
for i,j in enumerate(cond):
if j==flag:
pass
else:
point = ((self.levantamiento.iloc[i-1].x,self.levantamiento.iloc[i-1].y),(self.levantamiento.iloc[i].x,self.levantamiento.iloc[i].y))
flag = j
point2 = ((self.aforo.x_sensor,0.1*self.levantamiento['y'].min()),((self.aforo.x_sensor,1.1*self.levantamiento['y'].max())))
intersection = self.line_intersection(point,point2)
self.levantamiento = self.levantamiento.append(pd.DataFrame(np.matrix(intersection),index=['self.aforo.x_sensor'],columns=['x','y'])).sort_values('x')
self.levantamiento['y'] = self.levantamiento['y']-intersection[1]
self.levantamiento['vertical'] = range(1,self.levantamiento.index.size+1)
self.levantamiento.index = range(0,self.levantamiento.index.size)
def line_intersection(self,line1, line2):
xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])
ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])
def det(a, b):
return a[0] * b[1] - a[1] * b[0]
div = det(xdiff, ydiff)
if div == 0:
raise Exception('lines do not intersect')
d = (det(*line1), det(*line2))
x = det(d, xdiff) / div
y = det(d, ydiff) / div
return (x, y)
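# Worked sketch of the determinant form above (illustrative values): for the
# segments ((0, 0), (2, 2)) and ((0, 2), (2, 0)),
# xdiff = (-2, -2), ydiff = (-2, 2), div = -8, d = (0, -4),
# x = det((0, -4), (-2, -2)) / -8 = 1.0 and y = det((0, -4), (-2, 2)) / -8 = 1.0,
# i.e. line_intersection(...) -> (1.0, 1.0), the crossing point of the diagonals.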
def get_sections(self,levantamiento,level):
hline = ((levantamiento['x'].min()*1.1,level),(levantamiento['x'].max()*1.1,level)) # horizontal line
lev = pd.DataFrame.copy(levantamiento) #df to modify
# Handle problems at the edges of the survey
borderWarning = 'Warning:\nEdge problems in the survey'
if lev.iloc[0]['y']<level:
lev = pd.DataFrame(np.matrix([lev.iloc[0]['x'],level]),columns=['x','y']).append(lev)
if lev.iloc[-1]['y']<level:
lev = lev.append(pd.DataFrame(np.matrix([lev.iloc[-1]['x'],level]),columns=['x','y']))
condition = (lev['y']>=level).values
flag = condition[0]
nlev = []
intCount = 0
ids=[]
for i,j in enumerate(condition):
if j==flag:
ids.append(i)
nlev.append([lev.iloc[i].x,lev.iloc[i].y])
else:
intCount+=1
ids.append('Point %s'%intCount)
line = ([lev.iloc[i-1].x,lev.iloc[i-1].y],[lev.iloc[i].x,lev.iloc[i].y]) # point A
inter = self.line_intersection(line,hline)
nlev.append(inter)
ids.append(i)
nlev.append([lev.iloc[i].x,lev.iloc[i].y])
flag = j
df = pd.DataFrame(np.matrix(nlev),columns=['x','y'],index=ids)
dfs = []
conteo = (np.arange(1,100,2))
for i in conteo[:int(intCount/2)]:
dfs.append(df.loc['Point %s'%i:'Point %s'%(i+1)])
return dfs
def plot_section(self,*args,**kwargs):
'''Plots the cross section of water-level stations
| ----------Parameters
| df : dataFrame with the topo-bathymetric survey, columns=['x','y']
| level : Water level
| riskLevels : Alert levels
| *args : plt.plot() arguments
| **kwargs : xSensor,offset,riskLevels,xLabel,yLabel,ax,groundColor,fontsize,figsize,
| Note: all units are in meters'''
# Kwargs
level = kwargs.get('level',None)
xLabel = kwargs.get('xLabel','x [m]')
yLabel = kwargs.get('yLabel','Profundidad [m]')
waterColor = kwargs.get('waterColor','#e5efff')
groundColor = kwargs.get('groundColor','tan')
fontsize= kwargs.get('fontsize',14)
figsize = kwargs.get('figsize',(6,2))
riskLevels = kwargs.get('riskLevels',None)
xSensor = kwargs.get('xSensor',None)
offset = kwargs.get('offset',0)
scatterSize = kwargs.get('scatterSize',0.0)
ax = kwargs.get('ax',None)
df = self.levantamiento.copy()
# main plot
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
ax.plot(df['x'].values,df['y'].values,color='k',lw=0.5)
ax.fill_between(np.array(df['x'].values,float),np.array(df['y'].values,float),float(df['y'].min()),color=groundColor,alpha=1.0)
# waterLevel
sections = []
if level is not None:
for data in self.get_sections(df.copy(),level):
#ax.hlines(level,data['x'][0],data['x'][-1],color='k',linewidth=0.5)
ax.fill_between(data['x'],level,data['y'],color=waterColor,alpha=0.9)
ax.plot(data['x'],[level]*data['x'].size,linestyle='--',alpha=0.3)
sections.append(data)
# Sensor
if (offset is not None) and (xSensor is not None):
ax.scatter(xSensor,level,marker='v',color='k',s=30+scatterSize,zorder=22)
ax.scatter(xSensor,level,color='white',s=120+scatterSize+10,edgecolors='k')
#ax.annotate('nivel actual',xy=(label,level*1.2),fontsize=8)
#ax.vlines(xSensor, level,offset,linestyles='--',alpha=0.5,color=self.colores_siata[-1])
#labels
ax.set_xlabel(xLabel)
ax.set_facecolor('white')
#risks
xlim_max = df['x'].max()
if riskLevels is not None:
x = df['x'].max() -df['x'].min()
y = df['y'].max() -df['y'].min()
factorx = 0.05
ancho = x*factorx
locx = df['x'].max()+ancho/2.0
miny = df['y'].min()
locx = 1.03*locx
risks = np.diff(np.array(list(riskLevels)+[offset]))
ax.bar(locx,[riskLevels[0]+abs(miny)],width=ancho,bottom=0,color='green')
colors = ['yellow','orange','red','red']
for i,risk in enumerate(risks):
ax.bar(locx,[risk],width=ancho,bottom=riskLevels[i],color=colors[i],zorder=19)
if level is not None:
ax.hlines(data['y'].max(),data['x'].max(),locx,lw=1,linestyles='--')
ax.scatter([locx],[data['y'].max()],s=30,color='k',zorder=20)
xlim_max=locx+ancho
# ax.hlines(data['y'].max(),df['x'].min(),sections[0].min(),lw=1,linestyles='--')
ax.set_xlim(df['x'].min(),xlim_max)
for j in ['top','right','left']:
ax.spines[j].set_edgecolor('white')
ax.set_ylabel('y [m]')
def plot_aforo(self):
self.seccion['y'] = self.seccion['y'].abs()*(-1.0)
x = list(self.seccion['x'].values)*4
y = list(self.seccion['y'].values*(1-0.2))+list(self.seccion['y'].values*(1-0.4))+list(self.seccion['y'].values*(1-0.8))+self.seccion.y.size*[0.0]
z = list(self.seccion['v02'].values)+list(self.seccion['v04'].values)+list(self.seccion['v08'].values)+list(self.seccion['vsup'].values)
x+=list(self.seccion['x'].values)
y+=list(self.seccion['y'].values)
z+=self.seccion.index.size*[0]
fig = plt.figure(figsize=(7,3))
ax = fig.add_subplot(111)
cm = plt.cm.get_cmap('jet')
sc = plt.scatter(x,y,c=z,vmin=0.0,vmax=3.0,cmap=cm,s=80,zorder=20)
cb = plt.colorbar(sc, pad=0.05)
cb.ax.set_title('V(m/s)')
ax.plot(self.seccion['x'].values,[0]*self.seccion.index.size,linestyle='--',alpha=0.3)
ax.fill_between(np.array(self.seccion['x'].values,float),np.array(self.seccion['y'].values,float),float(self.seccion['y'].min()),color='tan',alpha=1.0)
ax.fill_between(np.array(self.seccion['x'].values,float),np.array(self.seccion['y'].values,float),0,color='#e5efff')
for j in ['top','right','left']:
ax.spines[j].set_edgecolor('white')
ax.set_ylabel('y [m]')
ax.set_xlabel('x [m]')
from hydraulics.models import *
from meta.models import *
from django.db.models import Q
from django_pandas.io import read_frame
from uploadfiles.models import *
import os, sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
class Hidraulica:
def __init__(self,item = None,workspace = 'media',**kwargs):
self.workspace = workspace
self.item = item
self.section = pd.DataFrame()
self.topo = | pd.DataFrame() | pandas.DataFrame |
import os
import csv
import pandas
from sklearn.svm import LinearSVC
from sklearn import linear_model, metrics
from sklearn.model_selection import train_test_split
from scipy.sparse import csr_matrix
from questionparser import QuestionParser
CORPUS_DIR = os.path.join(os.path.dirname(__file__), 'corpus')
def compare_model(train_file, test_file):
train_data = pandas.read_csv(train_file)
labels = train_data.pop('Class')
train_data.pop('Question')
test_data = | pandas.read_csv(test_file) | pandas.read_csv |
from __future__ import print_function
import os
import csv
import numpy as np
import pandas as pd
from inferelator_ng import single_cell_workflow
from inferelator_ng import results_processor
from inferelator_ng import utils
from inferelator_ng import default
from inferelator_ng import bbsr_python
from inferelator_ng.utils import Validator as check
# The variable names that get set in the main workflow, but need to get copied to the puppets
SHARED_CLASS_VARIABLES = ['tf_names', 'gene_list', 'num_bootstraps', 'modify_activity_from_metadata',
'metadata_expression_lookup', 'gene_list_lookup', 'mi_sync_path', 'count_minimum',
'gold_standard_filter_method', 'split_priors_for_gold_standard', 'cv_split_ratio',
'split_gold_standard_for_crossvalidation', 'cv_split_axis', 'preprocessing_workflow',
'shuffle_prior_axis', 'write_network', 'output_dir']
class NoOutputRP(results_processor.ResultsProcessor):
"""
Overload the existing results processor to return summary information and to only output files if specifically
instructed to do so
"""
network_file_name = None
pr_curve_file_name = None
confidence_file_name = None
threshold_file_name = None
def summarize_network(self, output_dir, gold_standard, priors, confidence_threshold=default.DEFAULT_CONF,
precision_threshold=default.DEFAULT_PREC):
"""
Take the betas and rescaled beta_errors, construct a network, and test it against the gold standard
:param output_dir: str
Path to write files into. Don't write anything if this is None.
:param gold_standard: pd.DataFrame [G x K]
Gold standard to test the network against
:param priors: pd.DataFrame [G x K]
Prior data
:param confidence_threshold: float
Threshold for confidence scores
:param precision_threshold: float
Threshold for precision
:return aupr: float
Returns the AUPR calculated from the network and gold standard
:return num_conf: int
The number of interactions above the confidence threshold
:return num_prec: int
The number of interactions above the precision threshold
"""
pr_calc = results_processor.RankSummaryPR(self.rescaled_betas, gold_standard, filter_method=self.filter_method)
beta_sign, beta_nonzero = self.summarize(self.betas)
beta_threshold = self.passes_threshold(beta_nonzero, len(self.betas), self.threshold)
resc_betas_mean, resc_betas_median = self.mean_and_median(self.rescaled_betas)
network_data = {'beta.sign.sum': beta_sign, 'var.exp.median': resc_betas_median}
utils.Debug.vprint("Model AUPR:\t{aupr}".format(aupr=pr_calc.aupr), level=0)
# Plot PR curve & Output results to a TSV
self.write_output_files(pr_calc, output_dir, priors, beta_threshold, network_data)
num_conf = pr_calc.num_over_conf_threshold(confidence_threshold)
num_prec = pr_calc.num_over_precision_threshold(precision_threshold)
return pr_calc.aupr, num_conf, num_prec
# Factory method to spit out a puppet workflow
def create_puppet_workflow(base_class=single_cell_workflow.SingleCellWorkflow, result_processor=NoOutputRP):
class PuppetClass(base_class):
"""
Standard workflow except it takes all the data as references to __init__ instead of as filenames on disk or
as environment variables, and returns the model AUPR and edge counts without writing files (unless told to)
"""
write_network = True
network_file_name = None
def __init__(self, kvs, rank, expr_data, meta_data, prior_data, gs_data):
self.kvs = kvs
self.rank = rank
self.expression_matrix = expr_data
self.meta_data = meta_data
self.priors_data = prior_data
self.gold_standard = gs_data
def startup_run(self):
if self.split_priors_for_gold_standard:
self.split_priors_into_gold_standard()
elif self.split_gold_standard_for_crossvalidation:
self.cross_validate_gold_standard()
def emit_results(self, betas, rescaled_betas, gold_standard, priors):
if self.is_master():
results = result_processor(betas, rescaled_betas, filter_method=self.gold_standard_filter_method)
if self.write_network:
results.network_file_name = self.network_file_name
network_file_path = self.output_dir
else:
results.network_file_name = None
network_file_path = None
results.pr_curve_file_name = None
results.confidence_file_name = None
results.threshold_file_name = None
results.write_task_files = False
results.tasks_names = getattr(self, "tasks_names", None) # For multitask
results = results.summarize_network(network_file_path, gold_standard, priors)
self.aupr, self.n_interact, self.precision_interact = results
else:
self.aupr, self.n_interact, self.precision_interact = None, None, None
return PuppetClass
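# Minimal usage sketch of the factory above (variable names are illustrative; the
# real call sites are in the puppeteer classes below):
# PuppetWorkflow = create_puppet_workflow()   # defaults: SingleCellWorkflow / NoOutputRP
# puppet = PuppetWorkflow(kvs, rank, expr_df, meta_df, prior_df, gs_df)
# puppet.run()
# puppet.aupr                                  # AUPR is stashed on the instance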
class PuppeteerWorkflow(object):
"""
This class contains the methods to create new child Workflow objects
It does not extend WorkflowBase because I hate keeping track of multi-inheritance patterns
"""
write_network = True # bool
csv_writer = None # csv.csvwriter
csv_header = [] # list[]
output_file_name = "aupr.tsv" # str
puppet_class = single_cell_workflow.SingleCellWorkflow
puppet_result_processor = NoOutputRP
regression_type = bbsr_python
def create_writer(self):
"""
Create a CSVWriter and stash it in self.writer
"""
if self.is_master():
self.create_output_dir()
self.csv_writer = csv.writer(open(os.path.join(self.output_dir, self.output_file_name),
mode="w", buffering=1), delimiter="\t", lineterminator="\n",
quoting=csv.QUOTE_NONE)
self.csv_writer.writerow(self.csv_header)
def new_puppet(self, expr_data, meta_data, seed=default.DEFAULT_RANDOM_SEED, priors_data=None, gold_standard=None):
"""
Create a new puppet workflow to run the inferelator
:param expr_data: pd.DataFrame [G x N]
:param meta_data: pd.DataFrame [N x ?]
:param seed: int
:param priors_data: pd.DataFrame [G x K]
:param gold_standard: pd.DataFrame [G x K]
:return puppet:
"""
# Unless told otherwise, use the master priors and master gold standard
if gold_standard is None:
gold_standard = self.gold_standard
if priors_data is None:
priors_data = self.priors_data
# Create a new puppet workflow with the factory method and pass in data on instantiation
puppet = create_puppet_workflow(base_class = self.puppet_class, result_processor = self.puppet_result_processor)
puppet = puppet(self.kvs, self.rank, expr_data, meta_data, priors_data, gold_standard)
# Transfer the class variables necessary to get the puppet to dance (everything in SHARED_CLASS_VARIABLES)
self.assign_class_vars(puppet)
# Set the random seed into the puppet
puppet.random_seed = seed
# Make sure that the puppet knows the correct orientation of the expression matrix
puppet.expression_matrix_columns_are_genes = False
# Tell the puppet what to name stuff (if write_network is False then no output will be produced)
puppet.network_file_name = "network_s{seed}.tsv".format(seed=seed)
return puppet
def assign_class_vars(self, obj):
"""
Transfer class variables from this object to a target object
"""
for varname in SHARED_CLASS_VARIABLES:
try:
setattr(obj, varname, getattr(self, varname))
utils.Debug.vprint("Variable {var} set to child".format(var=varname), level=2)
except AttributeError:
utils.Debug.vprint("Variable {var} not assigned to parent".format(var=varname))
self.regression_type.patch_workflow(obj)
class SingleCellPuppeteerWorkflow(single_cell_workflow.SingleCellWorkflow, PuppeteerWorkflow):
seeds = default.DEFAULT_SEED_RANGE
# Output TSV controllers
write_network = True # bool
csv_writer = None # csv.csvwriter
csv_header = ["Seed", "AUPR", "Num_Interacting"] # list[]
output_file_name = "aupr.tsv" # str
# How to sample
stratified_sampling = False
stratified_batch_lookup = default.DEFAULT_METADATA_FOR_BATCH_CORRECTION
sample_with_replacement = True
def run(self):
np.random.seed(self.random_seed)
self.startup()
self.create_writer()
auprs = self.modeling_method()
def compute_activity(self):
# Compute activities in the puppet, not in the puppetmaster
pass
def single_cell_normalize(self):
# Normalize and impute in the puppet, not in the puppetmaster
pass
def set_gold_standard_and_priors(self):
# Split priors for a gold standard in the puppet, not in the puppetmaster
self.priors_data = self.input_dataframe(self.priors_file)
self.gold_standard = self.input_dataframe(self.gold_standard_file)
def align_priors_and_expression(self):
# Align the priors and expression in the puppet, not in the puppetmaster
pass
def shuffle_priors(self):
# Do any shuffles in the puppet, not in the puppetmaster
pass
def modeling_method(self):
raise NotImplementedError("No method to create models was provided")
def get_sample_index(self, meta_data=None, sample_ratio=None, sample_size=None,
min_size=default.DEFAULT_MINIMUM_SAMPLE_SIZE, stratified_sampling=None):
"""
Produce an integer index to sample data using .iloc. If the self.stratified_sampling flag is True, sample
separately from each group, as defined by the self.stratified_batch_lookup column.
:param meta_data: pd.DataFrame [N x ?]
Data frame to sample from. Use self.meta_data if this is not set.
:param sample_ratio: float
Sample expression_matrix to this proportion of data points
:param sample_size: int
Sample expression matrix to this absolute number of data points. If sampling from each stratified group,
this is the absolute number of data points PER GROUP (not total)
:return new_idx: np.ndarray
Integer index array that can be passed to .iloc to subsample the data
"""
# Sanity check inputs
assert check.arguments_not_none((sample_ratio, sample_size), num_none=1)
assert check.argument_numeric(sample_ratio, low=0, allow_none=True)
assert check.argument_numeric(sample_size, low=0, allow_none=True)
stratified_sampling = stratified_sampling if stratified_sampling is not None else self.stratified_sampling
if stratified_sampling:
# Use the main meta_data if there's nothing given
meta_data = meta_data if meta_data is not None else self.meta_data
# Copy and reindex the meta_data so that the index can be used with iloc
meta_data = meta_data.copy()
meta_data.index = pd.Index(range(meta_data.shape[0]))
new_idx = np.ndarray(0, dtype=int)
# For each factor in the batch column
for batch in meta_data[self.stratified_batch_lookup].unique().tolist():
# Get the integer index of the data points in this batch
batch_idx = meta_data.loc[meta_data[self.stratified_batch_lookup] == batch, :].index.tolist()
# Decide how many to collect from this batch
size = sample_size if sample_ratio is None else max(int(len(batch_idx) * sample_ratio), min_size)
# Resample and append the new sample index to the index array
new_idx = np.append(new_idx, np.random.choice(batch_idx, size=size,
replace=self.sample_with_replacement))
return new_idx
else:
# Decide how many to collect from the total expression matrix or the meta_data
num_samples = self.expression_matrix.shape[1] if meta_data is None else meta_data.shape[0]
size = sample_size if sample_ratio is None else max(int(sample_ratio * num_samples), min_size)
return np.random.choice(num_samples, size=size, replace=self.sample_with_replacement)
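# Sketch of the two sampling modes above (numbers are illustrative only):
# - stratified: with meta_data batches of 30 and 70 samples and sample_ratio=0.5,
#   roughly 15 + 35 indices are drawn per batch (with replacement by default)
#   and concatenated into a single integer index.
# - unstratified: one np.random.choice over all N samples of size
#   max(int(sample_ratio * N), min_size).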
class SingleCellSizeSampling(SingleCellPuppeteerWorkflow):
sizes = default.DEFAULT_SIZE_SAMPLING
csv_header = ["Size", "Num_Sampled", "Seed", "AUPR", "Num_Confident_Int", "Num_Precision_Int"]
def modeling_method(self, *args, **kwargs):
return self.get_aupr_for_subsampled_data()
def get_aupr_for_subsampled_data(self):
aupr_data = []
for s_ratio in self.sizes:
for seed in self.seeds:
np.random.seed(seed)
nidx = self.get_sample_index(sample_ratio=s_ratio)
puppet = self.new_puppet(self.expression_matrix.iloc[:, nidx], self.meta_data.iloc[nidx, :], seed=seed)
if self.write_network:
puppet.network_file_name = "network_{size}_s{seed}.tsv".format(size=s_ratio, seed=seed)
puppet.run()
size_aupr = (s_ratio, len(nidx), seed, puppet.aupr, puppet.n_interact, puppet.precision_interact)
aupr_data.extend(size_aupr)
if self.is_master():
self.csv_writer.writerow(size_aupr)
return aupr_data
class SingleCellDropoutConditionSampling(SingleCellPuppeteerWorkflow):
csv_header = ["Dropout", "Seed", "AUPR", "Num_Confident_Int", "Num_Precision_Int"]
# Sampling batches
sample_batches_to_size = default.DEFAULT_BATCH_SIZE
stratified_sampling = True
drop_column = None
def modeling_method(self, *args, **kwargs):
self.factor_indexes = self.factor_singles()
auprs = self.auprs_for_condition_dropin()
auprs.extend(self.auprs_for_condition_dropout())
return auprs
def auprs_for_condition_dropout(self):
"""
Run modeling on all data, and then on data where each factor from `drop_column` has been removed
:return:
"""
# Run the modeling on all data
aupr_data = [self.auprs_for_index("all_dropout", | pd.Series(True, index=self.meta_data.index) | pandas.Series |
import numpy as np
import pandas as pd
pd.options.display.max_rows = 20;
pd.options.display.expand_frame_repr = True
import sys
sys.path.insert(1,'/home/arya/workspace/bio')
import UTILS.Util as utl
import multiprocessing
from UTILS.BED import BED
from UTILS.Util import mask
from time import time
CHROMS=['2L', '2R', '3L', '3R', 'X']
cp=['CHROM','POS']
coord=pd.read_pickle('/home/arya/storage/Data/Dmelanogaster/geneCoordinates/gene_map_table_fb_2014_03.tsv.dmel.df')
coords2=pd.concat([coord.rename(columns={'start':'POS'}).set_index(cp)['FBgn'], \
coord.rename(columns={'end':'POS'}).set_index(cp)['FBgn']]).loc[CHROMS].sort_index()
chromL=coord.groupby('CHROM').end.max().loc[CHROMS]
chromL
path='/home/arya/storage/Data/Dmelanogaster/OxidativeStress/plots/intervals/'
def load_intervals(replicated):
def geti(x):
chrom = x[3:].split(':')[0]
start, end = map(lambda y: 1e6 * float(y), x.replace('Mb', '').split(':')[1].split('-'))
return BED.interval(chrom, start, end)
fname=['intervals_single_rep.tsv','intervals.tsv'][replicated]
I=pd.read_csv(path+fname,sep='\t').set_index('Name')
I=I.join(I.Coordinate.apply(geti)).reset_index()
if replicated:
I.Name=I.Name.apply(lambda x: x[0]+chr(64+int(x[-1])))
I['name']=I.Name.apply(lambda x: '$\mathrm{'+x[0]+'}_{''\mathrm{'+x[-1]+'}}$')
else:
I.Name=I.Name.apply(lambda x: x[0]+x[2]+chr(64+int(x[-1])))
I['name']=I.Name.apply(lambda x: '$\mathrm{'+x[0]+'}_{'+x[1]+'\mathrm{'+x[2]+'}}$')
return I.set_index('Name')
I = load_intervals(True)
def calculate_random_gene_sets(I):
fin='/home/arya/storage/Data/Dmelanogaster/OxidativeStress/plots/intervals/random_gene_sets.df'
try:
return pd.read_pickle(fin)
except:
def get_random_interval_gens_per_len(length,n_per_chrom=2000):
ming=max(1,np.round(length/1e5))
print(length,ming)
random_intervals=[]
np.random.seed(0)
for chrom in CHROMS:
print(chrom)
j=0
while j<n_per_chrom:
start=np.random.randint(0,chromL.loc[chrom]-length)
bg=mask(coords2,BED.interval( chrom,start,start+int(length))).unique()
if bg.size > ming:
random_intervals+=[pd.Series(bg)]
j+=1
return pd.concat(random_intervals,keys=list(range(len(random_intervals)))).reset_index(1,drop=True)
random_gene_sets=I.len.groupby(level=0).apply(lambda x: get_random_interval_gens_per_len(x.loc[x.name]))
random_gene_sets.to_pickle(fin)
go = | pd.read_pickle(utl.PATH.data + "GO/GO.fly.df") | pandas.read_pickle |
#@author: bfoster2
# -*- coding: utf-8 -*-
"""
Created on Tue May 28 10:05:23 2019
@author: bfoster2
"""
import os
#os.system("!pip install gensim --upgrade")
#os.system("pip install keras --upgrade")
#os.system("pip install pandas --upgrade")
# DataFrame
import pandas as pd
# Matplot
import matplotlib.pyplot as plt
#from matplotlib import inline
# Scikit-learn
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from sklearn.manifold import TSNE
from sklearn.feature_extraction.text import TfidfVectorizer
# Keras
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import (Activation, Dense, Dropout, Embedding, Flatten, Conv1D, MaxPooling1D,
LSTM)
from keras import utils
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
# nltk
import nltk
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
from nltk.tokenize import sent_tokenize, word_tokenize
# Word2vec
import gensim
# Utility
import re
import numpy as np
import os
from collections import Counter
import logging
import time
import pickle
import itertools
filelocation=('C:/Users/bfoster2/desktop/iyb.txt')
data=[line.strip() for line in open (filelocation,'r')]
texts=[[word.lower() for word in text.split()] for text in data]
dataS=''.join(str(e) for e in data)
x=(sent_tokenize(dataS))
df= | pd.DataFrame([x], index=['string_values']) | pandas.DataFrame |
import pandas as pd
import string
crime_2019 = pd.read_csv('./crime_feat_2019.csv')
crime_2018 = pd.read_csv('./crime_feat_2018.csv')
crime_2017 = pd.read_csv('./crime_feat_2017.csv')
crime_2016 = pd.read_csv('./crime_feat_2016.csv')
crime_2015 = pd.read_csv('./crime_feat_2015.csv')
crime_2014 = pd.read_csv('./crime_feat_2014.csv')
frames = [crime_2019, crime_2018, crime_2017, crime_2016, crime_2015, crime_2014]
combined_crime = | pd.concat(frames) | pandas.concat |
"""Analyzes Terms in terms of the underlying gene structure and comparisons with other terms."""
"""
A term ontology is a classification of genes. Examples include: GO (gene ontology),
KO (KEGG Orthology), KEGG Pathway, and EC (Enzyme Commission). A term ontology
is a many-to-many relationship between genes and terms. A gene need not have
a corresponding term in a term ontology.
"""
from common import constants as cn
from common_python import constants as cpn
from common_python.plots import util_plots
from common.data_provider import DataProvider
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import scipy.stats as stats
import seaborn as sns
from scipy.spatial import distance
from scipy.cluster.hierarchy import linkage, fcluster
class TermAnalyzer(object):
def __init__(self, df_term, provider=None, is_plot=True):
"""
:param pd.DataFrame df_term: a one column DataFrame and indexed by cn.GENE_ID
:param bool is_plot:
"""
self._is_plot = is_plot
if provider is None:
self.provider = DataProvider()
self.provider.do()
else:
self.provider = provider
self.df_term = df_term
self.ontology = self._getOntology()
def _getOntology(self):
columns = [c for c in self.df_term.columns]
return columns[0]
def makeAnalyzerMatrix(self):
"""
An analyzer matrix is a dataframe with columns that are terms (plus "Missing"),
indexed by GENE_ID, and values are either a count or np.nan
:return pd.DataFrame: analyzer matrix
"""
# Create a matrix of expressed genes
df_expressed = pd.DataFrame({
cn.GENE_ID: self.provider.df_normalized.index,
})
df_expressed[self.ontology] = np.nan
df_expressed[cn.COUNT] = np.nan
# Matrix of terms
df_term = self.df_term[self.df_term.index.isin(
df_expressed[cn.GENE_ID])].copy()
df_term = df_term.reset_index()
df_term[cn.COUNT] = 1
df_term = df_term.drop_duplicates()
# Ensure all expressed genes are present
gene_expressed = set(df_expressed[cn.GENE_ID].tolist())
gene_term = set(df_term[cn.GENE_ID].tolist())
gene_excluded = gene_expressed.difference(gene_term)
df_expressed_excluded = df_expressed[
df_expressed[cn.GENE_ID].isin(gene_excluded)].copy()
df1_term = | pd.concat([df_term, df_expressed_excluded]) | pandas.concat |
from argparse import ArgumentParser
import numpy as np
import pandas as pd
import statsmodels.api as sm
from arch.bootstrap import StationaryBootstrap
from statsmodels.nonparametric.kernel_regression import KernelReg
from utils import resample
from align_settings import STARTTIME, ENDTIME
SESSIONSTART = pd.to_datetime('2016-01-04 08:00:00')
SESSIONEND = pd.to_datetime('2016-01-04 16:30:00')
TIMESTEP = pd.to_timedelta('1m')
def get_tick_size(quote):
diffs = (quote['Ask'] + quote['Bid']).diff()
diffs = diffs[diffs > 1e-6]
return np.round(diffs.min(), 2)
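# Hedged illustrative sketch (not part of the original module): get_tick_size() infers the
# tick from the smallest positive move of Bid + Ask. The sample quotes are invented.
def _demo_get_tick_size():
    quote = pd.DataFrame({'Bid': [99.95, 100.00, 100.00],
                          'Ask': [100.05, 100.05, 100.10]})
    return get_tick_size(quote)  # -> 0.05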
def compute_grid(time, step):
return pd.to_datetime(0) + step*np.floor((time - pd.to_datetime(0))/step)
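# Hedged illustrative sketch (not part of the original module): compute_grid() floors each
# timestamp onto a regular grid anchored at the epoch. The sample timestamps are assumptions.
def _demo_compute_grid():
    times = pd.DatetimeIndex(['2016-01-04 08:00:30', '2016-01-04 08:01:45'])
    # Both values are floored to the start of their minute bucket,
    # i.e. 08:00:00 and 08:01:00.
    return compute_grid(times, pd.to_timedelta('1min'))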
def grid_trade(trade, step):
gridded = trade.copy()
gridded['Grid'] = compute_grid(trade.index, step)
gridded.set_index('Grid', append=True, inplace=True)
return gridded
def grid_quote(quote, time_grid, step):
time_grid = pd.Index(time_grid, name=quote.index.name)
grid_start = pd.DataFrame(np.nan, time_grid, quote.columns)
grid_end = pd.DataFrame(np.nan, time_grid + step, quote.columns)
gridded = pd.concat([quote, grid_start, grid_end])
gridded.sort_index(inplace=True)
gridded.ffill(inplace=True)
gridded['Grid'] = compute_grid(gridded.index, step)
gridded['Grid'] = gridded['Grid'].shift(fill_value=time_grid[0])
gridded.set_index('Grid', append=True, inplace=True)
return gridded
def grid_quote_trade(quote, trade):
time_grid = np.arange(SESSIONSTART, SESSIONEND, TIMESTEP)
quote_gridded = quote.groupby(['Class', 'Strike']
).apply(lambda o: grid_quote(o.xs(o.name), time_grid,
TIMESTEP))
trade_gridded = trade.groupby(['Class', 'Strike']
).apply(lambda o: grid_trade(o.xs(o.name), TIMESTEP))
return quote_gridded, trade_gridded
def filter_trade_on_book(quote, trade):
quote_aligned = trade.groupby(['Class', 'Strike']
).apply(lambda o: resample(quote.xs(o.name),
trade.xs(o.name).index))
valid_trades = ((trade['Price'] == quote_aligned['Bid']) |
(trade['Price'] == quote_aligned['Ask']))
filtered = trade[valid_trades].copy()  # copy to avoid SettingWithCopyWarning on the column assignments below
quote_aligned = quote_aligned.loc[valid_trades]
filtered['Buy'] = filtered['Price'] == quote_aligned['Ask']
filtered['Half-spread'] = (quote_aligned['Ask'] - quote_aligned['Bid']).round(2)/2
return filtered
def compute_duration(quote):
quote = quote.copy()
quote['Half-spread'] = (quote['Ask'] - quote['Bid']).round(2)/2
time = quote.reset_index('Time'
).set_index('Half-spread', append=True)[['Time']]
time['Duration'] = time['Time'].groupby(['Class', 'Strike']
).transform(lambda t: t.diff().shift(-1))
time['Time'] += time['Duration']/2
duration = time.set_index('Time', append=True)['Duration']
duration /= pd.to_timedelta('1s')
return duration
def compute_volume_duration(quote, trade, expiry, tick_size):
quote = quote.xs(expiry, level='Expiry')
trade = trade.xs(expiry, level='Expiry')
tick_sizes = quote.groupby(['Class', 'Strike']).apply(get_tick_size)
strikes = tick_sizes[tick_sizes == tick_size]
quote = quote.groupby('Class'
).apply(lambda c: c.xs(c.name).loc[strikes.xs(c.name).index])
trade = trade.groupby('Class'
).apply(lambda c: c.xs(c.name).loc[strikes.xs(c.name).index])
quote_gridded, trade_gridded = grid_quote_trade(quote, trade)
filtered_trade = trade_gridded.groupby('Grid').apply(
lambda o: filter_trade_on_book(quote_gridded.xs(o.name, level='Grid'),
o.xs(o.name, level='Grid')))
volume = filtered_trade.set_index(['Half-spread', 'Buy'], append=True)['Volume']
duration = quote_gridded.groupby('Grid').apply(
lambda g: compute_duration(g.xs(g.name, level='Grid')))
volume = volume.groupby(['Class', 'Strike', 'Half-spread', 'Buy', 'Grid']).sum()
duration = duration.groupby(['Class', 'Strike', 'Half-spread', 'Grid']).sum()
return volume, duration
def find_quantiles(s, quantile):
quantiles = np.arange(quantile, 1 - quantile/2, quantile)
return s.index[np.searchsorted(s.cumsum()/s.sum(), quantiles)]
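# Hedged illustrative sketch (not part of the original module): find_quantiles() returns one
# index label per quantile cut point of the cumulative share (labels may repeat when a single
# entry spans several cut points). The sample volumes are invented.
def _demo_find_quantiles():
    volume_by_strike = pd.Series([10, 40, 30, 20], index=[95.0, 100.0, 105.0, 110.0])
    return find_quantiles(volume_by_strike, 1/4)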
def build_quartiles(volume, duration):
volume_by_strike = volume.groupby('Class').apply(lambda c: c.groupby('Strike').sum())
strikes = volume_by_strike.groupby('Class').apply(
lambda c: pd.Index(find_quantiles(c.xs(c.name), 1/4), name='Strike'))
volume = volume.groupby(['Class', 'Strike', 'Half-spread', 'Grid']).sum()
volume_duration = pd.concat(
[o.xs((slice(STARTTIME, ENDTIME), slice(0.025, 0.100)),
level=('Grid', 'Half-spread'), drop_level=False)
for o in [volume, duration]], axis=1).fillna(0)
return volume_duration, strikes
def compute_arrival_rate(volume, duration, strikes):
volume_duration = pd.concat([volume.sum(), duration.sum()],
keys=['Volume', 'Duration'], axis=1)
volume_duration_kernel = volume_duration.apply(
lambda vd: vd.groupby('Half-spread').apply(
lambda d: KernelReg(d.xs(d.name, level='Half-spread'),
d.xs(d.name, level='Half-spread').index,
'c', 'lc')))
arrival_rate = volume_duration_kernel.apply(
lambda vd: vd.groupby('Half-spread').apply(
lambda k: pd.Series(k.xs(k.name).fit(strikes)[0], strikes)))
return np.log(arrival_rate['Volume']/arrival_rate['Duration'])
def calibrate(volume_duration, strikes, reps):
volume_duration = volume_duration.unstack(['Half-spread', 'Strike'])
arrival_rate = volume_duration.groupby('Class').apply(
lambda c: compute_arrival_rate(c.loc[c.name, 'Volume'],
c.loc[c.name, 'Duration'],
strikes[c.name]))
arrival_rate.name = 'Arrival rate'
arrival_rate.index = arrival_rate.index.reorder_levels(['Class', 'Strike',
'Half-spread'])
sbs = volume_duration.groupby('Class').apply(
lambda c: StationaryBootstrap(25, volume=c.loc[c.name, 'Volume'],
duration=c.loc[c.name, 'Duration']))
conf_int = sbs.groupby('Class').apply(lambda c: pd.DataFrame(
c[c.name].conf_int(lambda volume, duration: compute_arrival_rate(
volume, duration, strikes[c.name]), reps=reps),
['2.5%', '97.5%'], arrival_rate.loc[c.name].index))
conf_int = conf_int.T.stack('Class')
conf_int.index = conf_int.index.reorder_levels(['Class', 'Strike', 'Half-spread'])
sigma = sbs.groupby('Class').apply(lambda c: pd.DataFrame(
c[c.name].cov(lambda volume, duration: compute_arrival_rate(
volume, duration, strikes[c.name]), reps=reps),
arrival_rate.loc[c.name].index, arrival_rate.loc[c.name].index))
sigma = sigma.groupby('Strike').apply(lambda k: k.xs(k.name, level='Strike',
axis=1))
sigma.dropna(how='all', inplace=True)
gls = arrival_rate.loc[sigma.index].groupby(['Class', 'Strike']).apply(
lambda k: sm.GLS(k.values,
sm.add_constant(k.index.get_level_values('Half-spread')),
sigma=sigma.xs(k.name, level=['Class', 'Strike']
).dropna(axis=1)).fit())
params = gls.apply(lambda g: pd.Series([np.exp(g.params[0]), -g.params[1]],
['A', '$\\kappa$']))
base_conf_int = gls.apply(
lambda g: pd.Series(np.exp(g.conf_int(alpha=.1)[0]), ['A 5%', 'A 95%']))
decay_conf_int = gls.apply(
lambda g: pd.Series(-g.conf_int(alpha=.1)[1, ::-1],
['$\\kappa$ 5%', '$\\kappa$ 95%']))
params = pd.concat([params, base_conf_int, decay_conf_int], axis=1)
arrival_rate = np.exp(pd.concat([arrival_rate, conf_int], axis=1))
return arrival_rate, params
if __name__ == '__main__':
cli = ArgumentParser()
cli.add_argument('expiry')
cli.add_argument('tick_size')
cli.add_argument('reps')
cli.add_argument('quote_filename')
cli.add_argument('trade_filename')
cli.add_argument('dest_arrival_rate_filename')
cli.add_argument('dest_params_filename')
args = cli.parse_args()
expiry = pd.to_datetime(args.expiry)
tick_size = float(args.tick_size)
quote = pd.read_parquet(args.quote_filename)
trade = | pd.read_parquet(args.trade_filename) | pandas.read_parquet |
# -*- coding: utf-8 -*-
import sys
import dnaio
import numpy as np
import pandas as pd
from xopen import xopen
from .protocol import BarcodePattern, MisSeq
from .report import Reporter
from .utils import getlogger, CommandWrapper
logger = getlogger(__name__)
logger.setLevel(10)
def barcode(
ctx,
fq1s=None,
fq2s=None,
sample=None,
outdir=None,
bctype=None,
pattern=None,
lowqual=None,
lownum=None,
whitelist=None,
linker=None,
thread=None,
debug=False
):
"""
:param ctx:
:param fq1s:
:param fq2s:
:param sample:
:param outdir:
:param bctype:
:param pattern:
:param lowqual:
:param lownum:
:param whitelist:
:param linker:
:param thread:
:param debug:
:return:
"""
logger.info('Extract barcode start!')
barcode_pattern = BarcodePattern(pattern)
cell_len, umi_len = 0, 0
for start, end in zip(barcode_pattern['C'].start, barcode_pattern['C'].end):
cell_len += end - start
for start, end in zip(barcode_pattern['U'].start, barcode_pattern['U'].end):
umi_len += end - start
cell_umi_quality_array = np.zeros((cell_len + umi_len, 42), dtype=np.uint64)
cell_umi_base_array = np.zeros((cell_len + umi_len, 5), dtype=np.uint64)
base_dict = dict(zip(['A', 'T', 'C', 'G', 'N'], range(0, 5)))
whitelist_list = []
if whitelist:
with xopen(whitelist, mode='rt') as f:
for line in f:
whitelist_list.append(line.strip())
cell_dict = MisSeq(whitelist_list)
else:
cell_dict = None
if linker:
with xopen(linker, mode='rt') as f:
length = [end - start for start, end in zip(barcode_pattern['L'].start, barcode_pattern['L'].end)]
linker_list = [[] for i in range(len(length))]
for line in f:
linkers = line.strip()
for nth, val in enumerate(length):
linker_list[nth].append(linkers[:val])
linkers = linkers[val:]
linkers_dict = [MisSeq(linker) for linker in linker_list]
else:
linkers_dict = None
sample_outdir = outdir / sample / '01.barcode'
sample_outdir.mkdir(parents=True, exist_ok=True)
clean_fastq = sample_outdir / f'{sample}_2.fq.gz'
if bctype == 'SCOPEv2':
from .protocol import SCOPEv2 as Sequence
elif bctype == 'SCOPEv1':
from .protocol import SCOPEv1 as Sequence
else:
from .protocol import Sequence
with xopen(clean_fastq, mode='wt') as f:
for fq1, fq2 in zip(fq1s, fq2s):
with dnaio.open(file1=fq1, file2=fq2) as g:
for seq1, seq2 in g:
Sequence.seq_info['total_num'] += 1
sequence = Sequence(
seq1=seq1,
seq2=seq2,
lownum=lownum,
lowqual=lowqual,
barcode_pattern=barcode_pattern,
linkers_dict=linkers_dict,
cell_dict=cell_dict
)
if sequence.rna_sequence:
f.write(f'{sequence.rna_sequence}\n')
for position, quality in enumerate(sequence.cell_quality + sequence.umi_quality):
cell_umi_quality_array[position, quality] += 1
for position, base in enumerate(''.join(sequence.cell) + sequence.umi):
cell_umi_base_array[position, base_dict[base]] += 1
# stat
cell_q30 = cell_umi_quality_array[:cell_len, 30:].sum() / cell_umi_quality_array[:cell_len].sum()
umi_q30 = cell_umi_quality_array[cell_len:, 30:].sum() / cell_umi_quality_array[cell_len:].sum()
stat_info = Sequence.stat_info()
stat_info['Q30 of Barcodes'] = f'{cell_q30:.2%}'
stat_info['Q30 of UMIs'] = f'{umi_q30:.2%}'
# indices
df = | pd.DataFrame(cell_umi_base_array) | pandas.DataFrame |
"""Helper classes and functions with RTOG studies.
"""
import random
import pandas as pd
import numpy as np
import pickle
from collections import Counter
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from tqdm import tqdm
import pint
# Constants defining variable and file parsing
from rtog_constants import gcp_baseline_paths, rtog_endpoints, rtog_binary_mapping, rtog_unknown_class_X
from rtog_constants import rtog_default_class_y, rtog_text_fields, rtog_field_mapping, rtog_categorical_fields
# Functions allowing RTOG data manipulation
from rtog_constants import is_categorical, merge, serum_values_to_ng_dl
def rtog_from_study_number(study_number, create_endpoints=True, standardize=False):
"""Helper function. Loads an RTOG object given the study number (str)."""
study_path = gcp_baseline_paths[study_number]
rtog = RTOG(filename=study_path, study_number=study_number, file_type='excel', create_endpoints=create_endpoints)
if standardize:
rtog.standardize_rx()
rtog.standardize_race()
rtog.standardize_gleason_scores()
rtog.standardize_tstage()
rtog.standardize_pelvic_rt()
rtog.standardize_prostate_dose()
rtog.standardize_rt_complete()
rtog.standardize_biochemical_failure()
rtog.standardize_disease_specific_survival()
rtog.cause_of_death()
# rtog_object.standardize_baseline_serum() # Note: this line takes a long time to run, due to unit conversions. Also Osama said the data is too noisy to use.
rtog.standardize_unknown_values_in_predictor_variables() # note: this must be done after standardize_rt_complete, bc that re-sets some unknown vars. This replaces the 'unknown' classes with nans, so that boosting can intelligently impute.
print("Loaded RTOG {}, Standardized={}".format(study_number, standardize))
return rtog
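# Hedged usage sketch (not part of the original module): the intended call pattern for the
# helper above. '9202' is one of the study numbers handled in this module; the underlying
# spreadsheets live on the GCP mount referenced in gcp_baseline_paths, so this only runs there.
def _demo_load_study():
    rtog = rtog_from_study_number('9202', create_endpoints=True, standardize=True)
    X, y, meta = rtog.get_Xy(y_var='distant_met_5year', make_binary=False)
    return X, y, meta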
class RTOG(object):
def __init__(self, filename=None, study_number=None, file_type="excel", create_endpoints=True):
self.filename = filename
self.df = None
self.study_number = study_number
# Load Endpoints, Default Classes (for y), and Unknown Classes (for X).
if self.study_number in rtog_endpoints:
self.endpoints = rtog_endpoints[study_number]
if self.study_number in rtog_default_class_y:
self.default_class_y = rtog_default_class_y[study_number]
if self.study_number in rtog_unknown_class_X:
self.unknown_class_X = rtog_unknown_class_X[study_number]
# Load Data.
if self.filename is not None:
if file_type == "excel":
self.df = pd.read_excel(filename)
elif file_type == "csv":
self.df = pd.read_csv(filename, index_col=0)
self._field_fix()
self.table_sort()
# Study-specific additional derived endpoints get hardcoded here
if study_number == '9202':
# Add Radiotherapy info
gcp_path = "/export/medical_ai/ucsf/box_data/Aperio Images of NRG GU H&E Slides/NRG Statistics/RTOG 9202/All_RT_Data_9202.xlsx"
self.df_rt = pd.read_excel(gcp_path)
self.df_rt.columns = self.df_rt.columns.str.lower()
self.df_rt.rename({'pelvis_does' : 'pelvis_dose'}, axis='columns', inplace=True)
elif study_number == '9413': #note: data lacks disease specific survival
pass
elif study_number == '9408':
pass
elif study_number == '9910':
# Add Radiotherapy info
gcp_path = "/export/medical_ai/ucsf/box_data/Aperio Images of NRG GU H&E Slides/NRG Statistics/RTOG 9910/Radiation_treatment_9910.xlsx"
self.df_rt = pd.read_excel(gcp_path)
self.df_rt.columns = self.df_rt.columns.str.lower()
elif study_number == "0126":
# Add Serum info
gcp_path = "/export/medical_ai/ucsf/box_data/Aperio Images of NRG GU H&E Slides/NRG Statistics/RTOG 0126/All_serum_testosteron_0126.xlsx"
self.df_serum = pd.read_excel(gcp_path)
self.df_serum.columns = self.df_serum.columns.str.lower()
else:
pass
# Replace nans with defaults in endpoint fields
self.df = self.df.fillna(self.default_class_y)
if create_endpoints:
for timeframe in [5,10,15,25]:
self.add_distant_met_Nyr_endpoint(timeframe)
self.add_biochemical_failure_Nyr_endpoint(timeframe)
self.add_disease_specific_survival_Nyr_endpoint(timeframe)
self.add_survival_Nyr_endpoint(timeframe)
def _repr_html_(self):
return self.df._repr_html_()
def __getitem__(self, columns):
if type(columns) == str:
columns = [columns]
new_rtog = self.copy()
new_rtog.df = new_rtog.df[columns]
return new_rtog
def _field_fix(self):
"""Fixes field names for uniformity and typos. Determined in rtog_constants.py
"""
self.df = self.df.rename(columns=str.lower)
self.df = self.df.rename(rtog_field_mapping, axis='columns')
def table_sort(self):
"""Sorts rows and columns in ascending order.
"""
self.df = self.df.sort_index()
self.df = self.df.sort_index(axis=1)
def add_biochemical_failure_Nyr_endpoint(self, years):
"""Adds column 'biochemical_failure_Nyr' to self.df
Indicates if the cancer metastasized within N years.
Args:
years(int): the years.
Column values:
0: Censored
1: Failure within given years
2: Competing event (death without failure)
"""
field_name = 'biochemical_failure'
if self.study_number == '9202':
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
new_field = field_name + "_{}year".format(years)
elif self.study_number == '9408':
failure_outside_timeframe_value = 0 # Does not have a 'competing events' class.
new_field = field_name + "_{}year".format(years)
elif self.study_number == '9413':
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
new_field = field_name + "_{}year".format(years)
field_name = 'phoenix_biochemical_failure'
elif self.study_number == '9910':
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
new_field = field_name + "_{}year".format(years)
field_name = 'phoenix_biochemical_failure'
elif self.study_number == "0126":
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
new_field = field_name + "_{}year".format(years)
field_name = 'phoenix_biochemical_failure'
else:
raise ValueError("The failure value for biochemical_failure is not set for this study: {}".format(self.study_number))
field_name_years = field_name + "_years"
assert field_name in self.endpoint_fields(), "{} not in endpoint fields".format(field_name)
assert field_name_years in self.endpoint_fields() , "{} not in endpoint fields".format(field_name_years)
# Insert new field. If it exists already, re-compute it.
if new_field in self.df.columns:
self.df = self.df.drop(columns=[new_field])
idx = self.df.columns.get_loc(field_name) + 1
new_column_vals = []
for f, fy in zip(self.df[field_name], self.df[field_name_years]):
if f == 0: # Default class for biochemical_failure is 0. Same for biochemical_failure_5yr.
new_column_vals.append(0)
if f == 2:
new_column_vals.append(2)
if f == 1:
assert ~np.isnan(fy), "Found biochemical_failure=1, with biochemical_failure_years=nan. Impossible. See rtog {}".format(
self.study_number)
if fy <= years:
new_column_vals.append(1)
else:
new_column_vals.append(failure_outside_timeframe_value)
self.df.insert(loc=idx, column=new_field, value=list(map(int, new_column_vals)))
self.table_sort()
# Update endpoint fields
self._add_endpoint_field(new_field, 0)
def add_disease_specific_survival_Nyr_endpoint(self, years):
"""Adds column 'disease_specific_survival_Nyr' to self.df
Indicates if the patient has lived free of prostate cancer within N years.
Note: Contrast this with disease_free_survival, which means the patient has lived free of any disease.
Args:
years(int): the years.
Column values:
0: Censored
1: Failure within given years
2: Competing event (death from something other than prostate cancer.)
"""
field_name = 'disease_specific_survival'
if self.study_number == '9202':
failure_outside_timeframe_value = 2
# field_name_years = "survival_years" # Stephanie confirmed we can use this value.
elif self.study_number == '9408':
failure_outside_timeframe_value = 2
# field_name_years = "dsm_years" # Osama confirmed we can use this value.
elif self.study_number == '9413':
failure_outside_timeframe_value = 2
elif self.study_number == '9910':
failure_outside_timeframe_value = 2
elif self.study_number == '0126':
failure_outside_timeframe_value = 2
else:
raise ValueError("The failure_outside_timeframe_value for disease specific survival is not set for this study: {}".format(
self.study_number))
field_name_years = field_name + "_years"
assert field_name in self.endpoint_fields(), "{} not in endpoint fields".format(field_name)
assert field_name_years in self.endpoint_fields() , "{} not in endpoint fields".format(field_name_years)
# Insert new field. If it exists already, re-compute it.
new_field = field_name + "_{}year".format(years)
if new_field in self.df.columns:
self.df = self.df.drop(columns=[new_field])
idx = self.df.columns.get_loc(field_name) + 1
new_column_vals = []
for dss, dfsy in zip(self.df[field_name], self.df[field_name_years]):
if dss == 0: # Default class for distant_met is 0. Same for distant_met_5yr.
new_column_vals.append(0)
if dss == 2:
new_column_vals.append(2)
if dss == 1:
if dfsy <= years:
new_column_vals.append(1)
else:
new_column_vals.append(failure_outside_timeframe_value)
self.df.insert(loc=idx, column=new_field, value=list(map(int, new_column_vals)))
self.table_sort()
# Update endpoint fields
self._add_endpoint_field(new_field, 0)
def add_survival_Nyr_endpoint(self, years):
"""Adds column 'survival_Nyr' to self.df. Refers to overall survival.
Args:
years(int): the years.
Column values:
0: Alive, within given years.
1: Death, within given years.
"""
field_name = 'survival'
field_name_years = "survival_years" # Note, that for disease_specific_survival=1, we can take the time period from disease_free_surival_years.
assert field_name in self.endpoint_fields(), "{} not in endpoint fields".format(field_name)
assert field_name_years in self.endpoint_fields() , "{} not in endpoint fields".format(field_name_years)
# Insert new field. If it exists already, re-compute it.
new_field = field_name + "_{}year".format(years)
if new_field in self.df.columns:
self.df = self.df.drop(columns=[new_field])
idx = self.df.columns.get_loc(field_name) + 1
new_column_vals = []
for fn, fny in zip(self.df[field_name], self.df[field_name_years]):
if fn == 0: # Default class for distant_met is 0. Same for distant_met_5yr.
new_column_vals.append(0)
if fn == 1:
if fny <= years:
new_column_vals.append(1)
else:
new_column_vals.append(0)
self.df.insert(loc=idx, column=new_field, value=list(map(int, new_column_vals)))
self.table_sort()
# Update endpoint fields
self._add_endpoint_field(new_field, 0)
def add_distant_met_Nyr_endpoint(self, years):
"""Adds column 'distant_met_Nyr' to self.df
Indicates if the cancer metastasized within N years.
Args:
years(int): the years.
Column values:
0: Censored
1: Failure within given years (metastatic prostate cancer)
2: Competing event (death from something other than prostate cancer.)
"""
field_name = 'distant_met'
field_name_years = field_name + "_years"
if self.study_number == '9202':
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
elif self.study_number == '9408':
failure_outside_timeframe_value = 0 # Has a 'competing events' class
elif self.study_number == '9413':
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
elif self.study_number == '9910':
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
elif self.study_number == '0126':
failure_outside_timeframe_value = 2 # Has a 'competing events' class.
else:
raise ValueError("The failure_outside_timeframe_value for disease specific survival is not set for this study: {}".format(self.study_number))
assert field_name in self.endpoint_fields(), "{} not in endpoint fields".format(field_name)
assert field_name_years in self.endpoint_fields() , "{} not in endpoint fields".format(field_name_years)
# Insert new field. If it exists already, re-compute it.
new_field = field_name + "_{}year".format(years)
if new_field in self.df.columns:
self.df = self.df.drop(columns=[new_field])
idx = self.df.columns.get_loc(field_name) + 1
new_column_vals = []
for dm, dmy in zip(self.df[field_name], self.df[field_name_years]):
if dm == 0: # Default class for distant_met is 0. Same for distant_met_5yr.
new_column_vals.append(0)
if dm == 2:
new_column_vals.append(2)
if dm == 1:
assert ~np.isnan(dmy), "Found distant_met=1, with distant_met_years=nan. Impossible. See rtog {}".format(self.study_number)
if dmy <= years:
new_column_vals.append(1)
else:
new_column_vals.append(failure_outside_timeframe_value)
self.df.insert(loc=idx, column=new_field, value=list(map(int, new_column_vals)))
self.table_sort()
# Update endpoint fields
self._add_endpoint_field(new_field, 0)
def _add_endpoint_field(self, endpoint_field, default_class_y):
if endpoint_field in self.endpoints:
if self.default_class_y[endpoint_field] != default_class_y:
raise ValueError("Endpoint already listed, with different default class: {}. New attempt: {}".format(
self.default_class_y[endpoint_field], default_class_y
))
return
self.endpoints.append(endpoint_field)
self.default_class_y[endpoint_field] = default_class_y
def printc(self):
prev = pd.options.display.max_columns
prev_r = pd.options.display.max_rows
pd.options.display.max_columns = None
pd.options.display.max_rows = 90
from IPython.display import display  # local import: display() is otherwise only defined inside IPython/Jupyter
display(self.df)
pd.options.display.max_columns = prev
pd.options.display.max_rows = prev_r
def get_fields(self):
return self.df.columns
def set_study_number(self, number):
if number not in rtog_endpoints:
raise ValueError('Study number not available: {}. Options: {}'.format(number, rtog_endpoints.keys()))
self.study_number = number
self.endpoints = rtog_endpoints[number]
self.default_class_y = rtog_default_class_y[number]
def copy(self):
new_rtog = RTOG()
new_rtog.df = self.df.copy(deep=True)
new_rtog.filename = self.filename
new_rtog.study_number = self.study_number
new_rtog.endpoints = self.endpoints
new_rtog.default_class_y = self.default_class_y
new_rtog.unknown_class_X = self.unknown_class_X
return new_rtog
def drop(self, columns=''):
new_rtog = self.copy()
new_rtog.df = self.df.drop(columns=columns)
return new_rtog
def clear_columns(self, columns=[""]):
"""Sets the specified column values to empty.
Args:
columns(list): the names of the columns to replace.
"""
N = len(self.df)
new_rtog = self.copy()
null_columns = {c : [''] * N for c in columns}
for c, l in null_columns.items():
new_rtog.df[c] = l
return new_rtog
def endpoint_fields(self):
if not self.study_number:
raise ValueError("Study number not set. Cannot select endpoint fields")
return self.endpoints
def text_fields(self):
if not self.study_number:
raise ValueError("Study number not set. Cannot select text fields")
return rtog_text_fields[self.study_number]
def get_Xy(self, y_var=None, make_binary=False):
"""Returns training/testing data, properly formatted.
For each study, see the RTOG XXXX Variable Listings documents for reference.
Args:
y_var(str): the column of self.df to use as the prediction variable. E.g. y_var='cod'
Any rows with nans are removed.
make_binary(bool): if True, it returns a binary vector (0,1), using the class mapping
defined above, rtog_binary_mapping.
"""
# Set X. Don't impute. Boosting methods do this better than you can.
rtog_X = self.drop(columns=self.endpoint_fields() + self.text_fields())
rtog_X = rtog_X.copy()
rtog_meta = self.copy()
rtog_meta.df = rtog_meta.df[self.endpoint_fields()]
# Set y. Impute to default class.
rtog_y = self.copy()
rtog_y = rtog_y[rtog_y.endpoint_fields()]
if y_var:
default_class_y = self.default_class_y[y_var]
rtog_y = rtog_y[y_var]
rtog_y.df = rtog_y.df.fillna(default_class_y)
if make_binary: # Forces y to be binary, using a pre-specified mapping in the parent class.
for c in rtog_y.df.columns:
mapping = rtog_binary_mapping[self.study_number][c]
rtog_y.df[c] = rtog_y.df[c].replace(mapping)
return rtog_X, rtog_y, rtog_meta
def generate_test_set(self, size=100, seed=None, field_to_balance=""):
"""Samples a test set, printing the class statistics of each.
Args:
size(int): the number of entries to sample
seed(int): Optional. Random seed for reproducibility.
field_to_balance(str): Optional. If set, function tries to return an equal class
balance in this field. E.g. disease_free_survival
Returns:
RTOG object - the sampled test set.
"""
if seed is not None:
random.seed(seed)
df = self.df.copy(deep=True)
if field_to_balance:
classes = df[field_to_balance].unique()
indices = {}
for c in classes:
sub_df = df[df[field_to_balance] == c]
indices[c] = list(sub_df.index)
m = min([len(v) for _, v in indices.items()])
for c, l in indices.items():
if len(l) > m:
random.shuffle(l)
indices[c] = l[:m]
idx = [elem for _, l in indices.items() for elem in l]
else:
idx = list(range(len(df)))
random.shuffle(idx)
idx = idx[:size]
new_rtog = self.copy()
new_rtog.df = df
new_rtog.df = new_rtog.df.loc[idx]
return new_rtog
def to_csv(self, filename):
self.df.to_csv(filename)
def standardize_disease_specific_survival(self, drop_prior_columns=True):
self.standardize_disease_specific_survival_events(drop_prior_columns=drop_prior_columns)
self.standardize_disease_specific_survival_years(drop_prior_columns=drop_prior_columns)
# If DSS-years unspecified but DSS censored, set DSS-years to 25 (assume long time).
isnan = self.df['disease_specific_survival_years'].isnull().values
iszero = (self.df['disease_specific_survival'] == 0).values
self.df.loc[np.logical_and(isnan, iszero), 'disease_specific_survival_years'] = 25
def standardize_disease_specific_survival_events(self, drop_prior_columns=True):
"""Merges variants of DSS, prioritizing phoenix, and naming everything disease_specific_survival
Args:
drop_prior_columns(bool): If True, drops the original columns.
"""
bcr_fields = [f for f in self.df.columns if 'disease_specific_survival' in f]
e_bcr_fields = np.array([f for f in bcr_fields if 'year' not in f])
idx_sort = []
idx_sort.append(np.where(['phoenix' in e for e in e_bcr_fields])[0])
idx_sort.append(np.where(['disease_specific_survival' == e for e in e_bcr_fields])[0])
idx_sort = np.array([i[0] for i in idx_sort if len(i) > 0])
e_bcr = self.df[e_bcr_fields[idx_sort]]
new_values = e_bcr[e_bcr.columns[0]]
for i in range(1,len(e_bcr.columns)):
# Fill remaining gaps from the next-best column; fillna aligns on the shared index.
new_values = new_values.fillna(e_bcr[e_bcr.columns[i]])
self.df = self.df.assign(disease_specific_survival=new_values)
def standardize_disease_specific_survival_years(self, drop_prior_columns=True):
"""Merges variants of BCR, prioritizing phoenix, and naming everything disease_specific_survival
Args:
drop_prior_columns(bool): If True, drops the original columns.
"""
bcr_fields = [f for f in self.df.columns if 'disease_specific_survival' in f]
e_bcr_fields = np.array([f for f in bcr_fields if 'years' in f])
idx_sort = []
idx_sort.append(np.where(['phoenix' in e for e in e_bcr_fields])[0])
idx_sort.append(np.where(['disease_specific_survival_years' == e for e in e_bcr_fields])[0])
idx_sort = np.array([i[0] for i in idx_sort if len(i) > 0])
e_bcr = self.df[e_bcr_fields[idx_sort]]
new_values = e_bcr[e_bcr.columns[0]]
for i in range(1,len(e_bcr.columns)):
# Fill remaining gaps from the next-best column; fillna aligns on the shared index.
new_values = new_values.fillna(e_bcr[e_bcr.columns[i]])
self.df = self.df.assign(disease_specific_survival_years=new_values)
def standardize_biochemical_failure(self, drop_prior_columns=True):
self.standardize_biochemical_failure_events(drop_prior_columns=drop_prior_columns)
self.standardize_biochemical_failure_years(drop_prior_columns=drop_prior_columns)
def standardize_biochemical_failure_events(self, drop_prior_columns=True):
"""Merges variants of BCR, prioritizing phoenix, and naming everything biochemical_failure
Args:
drop_prior_columns(bool): If True, drops the original columns.
"""
bcr_fields = [f for f in self.df.columns if 'biochemical' in f]
e_bcr_fields = np.array([f for f in bcr_fields if 'year' not in f])
idx_sort = []
idx_sort.append(np.where(['phoenix' in e for e in e_bcr_fields])[0])
idx_sort.append(np.where(['biochemical_failure' == e for e in e_bcr_fields])[0])
idx_sort.append(np.where(['astro' in e for e in e_bcr_fields])[0])
idx_sort = np.array([i[0] for i in idx_sort if len(i) > 0])
e_bcr = self.df[e_bcr_fields[idx_sort]]
new_values = e_bcr[e_bcr.columns[0]]
for i in range(1,len(e_bcr.columns)):
# Fill remaining gaps from the next-best column; fillna aligns on the shared index.
new_values = new_values.fillna(e_bcr[e_bcr.columns[i]])
self.df = self.df.assign(biochemical_failure=new_values)
def standardize_biochemical_failure_years(self, drop_prior_columns=True):
"""Merges variants of BCR, prioritizing phoenix, and naming everything biochemical_failure
Args:
drop_prior_columns(bool): If True, drops the original columns.
"""
bcr_fields = [f for f in self.df.columns if 'biochemical' in f]
e_bcr_fields = np.array([f for f in bcr_fields if 'years' in f])
idx_sort = []
idx_sort.append(np.where(['phoenix' in e for e in e_bcr_fields])[0])
idx_sort.append(np.where(['biochemical_failure_years' == e for e in e_bcr_fields])[0])
idx_sort.append(np.where(['astro' in e for e in e_bcr_fields])[0])
idx_sort = np.array([i[0] for i in idx_sort if len(i) > 0])
e_bcr = self.df[e_bcr_fields[idx_sort]]
new_values = e_bcr[e_bcr.columns[0]]
for i in range(1,len(e_bcr.columns)):
next_best = e_bcr[e_bcr.columns[i]][new_values.isnull()].values.copy()
new_values = new_values.fillna(pd.Series(next_best))
self.df = self.df.assign(biochemical_failure_years=new_values)
def standardize_baseline_psa(self, drop_prior_columns=True):
"""Merges variants of 'baseline_psa' together across studies.
Args:
drop_prior_columns(bool): If True, drops the original columns.
"""
if self.study_number == '0126':
self.df['baseline_psa'] = self.df['psa']
if drop_prior_columns:
self.df.drop(columns='psa')
def standardize_baseline_serum(self, drop_prior_columns=True):
"""Merges baseline_serum* values into a single, column: baseline_serum_ng_dl, deleting the original columns.
Args:
drop_prior_columns(bool): If True, drops the original baseline_serum and baseline_serum_unit (or equivalent) columns.
"""
baseline_serum_ngdl = []
if self.study_number == "9202":
# Has two columns: baseline_serum, and baseline_serum_nmol_l, which are all mixed up
# Per Osama:
# if the value >100, it's in ng/dl, and belongs to baseline_serum
#. if the value <100, it's in nmol_l, and belongs to baseline_serum_nmol_l
# After running the code below:
# import matplotlib.pyplot as plt
# v = list(r9202.df['baseline_serum_nmol_l'].values) + list(r9202.df['baseline_serum'])
# v = [val for val in v if not np.isnan(val)]
# plt.hist(v, bins='auto')
# Is it evident that 75 is a better cutoff
cutoff = 75
for index, row in tqdm(self.df.iterrows()):
# If there's a conflict between baseline_serum and baseline_serum_nmol_l, we set the value to NaN
if not (np.isnan(row['baseline_serum']) or np.isnan(row['baseline_serum_nmol_l'])):
print("9202: serum conflict, setting to Nan: index={}, baseline_serum={}, baseline_serum_nmol_l={}".format(
index, row['baseline_serum'], row['baseline_serum_nmol_l']
))
baseline_serum_ngdl.append(np.nan)
continue
# Grab the row's serum value. One column has a nan, the other has a number.
if np.isnan(row['baseline_serum']):
rowval = row['baseline_serum_nmol_l']
else:
rowval = row['baseline_serum']
if rowval < cutoff:
baseline_serum_ngdl.append(serum_values_to_ng_dl(rowval, 'nmol/l'))
else:
baseline_serum_ngdl.append(rowval)
if drop_prior_columns:
self.df.drop(columns=['baseline_serum', 'baseline_serum_nmol_l'], inplace=True)
elif self.study_number == "9408":
# Conversion: 1= NMOL/L 2 = NG/DL 3 = NG/ML 4= Unit/NOS
for index, row in tqdm(self.df.iterrows()):
value = row['baseline_serum']
unit = row['baseline_serum_unit']
if np.isnan(value) or np.isnan(unit):
baseline_serum_ngdl.append(np.nan)
continue
if unit == 1:
new_value = serum_values_to_ng_dl(value, "nmol/l")
elif unit == 2:
new_value = serum_values_to_ng_dl(value, "ng/dl")
elif unit == 3:
new_value = serum_values_to_ng_dl(value, "ng/ml")
elif unit == 4:
#TODO: Adjust this, pending Osama/Felix clarifying how to convert unit/nos to ng/dl
print("9408: Action unknown for unit/nos, index={}. Setting baseline_serum value to nan".format(
index
))
new_value = np.nan
else:
raise ValueError("baseline_serum_unit type unknown: index={}, unit={}".format(index, unit))
baseline_serum_ngdl.append(new_value)
if drop_prior_columns:
self.df.drop(columns=['baseline_serum', 'baseline_serum_unit'], inplace=True)
elif self.study_number == "9413":
# Conversion: 1 = ng/dl 2 = ng/ml 3 = nmol/l 4 = units/NOS
for index, row in tqdm(self.df.iterrows()):
value = row['baseline_serum']
unit = row['baseline_serum_unit']
if np.isnan(value) or type(unit) != str:
baseline_serum_ngdl.append(np.nan)
continue
unit = unit.lower()
if unit in ["ng/dl", "ng/ml", "nmol/l"]:
new_value = serum_values_to_ng_dl(value, unit)
elif unit in ["unit/nos", "units/nos"]:
#TODO: Adjust this, pending Osama/Felix clarifying how to convert unit/nos to ng/dl
print("WARNING: Action unknown for unit/nos, index={}. Setting baseline_serum value to nan".format(
index
))
new_value = np.nan
elif unit in ['unk']:
new_value = np.nan
else:
raise ValueError("baseline_serum_unit type unknown: index={}, unit={}".format(index, unit))
baseline_serum_ngdl.append(new_value)
if drop_prior_columns:
self.df.drop(columns=['baseline_serum', 'baseline_serum_unit'], inplace=True)
elif self.study_number == "9910":
print("9910: no baseline_serum field. No action taken")
self.table_sort()
return
elif self.study_number == '0126':
# Conversion: 1=ng/dl 2=nmol/L 3=Other
df_serum = self.df_serum.copy()
df_serum['baseline_serum_value'] = df_serum['serum_value']
df_serum.loc[df_serum['serum_years'] != 0.0, 'baseline_serum_value'] = np.nan
df_serum = df_serum[df_serum['baseline_serum_value'].notnull()]
self.df = pd.merge(self.df, df_serum[['cn_deidentified', 'baseline_serum_value', 'serum_unit']], on=['cn_deidentified'], how='left')
for index, row in tqdm(self.df.iterrows()):
value = row['baseline_serum_value']
unit = row['serum_unit']
if np.isnan(value) or unit not in {1,2}:
baseline_serum_ngdl.append(np.nan)
elif unit == 1:
baseline_serum_ngdl.append(value)
elif unit == 2: # Unit is nmol/L
new_value = serum_values_to_ng_dl(value, 'nmol/l')
baseline_serum_ngdl.append(new_value)
else:
raise ValueError("0126, index={}, action unknown for value={}, unit={}".format(index, value, unit))
self.df.drop(columns=['baseline_serum_value', 'serum_unit'], inplace=True)
else:
raise ValueError("Study number not supported: {}".format(self.study_number))
self.df['baseline_serum_ng_dl'] = baseline_serum_ngdl
self.table_sort()
def standardize_rx(self):
"""Standardizes the treatment arms according to the following convention.
Notation:
RT (Radiotherapy),
STADT (Short-term Androgen Deprivation Therapy),
LTADT (Long-term Androgen Deprivation Therapy)
Classes:
0: RT
1: RT + STADT (Short-term Hormone)
2: RT + LTADT (Long-term Hormone)
3: RT + ITADT (Intermediate-term Hormone)
"""
if self.study_number == "9202": # Already matches above key
pass
elif self.study_number == "9408":
self.df.loc[self.df['rx'] == 2, 'rx'] = 0
elif self.study_number == "9413":
self.df['rx_orig'] = self.df['rx'] # We are required to store this for standardize_pelvic_rt
self.df.loc[self.df['rx'] == 1, 'rx'] = 1 # Variable sheet: 1 = Pre + Boost
self.df.loc[self.df['rx'] == 2, 'rx'] = 1 # Variable sheet: 2 = Pre + Prostate
self.df.loc[self.df['rx'] == 3, 'rx'] = 1 # Variable sheet: 3 = Boost/Horm
self.df.loc[self.df['rx'] == 4, 'rx'] = 1 # Variable sheet: 4 = Pros RT/Horm
elif self.study_number == "9910": # Already matches above key. "Ask Osama what to do about this rx. It's "intermediate"
self.df.loc[self.df['rx'] == 1, 'rx'] = 1 # Variable sheet: 1 = 8 Wks Pre-RT Hormone
self.df.loc[self.df['rx'] == 2, 'rx'] = 3 # Variable sheet: 2 = 28 Wks Pre-Rt Hormone
elif self.study_number == "0126": #TODO: ask Osama what to do about this rx. it's "intermediate"
self.df.loc[self.df['rx'] == 1, 'rx'] = 0 # Variable sheet: 1 = 3D/IMRT 70.2 (3D Intensity-Modulated RadioTherapy)
self.df.loc[self.df['rx'] == 2, 'rx'] = 0 # Variable sheet: 2 = 2 = 3D/IMRT 79.2
def standardize_race(self):
"""Standardizes race according to the rules below.
White=1,
Hispanic=2,
AA=3,
Asian=4,
Other=5
Unknown=9
9408:
Expected Values: 1,2,3,4,5,6,98,99
Actual Values: 1, 2, 3, 4, 5, 6, 98, 99
9413:
Expected Values: 1,2,3,4,6,7
Actual Values: 1, 2, 3, 5, 6, 98, 99
As a result of the above discrepancy (9413's actual values don't match the expected. They match the expected of 9408 instead)
The code below has 9413's standardization following the rules of 9408.
TODO: Osama to advise on what to do.
"""
if self.study_number == "9202":
self.df.loc[self.df['race'] == 6, 'race'] = 'Native American'
self.df.loc[self.df['race'] == 7, 'race'] = 'Other'
self.df.loc[self.df['race'] == 8, 'race'] = 'Unknown'
self.df.loc[self.df['race'] == 9, 'race'] = 'Prefer not to answer'
self.df.loc[self.df['race'] == 'Native American', 'race'] = 5
self.df.loc[self.df['race'] == 'Other', 'race'] = 5
self.df.loc[self.df['race'] == 'Unknown', 'race'] = 9
self.df.loc[self.df['race'] == 'Prefer not to answer', 'race'] = 9
# Changing the unknown class requires changing self.unknown_class_X
self.unknown_class_X['race'] = [9]
elif self.study_number == "9408":
self.df.loc[self.df['race'] == 4, 'race'] = 'NativeHawaiian' # Native Hawaiian -> Tmp
self.df.loc[self.df['race'] == 5, 'race'] = 'Asian' # Asian -> Asian
self.df.loc[self.df['race'] == 6, 'race'] = 'NativeAmerican' # Native American -> Other
self.df.loc[self.df['race'] == 98, 'race'] = 'Other' # Other -> Other
self.df.loc[self.df['race'] == 99, 'race'] = 'Unknown' # Unknown -> Unknown
self.df.loc[self.df['race'] == 'NativeHawaiian', 'race'] = 5 # Tmp -> Other
self.df.loc[self.df['race'] == 'Asian', 'race'] = 4 # Tmp -> Other
self.df.loc[self.df['race'] == 'NativeAmerican', 'race'] = 5 # Tmp -> Other
self.df.loc[self.df['race'] == 'Other', 'race'] = 5 # Tmp -> Other
self.df.loc[self.df['race'] == 'Unknown', 'race'] = 9 # Tmp -> Other
# Changing the unknown class requires changing self.unknown_class_X
self.unknown_class_X['race'] = [9]
elif self.study_number == "9413":
# Copied rules from 9408. At some point I was told to do this.
self.df.loc[self.df['race'] == 4, 'race'] = 'tmp' # Native Hawaiian -> Tmp
self.df.loc[self.df['race'] == 5, 'race'] = 4 # Asian -> Asian
self.df.loc[self.df['race'] == 'tmp', 'race'] = 5 # Tmp -> Other
self.df.loc[self.df['race'] == 6, 'race'] = 5 # Native American -> Other
self.df.loc[self.df['race'] == 98, 'race'] = 5 # Other -> Other
self.df.loc[self.df['race'] == 99, 'race'] = 9 # Unknown -> Unknown
# Changing the unknown class requires changing self.unknown_class_X
self.unknown_class_X['race'] = [9]
# Original rules for 9413
# self.df.loc[self.df['race'] == 6, 'race'] = 5 # Native American -> Other
# self.df.loc[self.df['race'] == 7, 'race'] = 5 # Other -> Other
elif self.study_number == "9910":
self.df.loc[self.df['race'] == 4, 'race'] = 'Native Hawaiian' # Native Hawaiian -> Tmp
self.df.loc[self.df['race'] == 5, 'race'] = 'Asian' # Asian -> Asian
self.df.loc[self.df['race'] == 6, 'race'] = 'Native American' # Native American -> Other
self.df.loc[self.df['race'] == 98, 'race'] = 'Other' # Other -> Other
self.df.loc[self.df['race'] == 99, 'race'] = 'Unknown' # Unknown -> Unknown
self.df.loc[self.df['race'] == 'Native Hawaiian', 'race'] = 5
self.df.loc[self.df['race'] == 'Asian', 'race'] = 4
self.df.loc[self.df['race'] == 'Native American', 'race'] = 5
self.df.loc[self.df['race'] == 'Other', 'race'] = 5
self.df.loc[self.df['race'] == 'Unknown', 'race'] = 9
# Changing the unknown class requires changing self.unknown_class_X
self.unknown_class_X['race'] = [9]
elif self.study_number == "0126":
self.df.loc[self.df['race'] == 1, 'race'] = 'Native American'
self.df.loc[self.df['race'] == 2, 'race'] = 'Asian'
self.df.loc[self.df['race'] == 3, 'race'] = 'Black'
self.df.loc[self.df['race'] == 4, 'race'] = 'Native Hawaiian'
self.df.loc[self.df['race'] == 5, 'race'] = 'White'
self.df.loc[self.df['race'] == 6, 'race'] = 'Multi-Race'
self.df.loc[self.df['race'] == 9, 'race'] = 'Unknown'
self.df.loc[self.df['race'] == 'Native American', 'race'] = 5
self.df.loc[self.df['race'] == 'Asian', 'race'] = 4
self.df.loc[self.df['race'] == 'Black', 'race'] = 3
self.df.loc[self.df['race'] == 'Native Hawaiian', 'race'] = 5
self.df.loc[self.df['race'] == 'White', 'race'] = 1
self.df.loc[self.df['race'] == 'Multi-Race', 'race'] = 5
self.df.loc[self.df['race'] == 'Unknown', 'race'] = 9
else:
raise ValueError("Study number not supported: {}".format(self.study_number))
self.df['race'] = pd.to_numeric(self.df['race'], downcast="float")
def standardize_unknown_values_in_predictor_variables(self, search_string=""):
"""Replaces all unknown values in predictor variables with nans.
This is done to allow the model, or the programmer, to intelligently impute. E.g. xgboost benefits from this, if the data is very sparse.
Args:
search_string(str): standardizes the unknown values of any variable defined in self.unknown_class_X, iff that variable name contains 'search_string'
"""
items = [(var, val) for var, val in self.unknown_class_X.items() if search_string in var]
for var, unknown_vals in items:
for uv in unknown_vals:
self.df.loc[self.df[var] == uv, var] = np.nan
def standardize_gleason_scores(self):
"""Fills in all three of (gleason_primary, secondary, combined), if possible.
Primary: 1-5
Secondary: 1-5
Combined: 2-10
Anything else (e.g. multiple studies have 9 or 99 for Unknown): set to nan
"""
# This line handles the case of the values '9' and '99' referring to unknown gleason for
# primary/secondary, and combined, respectively.
self.standardize_unknown_values_in_predictor_variables(search_string="gleason")
if self.study_number == "9202":
self.df['gleason_combined'] = self.df['gleason_primary'] + self.df['gleason_secondary']
self.df.drop(columns=['gleason'], inplace=True)
elif self.study_number == "9408":
pass
elif self.study_number == "9413":
self.df['gleason_secondary'] = self.df['gleason_combined'] - self.df['gleason_primary']
elif self.study_number == "9910":
self.df['gleason_combined'] = self.df['gleason']
self.df.drop(columns=['gleason'], inplace=True)
elif self.study_number == "0126":
self.df['gleason_combined'] = self.df['gleason']
self.df.drop(columns=['gleason'], inplace=True)
else:
raise ValueError("Study number not supported: {}".format(self.study_number))
def standardize_tstage(self):
"""Consolidate T-stage: T1, T2, T3, T4
0 - T0
1 - T1
2 - T2
3 - T3
4 - T4
"""
if self.study_number == "9202":
self.df.loc[self.df['tstage'] == 5, 'tstage'] = 1
self.df.loc[self.df['tstage'] == 6, 'tstage'] = 1
self.df.loc[self.df['tstage'] == 10, 'tstage'] = 1
self.df.loc[self.df['tstage'] == 7, 'tstage'] = 2
self.df.loc[self.df['tstage'] == 8, 'tstage'] = 2
self.df.loc[self.df['tstage'] == 11, 'tstage'] = 2
self.df.loc[self.df['tstage'] == 12, 'tstage'] = 3
self.df.loc[self.df['tstage'] == 13, 'tstage'] = 3
self.df.loc[self.df['tstage'] == 14, 'tstage'] = 3
self.df.loc[self.df['tstage'] == 15, 'tstage'] = 4
self.df.loc[self.df['tstage'] == 16, 'tstage'] = 4
elif self.study_number == "9408":
self.df.loc[self.df['tstage'] == 2, 'tstage'] = 1 # T1a -> T1
self.df.loc[self.df['tstage'] == 3, 'tstage'] = 1 # T1b -> T1
self.df.loc[self.df['tstage'] == 4, 'tstage'] = 1 # T1c -> T1
self.df.loc[self.df['tstage'] == 5, 'tstage'] = 2 # T2 Nos -> T2
self.df.loc[self.df['tstage'] == 6, 'tstage'] = 2 # T2a -> T2
self.df.loc[self.df['tstage'] == 7, 'tstage'] = 2 # T2b -> T2
self.df.loc[self.df['tstage'] == 8, 'tstage'] = 2 # T2c -> T2
self.df.loc[self.df['tstage'] == 10, 'tstage'] = 3 # T3 NOS -> T3
self.df.loc[self.df['tstage'] == 11, 'tstage'] = 3 # T3a -> T3
self.df.loc[self.df['tstage'] == 12, 'tstage'] = 3 # T3b -> T3
self.df.loc[self.df['tstage'] == 13, 'tstage'] = 3 # T3c -> T3
self.df.loc[self.df['tstage'] == 14, 'tstage'] = 4 # T4 NOS -> T4
elif self.study_number == "9413":
self.df.loc[self.df['tstage'] == 2, 'tstage'] = 1 # T2 NOS -> T2
self.df.loc[self.df['tstage'] == 3, 'tstage'] = 1 # T2 NOS -> T2
self.df.loc[self.df['tstage'] == 4, 'tstage'] = 1 # T2 NOS -> T2
self.df.loc[self.df['tstage'] == 5, 'tstage'] = 2 # T2 NOS -> T2
self.df.loc[self.df['tstage'] == 6, 'tstage'] = 2 # T2a -> T2
self.df.loc[self.df['tstage'] == 7, 'tstage'] = 2 # T2b -> T2
self.df.loc[self.df['tstage'] == 8, 'tstage'] = 2 # T2c -> T2
self.df.loc[self.df['tstage'] == 10, 'tstage'] = 3 # T3 NOS -> T3
self.df.loc[self.df['tstage'] == 11, 'tstage'] = 3 # T3a -> T3
self.df.loc[self.df['tstage'] == 12, 'tstage'] = 3 # T3b -> T3
self.df.loc[self.df['tstage'] == 13, 'tstage'] = 3 # T3c -> T3
self.df.loc[self.df['tstage'] == 14, 'tstage'] = 4 # T4 NOS-> T4
self.df.loc[self.df['tstage'] == 15, 'tstage'] = 4 # Undefined in variable listing. 15 and 16 appear in data. Assume T4.
self.df.loc[self.df['tstage'] == 16, 'tstage'] = 4 #
elif self.study_number == "9910":
self.df.loc[self.df['tstage'] == 2, 'tstage'] = 1 # T1a -> T1
self.df.loc[self.df['tstage'] == 3, 'tstage'] = 1 # T1b -> T1
self.df.loc[self.df['tstage'] == 4, 'tstage'] = 1 # T1c -> T1
self.df.loc[self.df['tstage'] == 5, 'tstage'] = 2 # T2 Nos -> T2
self.df.loc[self.df['tstage'] == 6, 'tstage'] = 2 # T2a -> T2
self.df.loc[self.df['tstage'] == 7, 'tstage'] = 2 # T2b -> T2
self.df.loc[self.df['tstage'] == 10, 'tstage'] = 3 # T3 NOS -> T3
self.df.loc[self.df['tstage'] == 11, 'tstage'] = 3 # T3a -> T3
self.df.loc[self.df['tstage'] == 12, 'tstage'] = 3 # T3b -> T3
self.df.loc[self.df['tstage'] == 13, 'tstage'] = 4 # T4 -> T4
elif self.study_number == "0126":
self.df.loc[self.df['tstage'] == 2, 'tstage'] = 1 # T1c -> T1
self.df.loc[self.df['tstage'] == 3, 'tstage'] = 2 # T2a -> T2
self.df.loc[self.df['tstage'] == 4, 'tstage'] = 2 # T2b -> T2
self.df.loc[self.df['tstage'] == 5, 'tstage'] = 3 # T3a -> T3
self.df.loc[self.df['tstage'] == 6, 'tstage'] = 3 # T3b -> T3
self.df.loc[self.df['tstage'] == 7, 'tstage'] = 4 # T4 -> T4
else:
raise ValueError("Study number not supported: {}".format(self.study_number))
def standardize_pelvic_rt(self):
"""Creates variable pelvic_rt.
0 - no
1 - yes
Instructions:
Add new category, pelvic RT (yes=1, no=0, unknown=3).
For 9202, look at pelvic_dose: if "0" then no. If integer, then "1". If blank, then "3".
For 9408, keep the colum blank because its missing data (will clarify with NRG).
For 9413: Rx 1 and 3 are "1". Rx 2 and 4 are "0".
"""
if self.study_number == "9202":
self.df.loc[self.df_rt['pelvis_dose'] == 0, 'pelvic_rt'] = 0
self.df.loc[self.df_rt['pelvis_dose'] > 0, 'pelvic_rt'] = 1
self.df.loc[self.df_rt['pelvis_dose'].isnull(), 'pelvic_rt'] = np.nan
elif self.study_number == "9408":
self.df['pelvic_rt'] = np.nan
elif self.study_number == "9413":
if 'rx_orig' in self.df.columns: # if we've run standardize_rx()
column = 'rx_orig'
else: # if we haven't yet
column = 'rx'
self.df.loc[self.df[column] == 1, 'pelvic_rt'] = 1
self.df.loc[self.df[column] == 3, 'pelvic_rt'] = 1
self.df.loc[self.df[column] == 2, 'pelvic_rt'] = 0
self.df.loc[self.df[column] == 4, 'pelvic_rt'] = 0
elif self.study_number == "9910":
self.df = | pd.merge(self.df, self.df_rt[['cn_deidentified', 'pelvic_rt']], on=['cn_deidentified'], how='left') | pandas.merge |
import pandas as pd
from .datastore import merge_postcodes
from .types import ErrorDefinition
from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use!
def validate_165():
error = ErrorDefinition(
code = '165',
description = 'Data entry for mother status is invalid.',
affected_fields = ['MOTHER', 'SEX', 'ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
valid_values = ['0','1']
# prepare to merge
oc3.reset_index(inplace=True)
header.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['EPS'] = (episodes['DECOM']>=collection_start) & (episodes['DECOM']<=collection_end)
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']).merge(oc3, on='CHILD', how='left')
# Raise error if provided <MOTHER> is not a valid value.
value_validity = merged['MOTHER'].notna() & (~merged['MOTHER'].isin(valid_values))
# If not provided
female = (merged['SEX']=='1')
eps_in_year = (merged['EPS_COUNT']>0)
none_provided = (merged['ACTIV'].isna() & merged['ACCOM'].isna() & merged['IN_TOUCH'].isna())
# If provided <MOTHER> must be a valid value. If not provided <MOTHER> then either <GENDER> is male or no episode record for current year and any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided
mask = value_validity | (merged['MOTHER'].isna() & (female & (eps_in_year | none_provided)))
# That is, if value not provided and child is a female with eps in current year or no values of IN_TOUCH, ACTIV and ACCOM, then raise error.
error_locs_eps = merged.loc[mask, 'index_eps']
error_locs_header = merged.loc[mask, 'index_er']
error_locs_oc3 = merged.loc[mask, 'index']
return {'Header':error_locs_header.dropna().unique().tolist(),
'OC3':error_locs_oc3.dropna().unique().tolist()}
return error, _validate
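# Hedged sketch (not part of the original rule set): every validate_* factory in this module
# returns an (ErrorDefinition, checker) pair. A caller might collect results per rule like
# this; keying by error.code assumes ErrorDefinition exposes the 'code' attribute it was
# constructed with, and the dfs layout follows the checkers above.
def _demo_run_rules(dfs):
    results = {}
    for factory in (validate_165, validate_1014, validate_357):
        error, _validate = factory()
        results[error.code] = _validate(dfs)
    return results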
def validate_1014():
error = ErrorDefinition(
code='1014',
description='UASC information is not required for care leavers',
affected_fields=['ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'UASC' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
uasc = dfs['UASC']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
# prepare to merge
oc3.reset_index(inplace=True)
uasc.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
date_check = (
((episodes['DECOM'] >= collection_start) & (episodes['DECOM'] <= collection_end))
| ((episodes['DEC'] >= collection_start) & (episodes['DEC'] <= collection_end))
| ((episodes['DECOM'] <= collection_start) & episodes['DEC'].isna())
)
episodes['EPS'] = date_check
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
# inner merge to take only episodes of children which are also found on the uasc table
merged = episodes.merge(uasc, on='CHILD', how='inner', suffixes=['_eps', '_sc']).merge(oc3, on='CHILD',
how='left')
# note: no suffixes are applied on the second merge (with oc3), so oc3's unsuffixed 'index' column is used below.
some_provided = (merged['ACTIV'].notna() | merged['ACCOM'].notna() | merged['IN_TOUCH'].notna())
mask = (merged['EPS_COUNT'] == 0) & some_provided
error_locs_uasc = merged.loc[mask, 'index_sc']
error_locs_oc3 = merged.loc[mask, 'index']
return {'UASC': error_locs_uasc.unique().tolist(), 'OC3': error_locs_oc3.unique().tolist()}
return error, _validate
# !# not sure what this rule is actually supposed to be getting at - description is confusing
def validate_197B():
error = ErrorDefinition(
code='197B',
description="SDQ score or reason for no SDQ should be reported for 4- or 17-year-olds.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
(
(oc2['DOB'] + pd.DateOffset(years=4) == start) # ???
| (oc2['DOB'] + pd.DateOffset(years=17) == start)
)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
& oc2['SDQ_REASON'].isna()
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_157():
error = ErrorDefinition(
code='157',
description="Child is aged 4 years or over at the beginning of the year or 16 years or under at the end of the "
"year and Strengths and Difficulties Questionnaire (SDQ) 1 has been recorded as the reason for no "
"Strengths and Difficulties Questionnaire (SDQ) score.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
oc2['CONTINUOUSLY_LOOKED_AFTER']
& (oc2['DOB'] + pd.DateOffset(years=4) <= start)
& (oc2['DOB'] + pd.DateOffset(years=16) >= endo)
& oc2['SDQ_SCORE'].isna()
& (oc2['SDQ_REASON'] == 'SDQ1')
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_357():
error = ErrorDefinition(
code='357',
description='If this is the first episode ever for this child, reason for new episode must be S. '
'Check whether there is an episode immediately preceding this one, which has been left out. '
'If not the reason for new episode code must be amended to S.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
eps = dfs['Episodes']
eps['DECOM'] = pd.to_datetime(eps['DECOM'], format='%d/%m/%Y', errors='coerce')
eps = eps.loc[eps['DECOM'].notnull()]
first_eps = eps.loc[eps.groupby('CHILD')['DECOM'].idxmin()]
errs = first_eps[first_eps['RNE'] != 'S'].index.to_list()
return {'Episodes': errs}
return error, _validate
def validate_117():
error = ErrorDefinition(
code='117',
description='Date of decision that a child should/should no longer be placed for adoption is beyond the current collection year or after the child ceased to be looked after.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_PLACED', 'DEC', 'REC', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placed_adoption = dfs['PlacedAdoption']
collection_end = dfs['metadata']['collection_end']
# datetime
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# Drop nans and continuing episodes
episodes = episodes.dropna(subset=['DECOM'])
episodes = episodes[episodes['REC'] != 'X1']
episodes = episodes.loc[episodes.groupby('CHILD')['DECOM'].idxmax()]
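# this leaves one row per child: the latest non-continuing episode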
# prepare to merge
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
p4a_cols = ['DATE_PLACED', 'DATE_PLACED_CEASED']
# latest episodes
merged = episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
mask = (
(merged['DATE_PLACED'] > collection_end)
| (merged['DATE_PLACED'] > merged['DEC'])
| (merged['DATE_PLACED_CEASED'] > collection_end)
| (merged['DATE_PLACED_CEASED'] > merged['DEC'])
)
# If provided <DATE_PLACED> and/or <DATE_PLACED_CEASED> must not be > <COLLECTION_END_DATE> or <DEC> of latest episode where <REC> not = 'X1'
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_118():
error = ErrorDefinition(
code='118',
description='Date of decision that a child should no longer be placed for adoption is before the current collection year or before the date the child started to be looked after.',
affected_fields=['DECOM', 'DECOM', 'LS']
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
code_list = ['V3', 'V4']
# datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
filter_by_ls = episodes[~(episodes['LS'].isin(code_list))]
earliest_episode_idxs = filter_by_ls.groupby('CHILD')['DECOM'].idxmin()
earliest_episodes = episodes[episodes.index.isin(earliest_episode_idxs)]
# prepare to merge
placed_adoption.reset_index(inplace=True)
earliest_episodes.reset_index(inplace=True)
# merge
merged = earliest_episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
# drop rows where DATE_PLACED_CEASED is not provided
merged = merged.dropna(subset=['DATE_PLACED_CEASED'])
# If provided <DATE_PLACED_CEASED> must not be prior to <COLLECTION_START_DATE> or <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
mask = (merged['DATE_PLACED_CEASED'] < merged['DECOM']) | (merged['DATE_PLACED_CEASED'] < collection_start)
# error locations
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_352():
error = ErrorDefinition(
code='352',
description='Child who started to be looked after was aged 18 or over.',
affected_fields=['DECOM', 'RNE'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
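# DOB18 is the child's 18th birthday; an episode with RNE 'S' starting on or after it means care started at 18 or over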
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
care_start = episodes_merged['RNE'].str.upper().astype(str).isin(['S'])
started_over_18 = episodes_merged['DOB18'] <= episodes_merged['DECOM']
error_mask = care_start & started_over_18
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_209():
error = ErrorDefinition(
code='209',
description='Child looked after is of school age and should not have an unknown Unique Pupil Number (UPN) code of UN1.',
affected_fields=['UPN', 'DOB']
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
collection_start = dfs['metadata']['collection_start']
# convert to datetime
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
yr = collection_start.year - 1
reference_date = pd.to_datetime('31/08/' + str(yr), format='%d/%m/%Y', errors='coerce')
# If <DOB> >= 4 years prior to 31/08/YYYY then <UPN> should not be 'UN1' Note: YYYY in this instance refers to the year prior to the collection start (for collection year 2019-2020, it would be looking at the 31/08/2018).
mask = (reference_date >= (header['DOB'] + pd.offsets.DateOffset(years=4))) & (header['UPN'] == 'UN1')
# error locations
error_locs_header = header.index[mask]
return {'Header': error_locs_header.tolist()}
return error, _validate
def validate_198():
error = ErrorDefinition(
code='198',
description="Child has not been looked after continuously for at least 12 months at 31 March but a reason "
"for no Strengths and Difficulties (SDQ) score has been completed. ",
affected_fields=['SDQ_REASON'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_REASON'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_185():
error = ErrorDefinition(
code='185',
description="Child has not been looked after continuously for at least 12 months at " +
"31 March but a Strengths and Difficulties (SDQ) score has been completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_186():
error = ErrorDefinition(
code='186',
description="Children aged 4 or over at the start of the year and children aged under 17 at the " +
"end of the year and who have been looked after for at least 12 months continuously " +
"should have a Strengths and Difficulties (SDQ) score completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_start_str = dfs['metadata']['collection_start']
collection_end_str = dfs['metadata']['collection_end']
collection_start = pd.to_datetime(collection_start_str, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2 = add_CLA_column(dfs, 'OC2')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
oc2['17th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=17)
error_mask = (
(oc2['4th_bday'] <= collection_start)
& (oc2['17th_bday'] > collection_end)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_187():
error = ErrorDefinition(
code='187',
description="Child cannot be looked after continuously for 12 months at " +
"31 March (OC2) and have any of adoption or care leavers returns completed.",
affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR', # AD1
'IN_TOUCH', 'ACTIV', 'ACCOM'], # OC3
)
def _validate(dfs):
if (
'OC3' not in dfs
or 'AD1' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
ad1, oc3 = add_CLA_column(dfs, ['AD1', 'OC3'])
# OC3
should_be_blank = ['IN_TOUCH', 'ACTIV', 'ACCOM']
oc3_mask = oc3['CONTINUOUSLY_LOOKED_AFTER'] & oc3[should_be_blank].notna().any(axis=1)
oc3_error_locs = oc3[oc3_mask].index.to_list()
# AD1
should_be_blank = ['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR']
ad1_mask = ad1['CONTINUOUSLY_LOOKED_AFTER'] & ad1[should_be_blank].notna().any(axis=1)
ad1_error_locs = ad1[ad1_mask].index.to_list()
return {'AD1': ad1_error_locs,
'OC3': oc3_error_locs}
return error, _validate
def validate_188():
error = ErrorDefinition(
code='188',
description="Child is aged under 4 years at the end of the year, "
"but a Strengths and Difficulties (SDQ) score or a reason "
"for no SDQ score has been completed. ",
affected_fields=['SDQ_SCORE', 'SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_end_str = dfs['metadata']['collection_end']
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
error_mask = (
(oc2['4th_bday'] > collection_end)
& oc2[['SDQ_SCORE', 'SDQ_REASON']].notna().any(axis=1)
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_190():
error = ErrorDefinition(
code='190',
description="Child has not been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been completed.",
affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
, # OC2
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_blank = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
mask = ~oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_blank].notna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_191():
error = ErrorDefinition(
code='191',
description="Child has been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been left blank.",
affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'], # OC2
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_present = ['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE']
mask = oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_present].isna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_607():
error = ErrorDefinition(
code='607',
description='Child ceased to be looked after in the year, but mother field has not been completed.',
affected_fields=['DEC', 'REC', 'MOTHER', 'LS', 'SEX']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
code_list = ['V3', 'V4']
# convert to datetiime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# CEASED_TO_BE_LOOKED_AFTER = DEC is not null and REC is filled but not equal to X1
CEASED_TO_BE_LOOKED_AFTER = merged['DEC'].notna() & ((merged['REC'] != 'X1') & merged['REC'].notna())
# and <LS> not = ‘V3’ or ‘V4’
check_LS = ~(merged['LS'].isin(code_list))
# and <DEC> is in <CURRENT_COLLECTION_YEAR
check_DEC = (collection_start <= merged['DEC']) & (merged['DEC'] <= collection_end)
# Where <CEASED_TO_BE_LOOKED_AFTER> = ‘Y’, and <LS> not = ‘V3’ or ‘V4’ and <DEC> is in <CURRENT_COLLECTION_YEAR> and <SEX> = ‘2’ then <MOTHER> should be provided.
mask = CEASED_TO_BE_LOOKED_AFTER & check_LS & check_DEC & (merged['SEX'] == '2') & (merged['MOTHER'].isna())
header_error_locs = merged.loc[mask, 'index_er']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_210():
error = ErrorDefinition(
code='210',
description='Children looked after for more than a week at 31 March should not have an unknown Unique Pupil Number (UPN) code of UN4.',
affected_fields=['UPN', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_end = dfs['metadata']['collection_end']
# convert to datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
yr = collection_end.year
reference_date = pd.to_datetime('24/03/' + str(yr), format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
# the logical way would be to merge left on UPN, but that would be a one-to-many merge and may not behave as well as the many-to-one merge on CHILD used elsewhere.
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# If <UPN> = 'UN4' then no episode <DECOM> must be >= 24/03/YYYY. Note: YYYY refers to the current collection year.
mask = (merged['UPN'] == 'UN4') & (merged['DECOM'] >= reference_date)
# error locations
error_locs_header = merged.loc[mask, 'index_er']
error_locs_eps = merged.loc[mask, 'index_eps']
return {'Episodes': error_locs_eps.tolist(), 'Header': error_locs_header.unique().tolist()}
return error, _validate
def validate_1010():
error = ErrorDefinition(
code='1010',
description='This child has no episodes loaded for current year even though there was an open episode of '
+ 'care at the end of the previous year, and care leaver data has been entered.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
oc3 = dfs['OC3']
# convert DECOM to datetime, drop missing/invalid sort by CHILD then DECOM,
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last = episodes_last.dropna(subset=['DECOM']).sort_values(['CHILD', 'DECOM'], ascending=True)
# Keep only the final episode for each child (ie where the following row has a different CHILD value)
episodes_last = episodes_last[
episodes_last['CHILD'].shift(-1) != episodes_last['CHILD']
]
# Keep only the final episodes that were still open
episodes_last = episodes_last[episodes_last['DEC'].isna()]
# The remaining children ought to have episode data in the current year if they are in OC3
has_current_episodes = oc3['CHILD'].isin(episodes['CHILD'])
has_open_episode_last = oc3['CHILD'].isin(episodes_last['CHILD'])
error_mask = ~has_current_episodes & has_open_episode_last
validation_error_locations = oc3.index[error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_525():
error = ErrorDefinition(
code='525',
description='A child for whom the decision to be placed for adoption has been reversed cannot be adopted during the year.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR',
'LS_ADOPTR']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs or 'AD1' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
ad1 = dfs['AD1']
# prepare to merge
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = placed_adoption.merge(ad1, on='CHILD', how='left', suffixes=['_placed', '_ad1'])
# If <DATE_PLACED_CEASED> not Null, then <DATE_INT>; <DATE_MATCH>; <FOSTER_CARE>; <NB_ADOPTR>; <SEX_ADOPTR>; and <LS_ADOPTR> should not be provided
mask = merged['DATE_PLACED_CEASED'].notna() & (
merged['DATE_INT'].notna() | merged['DATE_MATCH'].notna() | merged['FOSTER_CARE'].notna() |
merged['NB_ADOPTR'].notna() | merged['SEX_ADOPTR'].notna() | merged['LS_ADOPTR'].notna())
# error locations
pa_error_locs = merged.loc[mask, 'index_placed']
ad_error_locs = merged.loc[mask, 'index_ad1']
# return result
return {'PlacedAdoption': pa_error_locs.tolist(), 'AD1': ad_error_locs.tolist()}
return error, _validate
def validate_335():
error = ErrorDefinition(
code='335',
description='The current foster value (0) suggests that child is not adopted by current foster carer, but last placement is A2, A3, or A5. Or the current foster value (1) suggests that child is adopted by current foster carer, but last placement is A1, A4 or A6.',
affected_fields=['PLACE', 'FOSTER_CARE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'AD1' not in dfs:
return {}
else:
episodes = dfs['Episodes']
ad1 = dfs['AD1']
# prepare to merge
episodes.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = episodes.merge(ad1, on='CHILD', how='left', suffixes=['_eps', '_ad1'])
# Where <PL> = 'A2', 'A3' or 'A5' and <REC> = 'E1', 'E11', 'E12' <FOSTER_CARE> should not be '0'; Where <PL> = 'A1', 'A4' or 'A6' and <REC> = 'E1', 'E11', 'E12' <FOSTER_CARE> should not be '1'.
mask = (
merged['REC'].isin(['E1', 'E11', 'E12']) & (
(merged['PLACE'].isin(['A2', 'A3', 'A5']) & (merged['FOSTER_CARE'].astype(str) == '0'))
| (merged['PLACE'].isin(['A1', 'A4', 'A6']) & (merged['FOSTER_CARE'].astype(str) == '1'))
)
)
eps_error_locs = merged.loc[mask, 'index_eps']
ad1_error_locs = merged.loc[mask, 'index_ad1']
# use .unique since join is many to one
return {'Episodes': eps_error_locs.tolist(), 'AD1': ad1_error_locs.unique().tolist()}
return error, _validate
def validate_215():
error = ErrorDefinition(
code='215',
description='Child has care leaver information but one or more data items relating to children looked after for 12 months have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK',
'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
)
def _validate(dfs):
if 'OC3' not in dfs or 'OC2' not in dfs:
return {}
else:
oc3 = dfs['OC3']
oc2 = dfs['OC2']
# prepare to merge
oc3.reset_index(inplace=True)
oc2.reset_index(inplace=True)
merged = oc3.merge(oc2, on='CHILD', how='left', suffixes=['_3', '_2'])
# If any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided then <CONVICTED>; <HEALTH_CHECK>; <IMMUNISATIONS>; <TEETH_CHECK>; <HEALTH_ASSESSMENT>; <SUBSTANCE MISUSE>; <INTERVENTION_RECEIVED>; <INTERVENTION_OFFERED>; should not be provided
mask = (merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna()) & (
merged['CONVICTED'].notna() | merged['HEALTH_CHECK'].notna() | merged['IMMUNISATIONS'].notna() |
merged['TEETH_CHECK'].notna() | merged['HEALTH_ASSESSMENT'].notna() | merged[
'SUBSTANCE_MISUSE'].notna() | merged['INTERVENTION_RECEIVED'].notna() | merged[
'INTERVENTION_OFFERED'].notna())
# error locations
oc3_error_locs = merged.loc[mask, 'index_3']
oc2_error_locs = merged.loc[mask, 'index_2']
return {'OC3': oc3_error_locs.tolist(), 'OC2': oc2_error_locs.tolist()}
return error, _validate
def validate_399():
error = ErrorDefinition(
code='399',
description='Mother field, review field or participation field are completed but '
+ 'child is looked after under legal status V3 or V4.',
affected_fields=['MOTHER', 'LS', 'REVIEW', 'REVIEW_CODE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs or 'Reviews' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
reviews = dfs['Reviews']
code_list = ['V3', 'V4']
# prepare to merge
episodes['index_eps'] = episodes.index
header['index_hdr'] = header.index
reviews['index_revs'] = reviews.index
# merge
merged = (episodes.merge(header, on='CHILD', how='left')
.merge(reviews, on='CHILD', how='left'))
# If <LS> = 'V3' or 'V4' then <MOTHER>, <REVIEW> and <REVIEW_CODE> should not be provided
mask = merged['LS'].isin(code_list) & (
merged['MOTHER'].notna() | merged['REVIEW'].notna() | merged['REVIEW_CODE'].notna())
# Error locations
eps_errors = merged.loc[mask, 'index_eps']
header_errors = merged.loc[mask, 'index_hdr'].unique()
revs_errors = merged.loc[mask, 'index_revs'].unique()
return {'Episodes': eps_errors.tolist(),
'Header': header_errors.tolist(),
'Reviews': revs_errors.tolist()}
return error, _validate
def validate_189():
error = ErrorDefinition(
code='189',
description='Child is aged 17 years or over at the beginning of the year, but an Strengths and Difficulties '
+ '(SDQ) score or a reason for no Strengths and Difficulties (SDQ) score has been completed.',
affected_fields=['DOB', 'SDQ_SCORE', 'SDQ_REASON']
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
collection_start = dfs['metadata']['collection_start']
# datetime format allows appropriate comparison between dates
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# If <DOB> >17 years prior to <COLLECTION_START_DATE> then <SDQ_SCORE> and <SDQ_REASON> should not be provided
mask = ((oc2['DOB'] + pd.offsets.DateOffset(years=17)) <= collection_start) & (
oc2['SDQ_REASON'].notna() | oc2['SDQ_SCORE'].notna())
# That is, raise error if collection_start > DOB + 17years
oc_error_locs = oc2.index[mask]
return {'OC2': oc_error_locs.tolist()}
return error, _validate
def validate_226():
error = ErrorDefinition(
code='226',
description='Reason for placement change is not required.',
affected_fields=['REASON_PLACE_CHANGE', 'PLACE']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
code_list = ['T0', 'T1', 'T2', 'T3', 'T4']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# create column to see previous REASON_PLACE_CHANGE
episodes = episodes.sort_values(['CHILD', 'DECOM'])
episodes['PREVIOUS_REASON'] = episodes.groupby('CHILD')['REASON_PLACE_CHANGE'].shift(1)
# If <PL> = 'T0'; 'T1'; 'T2'; 'T3' or 'T4' then <REASON_PLACE_CHANGE> should be null in current episode and current episode - 1
mask = episodes['PLACE'].isin(code_list) & (
episodes['REASON_PLACE_CHANGE'].notna() | episodes['PREVIOUS_REASON'].notna())
# error locations
error_locs = episodes.index[mask]
return {'Episodes': error_locs.tolist()}
return error, _validate
def validate_358():
error = ErrorDefinition(
code='358',
description='Child with this legal status should not be under 10.',
affected_fields=['DECOM', 'DOB', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['J1', 'J2', 'J3']
# convert dates to datetime format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# Where <LS> = ‘J1’ or ‘J2’ or ‘J3’ then <DOB> should <= to 10 years prior to <DECOM>
mask = merged['LS'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=10) < merged['DECOM'])
# That is, raise error if DECOM > DOB + 10years
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_407():
error = ErrorDefinition(
code='407',
description='Reason episode ceased is Special Guardianship Order, but child has reached age 18.',
affected_fields=['DEC', 'DOB', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['E45', 'E46', 'E47', 'E48']
# convert dates to datetime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# If <REC> = ‘E45’ or ‘E46’ or ‘E47’ or ‘E48’ then <DOB> must be < 18 years prior to <DEC>
mask = merged['REC'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=18) < merged['DEC'])
# That is, raise error if DEC > DOB + 18 years
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_1007():
error = ErrorDefinition(
code='1007',
description='Care leaver information is not required for 17- or 18-year olds who are still looked after.',
affected_fields=['DEC', 'REC', 'DOB', 'IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_end = dfs['metadata']['collection_end']
# convert dates to datetime format
oc3['DOB'] = pd.to_datetime(oc3['DOB'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
oc3.reset_index(inplace=True)
merged = episodes.merge(oc3, on='CHILD', how='left', suffixes=['_eps', '_oc3'])
# If <DOB> < 19 and >= to 17 years prior to <COLLECTION_END_DATE> and current episode <DEC> and or <REC> not provided then <IN_TOUCH>, <ACTIV> and <ACCOM> should not be provided
check_age = (merged['DOB'] + pd.offsets.DateOffset(years=17) <= collection_end) & (
merged['DOB'] + pd.offsets.DateOffset(years=19) > collection_end)
# That is, check that 17<=age<19
check_dec_rec = merged['REC'].isna() | merged['DEC'].isna()
# if either DEC or REC are absent
mask = check_age & check_dec_rec & (
merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna())
# Then raise an error if either IN_TOUCH, ACTIV, or ACCOM have been provided too
# error locations
oc3_error_locs = merged.loc[mask, 'index_oc3']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'OC3': oc3_error_locs.unique().tolist()}
return error, _validate
def validate_442():
error = ErrorDefinition(
code='442',
description='Unique Pupil Number (UPN) field is not completed.',
affected_fields=['UPN', 'LS']
)
def _validate(dfs):
if ('Episodes' not in dfs) or ('Header' not in dfs):
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
code_list = ['V3', 'V4']
# merge left on episodes to get all children for which episodes have been recorded even if they do not exist on the header.
merged = episodes.merge(header, on=['CHILD'], how='left', suffixes=['_eps', '_er'])
# Where any episode present, with an <LS> not = 'V3' or 'V4' then <UPN> must be provided
mask = (~merged['LS'].isin(code_list)) & merged['UPN'].isna()
episode_error_locs = merged.loc[mask, 'index_eps']
header_error_locs = merged.loc[mask, 'index_er']
return {'Episodes': episode_error_locs.tolist(),
# Select unique values since many episodes are joined to one header
# and multiple errors will be raised for the same index.
'Header': header_error_locs.dropna().unique().tolist()}
return error, _validate
def validate_344():
error = ErrorDefinition(
code='344',
description='The record shows the young person has died or returned home to live with parent(s) or someone with parental responsibility for a continuous period of 6 months or more, but activity and/or accommodation on leaving care have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'DIED' or 'RHOM' then <ACTIV> and <ACCOM> should not be provided
mask = ((oc3['IN_TOUCH'] == 'DIED') | (oc3['IN_TOUCH'] == 'RHOM')) & (
oc3['ACTIV'].notna() | oc3['ACCOM'].notna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_345():
error = ErrorDefinition(
code='345',
description='The data collection record shows the local authority is in touch with this young person, but activity and/or accommodation data items are zero.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'Yes' then <ACTIV> and <ACCOM> must be provided
mask = (oc3['IN_TOUCH'] == 'YES') & (oc3['ACTIV'].isna() | oc3['ACCOM'].isna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_384():
error = ErrorDefinition(
code='384',
description='A child receiving respite care cannot be in a long-term foster placement ',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# Where <LS> = 'V3' or 'V4' then <PL> must not be 'U1' or 'U4'
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
(episodes['PLACE'] == 'U1') | (episodes['PLACE'] == 'U4'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_390():
error = ErrorDefinition(
code='390',
description='Reason episode ceased is adopted but child has not been previously placed for adoption.',
affected_fields=['PLACE', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# If <REC> = 'E11' or 'E12' then <PL> must be one of 'A3', 'A4', 'A5' or 'A6'
mask = ((episodes['REC'] == 'E11') | (episodes['REC'] == 'E12')) & ~(
(episodes['PLACE'] == 'A3') | (episodes['PLACE'] == 'A4') | (episodes['PLACE'] == 'A5') | (
episodes['PLACE'] == 'A6'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_378():
error = ErrorDefinition(
code='378',
description='A child who is placed with parent(s) cannot be looked after under a single period of accommodation under Section 20 of the Children Act 1989.',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# the & sign supercedes the ==, so brackets are necessary here
mask = (episodes['PLACE'] == 'P1') & (episodes['LS'] == 'V2')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_398():
error = ErrorDefinition(
code='398',
description='Distance field completed but child looked after under legal status V3 or V4.',
affected_fields=['LS', 'HOME_POST', 'PL_POST']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
episodes['HOME_POST'].notna() | episodes['PL_POST'].notna())
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_451():
error = ErrorDefinition(
code='451',
description='Child is still freed for adoption, but freeing orders could not be applied for since 30 December 2005.',
affected_fields=['DEC', 'REC', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = episodes['DEC'].isna() & episodes['REC'].isna() & (episodes['LS'] == 'D1')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_519():
error = ErrorDefinition(
code='519',
description='Data entered on the legal status of adopters shows civil partnership couple, but data entered on genders of adopters does not show it as a couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = (ad1['LS_ADOPTR'] == 'L2') & (
(ad1['SEX_ADOPTR'] != 'MM') & (ad1['SEX_ADOPTR'] != 'FF') & (ad1['SEX_ADOPTR'] != 'MF'))
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_520():
error = ErrorDefinition(
code='520',
description='Data entry on the legal status of adopters shows different gender married couple but data entry on genders of adopters shows it as a same gender couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
# check condition
mask = (ad1['LS_ADOPTR'] == 'L11') & (ad1['SEX_ADOPTR'] != 'MF')
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_522():
error = ErrorDefinition(
code='522',
description='Date of decision that the child should be placed for adoption must be on or before the date that a child should no longer be placed for adoption.',
affected_fields=['DATE_PLACED', 'DATE_PLACED_CEASED']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
# Convert to datetimes
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
# Boolean mask
mask = placed_adoption['DATE_PLACED_CEASED'] > placed_adoption['DATE_PLACED']
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_563():
error = ErrorDefinition(
code='563',
description='The child should no longer be placed for adoption but the date of the decision that the child should be placed for adoption is blank',
affected_fields=['DATE_PLACED', 'REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
mask = placed_adoption['REASON_PLACED_CEASED'].notna() & placed_adoption['DATE_PLACED_CEASED'].notna() & \
placed_adoption['DATE_PLACED'].isna()
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_544():
error = ErrorDefinition(
code='544',
description="Any child who has conviction information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['CONVICTED', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
convict = oc2['CONVICTED'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = convict & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_634():
error = ErrorDefinition(
code='634',
description='There are entries for previous permanence options, but child has not started to be looked after from 1 April 2016 onwards.',
affected_fields=['LA_PERM', 'PREV_PERM', 'DATE_PERM', 'DECOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PrevPerm' not in dfs:
return {}
else:
episodes = dfs['Episodes']
prevperm = dfs['PrevPerm']
collection_start = dfs['metadata']['collection_start']
# convert date field to appropriate format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# only the latest DECOM per child needs checking: if the per-child maximum fails the condition, every episode does
episodes['LAST_DECOM'] = episodes.groupby('CHILD')['DECOM'].transform('max')
# prepare to merge
episodes.reset_index(inplace=True)
prevperm.reset_index(inplace=True)
merged = prevperm.merge(episodes, on='CHILD', how='left', suffixes=['_prev', '_eps'])
# If <PREV_PERM> or <LA_PERM> or <DATE_PERM> provided, then at least 1 episode must have a <DECOM> later than 01/04/2016
mask = (merged['PREV_PERM'].notna() | merged['DATE_PERM'].notna() | merged['LA_PERM'].notna()) & (
merged['LAST_DECOM'] < collection_start)
eps_error_locs = merged.loc[mask, 'index_eps']
prevperm_error_locs = merged.loc[mask, 'index_prev']
# return {'PrevPerm':prevperm_error_locs}
return {'Episodes': eps_error_locs.unique().tolist(), 'PrevPerm': prevperm_error_locs.unique().tolist()}
return error, _validate
def validate_158():
error = ErrorDefinition(
code='158',
description='If a child has been recorded as receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be left blank.',
affected_fields=['INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
error_mask = oc2['INTERVENTION_RECEIVED'].astype(str).eq('1') & oc2['INTERVENTION_OFFERED'].notna()
error_locations = oc2.index[error_mask]
return {'OC2': error_locations.tolist()}
return error, _validate
def validate_133():
error = ErrorDefinition(
code='133',
description="Data entry for accommodation after leaving care is invalid. If reporting on a child's accommodation after leaving care, the data entry must be valid.",
affected_fields=['ACCOM'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
valid_codes = ['B1', 'B2', 'C1', 'C2', 'D1', 'D2', 'E1', 'E2', 'G1', 'G2', 'H1', 'H2', 'K1', 'K2', 'R1',
'R2', 'S2', 'T1', 'T2', 'U1', 'U2', 'V1', 'V2', 'W1', 'W2', 'X2', 'Y1', 'Y2', 'Z1', 'Z2',
'0']
error_mask = ~oc3['ACCOM'].isna() & ~oc3['ACCOM'].isin(valid_codes)
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.tolist()}
return error, _validate
def validate_565():
error = ErrorDefinition(
code='565',
description='The date that the child started to be missing or away from placement without authorisation has been completed but whether the child was missing or away from placement without authorisation has not been completed.',
affected_fields=['MISSING', 'MIS_START']
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
mask = missing['MIS_START'].notna() & missing['MISSING'].isna()
error_locations = missing.index[mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_433():
error = ErrorDefinition(
code='433',
description='The reason for new episode suggests that this is a continuation episode, but the episode does not start on the same day as the last episode finished.',
affected_fields=['RNE', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['original_index'] = episodes.index
episodes.sort_values(['CHILD', 'DECOM', 'DEC'], inplace=True)
episodes[['PREVIOUS_DEC', 'PREVIOUS_CHILD']] = episodes[['DEC', 'CHILD']].shift(1)
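# after sorting, shift(1) puts the previous row's DEC and CHILD on each episode, so a continuation
# episode can be compared with the end date of the episode immediately before it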
rne_is_ongoing = episodes['RNE'].str.upper().astype(str).isin(['P', 'L', 'T', 'U', 'B'])
date_mismatch = episodes['PREVIOUS_DEC'] != episodes['DECOM']
missing_date = episodes['PREVIOUS_DEC'].isna() | episodes['DECOM'].isna()
same_child = episodes['PREVIOUS_CHILD'] == episodes['CHILD']
error_mask = rne_is_ongoing & (date_mismatch | missing_date) & same_child
error_locations = episodes['original_index'].loc[error_mask].sort_values()
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_437():
error = ErrorDefinition(
code='437',
description='Reason episode ceased is child has died or is aged 18 or over but there are further episodes.',
affected_fields=['REC'],
)
# !# potential false negatives, as this only operates on the current year's data
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes.sort_values(['CHILD', 'DECOM'], inplace=True)
episodes[['NEXT_DECOM', 'NEXT_CHILD']] = episodes[['DECOM', 'CHILD']].shift(-1)
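# shift(-1) exposes the following row's DECOM and CHILD, i.e. the next episode (if any) for the same child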
# drop rows with missing DECOM as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
ceased_e2_e15 = episodes['REC'].str.upper().astype(str).isin(['E2', 'E15'])
has_later_episode = episodes['CHILD'] == episodes['NEXT_CHILD']
error_mask = ceased_e2_e15 & has_later_episode
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_547():
error = ErrorDefinition(
code='547',
description="Any child who has health promotion information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
healthck = oc2['HEALTH_CHECK'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = healthck & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_635():
error = ErrorDefinition(
code='635',
description='There are entries for date of order and local authority code where previous permanence option was arranged but previous permanence code is Z1',
affected_fields=['LA_PERM', 'DATE_PERM', 'PREV_PERM']
)
def _validate(dfs):
if 'PrevPerm' not in dfs:
return {}
else:
prev_perm = dfs['PrevPerm']
# raise and error if either LA_PERM or DATE_PERM are present, yet PREV_PERM is absent.
mask = ((prev_perm['LA_PERM'].notna() | prev_perm['DATE_PERM'].notna()) & prev_perm['PREV_PERM'].isna())
error_locations = prev_perm.index[mask]
return {'PrevPerm': error_locations.to_list()}
return error, _validate
def validate_550():
error = ErrorDefinition(
code='550',
description='A placement provider code of PR0 can only be associated with placement P1.',
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = (episodes['PLACE'] != 'P1') & episodes['PLACE_PROVIDER'].eq('PR0')
validation_error_locations = episodes.index[mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_217():
error = ErrorDefinition(
code='217',
description='Children who are placed for adoption with current foster carers (placement types A3 or A5) must have a reason for new episode of S, T or U.',
affected_fields=['PLACE', 'DECOM', 'RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
max_decom_allowed = pd.to_datetime('01/04/2015', format='%d/%m/%Y', errors='coerce')
reason_new_ep = ['S', 'T', 'U']
place_codes = ['A3', 'A5']
mask = (episodes['PLACE'].isin(place_codes) & (episodes['DECOM'] >= max_decom_allowed)) & ~episodes[
'RNE'].isin(reason_new_ep)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_518():
error = ErrorDefinition(
code='518',
description='If reporting legal status of adopters is L4 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L4') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_517():
error = ErrorDefinition(
code='517',
description='If reporting legal status of adopters is L3 then the genders of adopters should be coded as MF. MF = the adopting couple are male and female.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L3') & ~AD1['SEX_ADOPTR'].isin(['MF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_558():
error = ErrorDefinition(
code='558',
description='If a child has been adopted, then the decision to place them for adoption has not been disrupted and the date of the decision that a child should no longer be placed for adoption should be left blank. if the REC code is either E11 or E12 then the DATE PLACED CEASED date should not be provided',
affected_fields=['DATE_PLACED_CEASED', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
rec_codes = ['E11', 'E12']
placeEpisodes = episodes[episodes['REC'].isin(rec_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
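# 'index' holds the original Episodes row numbers (from reset_index above), so the merged rows map straight back onto Episodes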
episodes_with_errors = merged[merged['DATE_PLACED_CEASED'].notna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_453():
error = ErrorDefinition(
code='453',
description='Contradiction between placement distance in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_DISTANCE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['PL_DISTANCE'] = | pd.to_numeric(episodes['PL_DISTANCE'], errors='coerce') | pandas.to_numeric |
import pandas as pd
import json
import numpy as np
import ast
from tqdm import tqdm_notebook
#Reading preprocessed_lasVegas.csv into Pandas Dataframe df
df=pd.read_csv('/home/rim/INF-Project/preprocessed_lasVegas.csv', index_col=0)
df.shape
#Select required columns and rename columns to standard names
df=df[['business_id','name','neighborhood','address','city','state','postal_code','stars','review_count','attributes']]
df.columns=['BusinessId',"RestaurantName",'Neighborhood',"Address",'City','State','Zip','Stars','ReviewCount','attributes']
df.columns.values
#Divide df into 2 parts - attributes and the rest
df_1=df[['BusinessId',"RestaurantName",'Neighborhood',"Address",'City','State','Zip','Stars','ReviewCount']]
df_2=df[['attributes']]
# Replace blank with ''
df_1.fillna('', inplace=True)
df_1
# Replace None or NA attributes with {}
df_2.replace('None','{}', inplace=True)
df_2.fillna('{}', inplace=True)
df_2
# Making a new dataframe out of attributes dictionary
cols=['BikeParking','BusinessAcceptsCreditCards','BusinessParking','GoodForKids','HasTV','NoiseLevel','OutdoorSeating','RestaurantsAttire','RestaurantsDelivery','RestaurantsGoodForGroups','RestaurantsPriceRange2','RestaurantsReservations','RestaurantsTakeOut']
df_3=pd.DataFrame(columns=cols)
for index, row in tqdm_notebook(df_2.iterrows()):
string=ast.literal_eval(row.attributes)
df_4=pd.DataFrame.from_dict([string])
df_3=pd.concat([df_3,df_4], axis=0)
df_3
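# Note (assumed optimisation, not in the original): growing df_3 with pd.concat inside the loop is
# quadratic in the number of rows; an equivalent single-concat sketch would be
#   frames = [pd.DataFrame.from_dict([ast.literal_eval(a)]) for a in df_2['attributes']]
#   df_3 = pd.concat(frames, axis=0)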
# Concatenate New Attributes Columns and older remaining columns
df_1.reset_index(drop=True, inplace=True)
df_3.reset_index(drop=True, inplace=True)
dat1 = | pd.concat([df_1, df_3], axis=1) | pandas.concat |
import numpy as np
import pandas as pd
files = ['1.1.csv', '1.2.csv', '1.3.csv', '1.4.csv',
'2.1.csv', '2.2.csv', '2.3.csv', '2.4.csv',
'3.1.csv', '3.2.csv', '3.3.csv', '3.4.csv']
data = []
for fname in files:
data.append(pd.read_csv(fname))
data[2]['Location'][118] = '23:E'
data[2]['Location'][142] = '23:F'
data[2]['product_scaled'][118] = '0.0'
data[2]['product_scaled'][142] = '0.0'
data[3]['Location'][153] = '10:G'
data[3]['Location'][154] = '11:G'
data[3]['Location'][169] = '2:H'
data[3]['Location'][199] = '8:I'
data[3]['Location'][222] = '7:J'
data[3]['Location'][264] = '1:L'
data[3]['Location'][361] = '2:P'
data[3]['product_scaled'][153] = '0.0'
data[3]['product_scaled'][154] = '0.0'
data[3]['product_scaled'][169] = '0.0'
data[3]['product_scaled'][199] = '0.0'
data[3]['product_scaled'][222] = '0.0'
data[3]['product_scaled'][264] = '0.0'
data[3]['product_scaled'][361] = '0.0'
for i, df in enumerate(data):
if i < 8:
df[['col', 'row']] = df['Location'].str.split(':', expand=True)
else:
df[['row', 'col']] = df['Location'].str.split(':', expand=True)
df['row'] = df['row'].map(lambda x: ord(x) - 64)
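# ord(x) - 64 converts the row letter to a 1-based number (A -> 1, B -> 2, ...)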
df['col'] = pd.to_numeric(df['col'])
df['product_scaled'] = pd.to_numeric(df['product_scaled'])
data[i] = df[['Location', 'product_scaled', 'row', 'col']]
data[1]['col'] = data[1]['col'] + 24
data[3]['col'] = data[3]['col'] + 24
data[5]['col'] = data[5]['col'] + 24
data[7]['col'] = data[7]['col'] + 24
data[9]['col'] = data[9]['col'] + 24
data[11]['col'] = data[11]['col'] + 24
data[2]['row'] = data[2]['row'] + 16
data[3]['row'] = data[3]['row'] + 16
data[6]['row'] = data[6]['row'] + 16
data[7]['row'] = data[7]['row'] + 16
data[10]['row'] = data[10]['row'] + 16
data[11]['row'] = data[11]['row'] + 16
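# the +24 column and +16 row offsets tile the four quadrants of each plate into a single 32-row x 48-column grid before concatenation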
plate1 = pd.concat([data[0], data[1], data[2], data[3]])
plate2 = | pd.concat([data[4], data[5], data[6], data[7]]) | pandas.concat |
import json
import io
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import dash
from dash import html
from dash import dcc
import dash_bootstrap_components as dbc
import pandas as pd
import numpy as np
import plotly.express as px
from dash.dependencies import Output, Input, State
from datetime import datetime, timedelta
from server import app
import plotly.graph_objects as go
import plotly.express as px
from sqlalchemy import create_engine
from flask import send_file
import os
from joblib import Parallel, delayed
from dash.exceptions import PreventUpdate
# ----------------------------------------------------------------------------------------------------- Level-1 Figure 1 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data for the first level-1 overview figure
def get_first_lev_first_fig_date(engine):
res_数据时间缺失及汇总 = pd.DataFrame(columns=['业务类型', '问题数', '总数', '问题数量占比'])
# For each business area: [query counting rows with missing timestamps, query counting all rows]
bus_dic = {
# '患者基本信息': ['select count(distinct caseid) as num from overall where in_time is null or out_time is null','select count(distinct caseid) as num from overall'],
'入院时间': ['select count(distinct caseid) as num from overall where in_time is null ',
'select count(distinct caseid) as num from overall'],
'出院时间': ['select count(distinct caseid) as num from overall where out_time is null',
'select count(distinct caseid) as num from overall'],
'手术': ['select count(1) as num from oper2 where BEGINTIME is null or ENDTIME is null ','select count(1) as num from oper2 '],
'给药': ['select count(1) as num from ANTIBIOTICS where BEGINTIME is null or ENDTIME is null ','select count(1) as num from ANTIBIOTICS '],
'入出转': ['select count(1) as num from DEPARTMENT where BEGINTIME is null or ENDTIME is null ','select count(1) as num from DEPARTMENT '],
'菌检出': ['select count(1) as num from BACTERIA where REQUESTTIME is null ','select count(1) as num from BACTERIA '],
'体温': ['select count(1) as num from TEMPERATURE where RECORDDATE is null ','select count(1) as num from TEMPERATURE '],
'药敏': ['select count(1) as num from DRUGSUSCEPTIBILITY where REQUESTTIME is null or REPORTTIME is null ','select count(1) as num from DRUGSUSCEPTIBILITY '],
'检查': ['select count(1) as num from EXAM where EXAM_DATE is null ','select count(1) as num from EXAM '],
'生化': ['select count(1) as num from ROUTINE2 where REQUESTTIME is null or REPORTTIME is null ','select count(1) as num from ROUTINE2 '],
'三管': ['select count(1) as num from TREATMENT1 where BEGINTIME is null or ENDTIME is null ','select count(1) as num from TREATMENT1 '],
}
for bus in bus_dic:
try:
count_时间为空 = pd.read_sql(bus_dic[bus][0],con=engine)['num'][0]
count_总 = pd.read_sql(bus_dic[bus][1],con=engine)['num'][0]
res_数据时间缺失及汇总.loc[res_数据时间缺失及汇总.shape[0]] = [bus,count_时间为空,count_总,round(count_时间为空 / count_总, 4) * 100]
except:
res_数据时间缺失及汇总.loc[res_数据时间缺失及汇总.shape[0]] = [bus,-1,-1,-1]
print('一级图一',bus)
return res_数据时间缺失及汇总
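# Hedged usage sketch (the connection string below is hypothetical, not taken from this app): outside
# of the Dash callbacks the helper can be exercised directly and returns one row per business type, e.g.
#     engine = create_engine('oracle://user:password@host:1521/sid')
#     df_missing = get_first_lev_first_fig_date(engine)
#     print(df_missing.sort_values('问题数', ascending=False))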
# update level-1 figure 1
@app.callback(
Output('first_level_first_fig','figure'),
Output('general_situation_first_level_first_fig_data','data'),
Input('general_situation_first_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_first_fig(general_situation_first_level_first_fig_data,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
if general_situation_first_level_first_fig_data is None:
general_situation_first_level_first_fig_data = {}
first_level_first_fig_data = get_first_lev_first_fig_date(engine)
general_situation_first_level_first_fig_data['first_level_first_fig_data'] = first_level_first_fig_data.to_json(orient='split', date_format='iso')
general_situation_first_level_first_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_first_fig_data = json.dumps(general_situation_first_level_first_fig_data)
else:
general_situation_first_level_first_fig_data = json.loads(general_situation_first_level_first_fig_data)
if db_con_url['hosname'] != general_situation_first_level_first_fig_data['hosname']:
first_level_first_fig_data = get_first_lev_first_fig_date(engine)
general_situation_first_level_first_fig_data['first_level_first_fig_data'] = first_level_first_fig_data.to_json(orient='split',date_format='iso')
general_situation_first_level_first_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_first_fig_data = json.dumps(general_situation_first_level_first_fig_data)
else:
first_level_first_fig_data = pd.read_json(general_situation_first_level_first_fig_data['first_level_first_fig_data'], orient='split')
general_situation_first_level_first_fig_data = dash.no_update
#
fig_概览一级_时间缺失 = make_subplots(specs=[[{"secondary_y": True}]])
res_数据时间缺失及汇总 = first_level_first_fig_data.sort_values(['问题数'], ascending=False)
        # missing-record count per business type -- bar chart
fig_概览一级_时间缺失.add_trace(
go.Bar(x=res_数据时间缺失及汇总['业务类型'], y=res_数据时间缺失及汇总['问题数'], name="问题数量",
marker_color=px.colors.qualitative.Dark24, ),
secondary_y=False,
)
        # share of missing records per business type -- line chart
fig_概览一级_时间缺失.add_trace(
go.Scatter(x=res_数据时间缺失及汇总['业务类型'], y=res_数据时间缺失及汇总['问题数量占比'], name="问题数量占比", ),
secondary_y=True,
)
        # set the x-axis title
fig_概览一级_时间缺失.update_xaxes(tickangle=45,title_text="业务指标")
        # set the y-axis titles
fig_概览一级_时间缺失.update_yaxes(title_text="缺失数量", secondary_y=False)
fig_概览一级_时间缺失.update_yaxes(title_text="缺失占比(%)", secondary_y=True)
        # set a horizontal legend and its position
fig_概览一级_时间缺失.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
        # set the figure margins
fig_概览一级_时间缺失.update_layout(margin=dict(l=20, r=20, t=20, b=20), )
return fig_概览一级_时间缺失,general_situation_first_level_first_fig_data
# download the detail data behind level-1 figure 1
@app.callback(
Output('first_level_first_fig_data_detail', 'data'),
Input('first_level_first_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
prevent_initial_call=True,
)
def download_first_level_first_fig_data_detail(n_clicks,db_con_url):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
engine = create_engine(db_con_url['db'])
bus_dic = {
'入院时间': 'select * from overall where in_time is null ',
'出院时间': 'select * from overall where out_time is null',
'手术': 'select * from oper2 where BEGINTIME is null or ENDTIME is null ',
'给药': 'select * from ANTIBIOTICS where BEGINTIME is null or ENDTIME is null ',
'入出转': 'select * from DEPARTMENT where BEGINTIME is null or ENDTIME is null ',
'菌检出': 'select * from BACTERIA where REQUESTTIME is null ',
'药敏': 'select * from DRUGSUSCEPTIBILITY where REQUESTTIME is null or REPORTTIME is null ',
'检查': 'select * from EXAM where EXAM_DATE is null',
'生化': 'select * from ROUTINE2 where REQUESTTIME is null or REPORTTIME is null ',
'三管': 'select * from TREATMENT1 where BEGINTIME is null or ENDTIME is null ',
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp = pd.read_sql(bus_dic[key],con=engine)
if temp.shape[0]>0:
temp.to_excel(writer, sheet_name=key)
except:
error_df = pd.DataFrame(['明细数据获取出错'],columns=[key])
error_df.to_excel(writer, sheet_name = key)
writer.save()
data = output.getvalue()
hosName = db_con_url['hosname']
return dcc.send_bytes(data, f'{hosName}各业务时间缺失数量占比.xlsx')
else:
return dash.no_update
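# Hedged refactoring sketch (an assumption, not wired into the callbacks above): the BytesIO + ExcelWriter
# export pattern used in the download callback can be factored into a single helper so that later download
# callbacks do not have to repeat it.
def export_queries_to_excel(engine, query_dict):
    """Run each SQL statement in query_dict and write the result to its own sheet; return the xlsx bytes."""
    buf = io.BytesIO()
    writer = pd.ExcelWriter(buf, engine='xlsxwriter')
    for sheet_name, sql in query_dict.items():
        try:
            detail = pd.read_sql(sql, con=engine)
            if detail.shape[0] > 0:
                detail.to_excel(writer, sheet_name=sheet_name)
        except Exception:
            pd.DataFrame(['detail query failed'], columns=[sheet_name]).to_excel(writer, sheet_name=sheet_name)
    writer.save()
    return buf.getvalue()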
# ----------------------------------------------------------------------------------------------------- Level-1 figure 2 ----------------------------------------------------------------------------------------------------------------------
# fetch the data for the second level-1 overview figure
def get_first_lev_second_fig_date(engine,btime,etime):
res_数据关键字缺失及汇总 = pd.DataFrame(columns=['业务类型', '问题数', '总数', '关键字缺失占比'])
bus_dic = {'用药目的': [f"select count(1) as num from ANTIBIOTICS where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and (GOAL is null or replace(GOAL,' ','') is null)",
f"select count(1) as num from ANTIBIOTICS where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
               '药敏结果': [f"select count(1) as num from drugsusceptibility where substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and (SUSCEPTIBILITY is null or replace(SUSCEPTIBILITY,' ','') is null)",
f"select count(1) as num from drugsusceptibility where substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' "],
'手术名称': [f"select count(1) as num from oper2 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and (OPER_NAME is null or replace(OPER_NAME,' ','') is null)",
f"select count(1) as num from oper2 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'手术切口等级': [f"select count(1) as num from oper2 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and ( WOUND_GRADE is null or replace(WOUND_GRADE,' ','') is null)",
f"select count(1) as num from oper2 where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "],
'出入院科室': [f"select count(1) as num from overall where substr(IN_TIME,1,7)>='{btime}' and substr(IN_TIME,1,7)<='{etime}' and ( IN_DEPT is null or replace(IN_DEPT,' ','') is null or OUT_DEPT is null or replace(OUT_DEPT,' ','') is null )",
f"select count(1) as num from overall where substr(IN_TIME,1,7)>='{btime}' and substr(IN_TIME,1,7)<='{etime}' "],
'入出转科室': [f"select count(1) as num from department where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and ( DEPT is null or replace(DEPT,' ','') is null)",
f"select count(1) as num from department where substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' "]
}
for bus in bus_dic:
try:
count_时间为空 = pd.read_sql(bus_dic[bus][0],con=engine)['num'][0]
count_总 = pd.read_sql(bus_dic[bus][1],con=engine)['num'][0]
res_数据关键字缺失及汇总.loc[res_数据关键字缺失及汇总.shape[0]] = [bus,count_时间为空,count_总,round(count_时间为空 / count_总, 4) * 100]
except:
res_数据关键字缺失及汇总.loc[res_数据关键字缺失及汇总.shape[0]] = [bus,-1,-1,-1]
print('一级图二', bus)
return res_数据关键字缺失及汇总
# update level-1 figure 2
@app.callback(
Output('first_level_second_fig','figure'),
Output('general_situation_first_level_second_fig_data','data'),
Input('general_situation_first_level_second_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_second_fig(general_situation_first_level_second_fig_data,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if general_situation_first_level_second_fig_data is None:
general_situation_first_level_second_fig_data = {}
first_level_second_fig_data = get_first_lev_second_fig_date(engine,btime,etime)
general_situation_first_level_second_fig_data['first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split', date_format='iso')
general_situation_first_level_second_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_second_fig_data['btime'] = btime
general_situation_first_level_second_fig_data['etime'] = etime
general_situation_first_level_second_fig_data = json.dumps(general_situation_first_level_second_fig_data)
else:
general_situation_first_level_second_fig_data = json.loads(general_situation_first_level_second_fig_data)
if db_con_url['hosname'] != general_situation_first_level_second_fig_data['hosname']:
first_level_second_fig_data = get_first_lev_second_fig_date(engine, btime, etime)
general_situation_first_level_second_fig_data['first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split',date_format='iso')
general_situation_first_level_second_fig_data['hosname'] = db_con_url['hosname']
general_situation_first_level_second_fig_data['btime'] = btime
general_situation_first_level_second_fig_data['etime'] = etime
general_situation_first_level_second_fig_data = json.dumps( general_situation_first_level_second_fig_data)
else:
if general_situation_first_level_second_fig_data['btime'] != btime or general_situation_first_level_second_fig_data['etime'] != etime:
first_level_second_fig_data = get_first_lev_second_fig_date(engine, btime, etime)
general_situation_first_level_second_fig_data[ 'first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split', date_format='iso')
general_situation_first_level_second_fig_data['btime'] = btime
general_situation_first_level_second_fig_data['etime'] = etime
general_situation_first_level_second_fig_data = json.dumps(general_situation_first_level_second_fig_data)
else:
first_level_second_fig_data = pd.read_json(general_situation_first_level_second_fig_data['first_level_second_fig_data'], orient='split')
general_situation_first_level_second_fig_data = dash.no_update
print("一级第二张图数据:")
print(first_level_second_fig_data)
fig_概览一级_关键字缺失 = make_subplots()
res_数据关键字缺失及汇总 = first_level_second_fig_data.sort_values(['关键字缺失占比'], ascending=False)
fig_概览一级_关键字缺失.add_trace(
go.Bar(x=res_数据关键字缺失及汇总['业务类型'], y=res_数据关键字缺失及汇总['关键字缺失占比'], marker_color=px.colors.qualitative.Dark24, )
)
fig_概览一级_关键字缺失.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
#title=f"{btime}--{etime}",
)
fig_概览一级_关键字缺失.update_yaxes(title_text="关键字缺失占比(%)")
fig_概览一级_关键字缺失.update_xaxes(title_text="业务指标")
return fig_概览一级_关键字缺失,general_situation_first_level_second_fig_data
# download the detail data behind level-1 figure 2
@app.callback(
Output('first_level_second_fig_data_detail', 'data'),
Input('first_level_second_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_first_level_second_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime']
etime = count_time['etime']
bus_dic = {
'用药目的': f"select * from ANTIBIOTICS where (GOAL is null or replace(GOAL,' ','') is null) and BEGINTIME is not null and substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}' ",
'药敏结果': f"select * from drugsusceptibility where (SUSCEPTIBILITY is null or replace(SUSCEPTIBILITY,' ','') is null) and REQUESTTIME is not null and substr(REQUESTTIME,1,10)>='{btime}' and substr(REQUESTTIME,1,10)<='{etime}' ",
'手术名称': f"select * from oper2 where (OPER_NAME is null or replace(OPER_NAME,' ','') is null) and BEGINTIME is not null and substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}'",
'手术切口等级': f"select * from oper2 where (WOUND_GRADE is null or replace(WOUND_GRADE,' ','') is null) and BEGINTIME is not null and substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}' ",
'出入院科室': f" select * from overall where (IN_DEPT is null or replace(IN_DEPT,' ','') is null or OUT_DEPT is null or replace(OUT_DEPT,' ','') is null) and in_time is not null and substr(in_time,1,10)>='{btime}' and substr(in_time,1,10)<='{etime}' ",
'入出转科室': f"select * from department where (DEPT is null or replace(DEPT,' ','') is null) and BEGINTIME is not null and substr(BEGINTIME,1,10)>='{btime}' and substr(BEGINTIME,1,10)<='{etime}' ",
}
output = io.BytesIO()
            writer = pd.ExcelWriter(output, engine='xlsxwriter')
import pandas as pd
import numpy as np
import copy
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
from sklearn.feature_selection import mutual_info_classif, SelectKBest
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from datetime import datetime
from os import listdir
from os.path import isfile, join
import sys
import math
from sklearn.metrics import accuracy_score, f1_score
import re
from Extractor import get_word_length_matrix, get_word_length_matrix_with_interval, get_average_word_length, \
get_word_length_matrix_with_margin, get_char_count, get_digits, get_sum_digits, get_word_n_grams, \
get_char_affix_n_grams, get_char_word_n_grams, get_char_punct_n_grams, get_pos_tags_n_grams, get_bow_matrix, \
get_yules_k, get_special_char_matrix, get_function_words, get_pos_tags, get_sentence_end_start, \
get_flesch_reading_ease_vector, get_sentence_count, get_word_count
from sklearn.preprocessing import StandardScaler, Normalizer
# Chapter 7.1.1. method to trim features with a low column sum, e.g. n-grams that occur fewer than 5 times
def trim_df_sum_feature(par_df, par_n):
par_df = par_df.fillna(value=0)
columns = par_df.columns.to_numpy()
data_array = par_df.to_numpy(dtype=float)
sum_arr = data_array.sum(axis=0)
# reduce n if 0 features would be returned
while len(par_df.columns) - len(np.where(sum_arr < par_n)[0]) == 0:
par_n -= 1
positions = list(np.where(sum_arr < par_n))
columns = np.delete(columns, positions)
data_array = np.delete(data_array, positions, axis=1)
return pd.DataFrame(data=data_array, columns=columns)
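# Hedged usage sketch (made-up values, not thesis data): columns whose total count over all articles
# falls below par_n are dropped, e.g.
#     demo = pd.DataFrame({'der': [3, 2], 'xyz': [1, 0]})
#     trim_df_sum_feature(demo, 5)   # keeps 'der' (column sum 5), drops 'xyz' (column sum 1 < 5)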
# Chapter 7.1.1. method to trim features with low occurrence across all articles
def trim_df_by_occurrence(par_df, n):
df_masked = par_df.notnull().astype('int')
word_rate = df_masked.sum()
columns = []
filtered_bow = pd.DataFrame()
for i in range(0, len(word_rate)):
if word_rate[i] > n:
columns.append(word_rate.index[i])
for c in columns:
filtered_bow[c] = par_df[c]
return filtered_bow
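# Hedged usage sketch (made-up values): a column survives only if it is non-null in more than n articles,
# e.g. with n = 1
#     demo = pd.DataFrame({'und': [1, 2, None], 'selten': [None, None, 4]})
#     trim_df_by_occurrence(demo, 1)   # keeps 'und' (non-null in 2 rows), drops 'selten' (non-null in 1 row)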
# Chapter 7.1.1. Process of filtering out low-occurrence features and saving the filtered features to new files
def filter_low_occurrence():
df_bow = pd.read_csv("daten/raw/bow.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"BOW before: {len(df_bow.columns)}")
df_bow = trim_df_by_occurrence(df_bow, 1)
print(f"BOW after: {len(df_bow.columns)}")
df_bow.to_csv(f"daten/2_filter_low_occurrence/bow.csv", index=False)
for n in range(2, 7):
word_n_gram = pd.read_csv(f"daten/raw/word_{n}_gram.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"Word_{n}_gram before: {len(word_n_gram.columns)}")
word_n_gram = trim_df_by_occurrence(word_n_gram, 1)
print(f"Word_{n}_gram after: {len(word_n_gram.columns)}")
word_n_gram.to_csv(f"daten/2_filter_low_occurrence/word_{n}_gram.csv", index=False)
for n in range(2, 6):
char_affix_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_affix_{n}_gram_1.csv", sep=',',
encoding="utf-8", nrows=2500)
print(f"char_affix_{n}_gram before: {len(char_affix_n_gram.columns)}")
char_affix_n_gram = trim_df_sum_feature(char_affix_n_gram, 5)
print(f"char_affix_{n}_gram after: {len(char_affix_n_gram.columns)}")
char_affix_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_affix_{n}_gram.csv", index=False)
char_word_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_word_{n}_gram_1.csv", sep=',',
encoding="utf-8", nrows=2500)
print(f"char_word_{n}_gram before: {len(char_word_n_gram.columns)}")
char_word_n_gram = trim_df_sum_feature(char_word_n_gram, 5)
print(f"char_word_{n}_gram after: {len(char_word_n_gram.columns)}")
char_word_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_word_{n}_gram.csv", index=False)
char_punct_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_punct_{n}_gram_1.csv", sep=',',
encoding="utf-8", nrows=2500)
print(f"char_punct_{n}_gram before: {len(char_punct_n_gram.columns)}")
char_punct_n_gram = trim_df_sum_feature(char_punct_n_gram, 5)
print(f"char_punct_{n}_gram after: {len(char_punct_n_gram.columns)}")
char_punct_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_punct_{n}_gram.csv", index=False)
df_f_word = pd.read_csv("daten/raw/function_words.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"Function Words before: {len(df_f_word.columns)}")
df_f_word = trim_df_by_occurrence(df_f_word, 1)
print(f"Function Words after: {len(df_f_word.columns)}")
df_f_word.to_csv(f"daten/2_filter_low_occurrence/function_words.csv", index=False)
for n in range(2, 6):
pos_tags_n_gram = pd.read_csv(f"daten/raw/pos_tag_{n}_gram.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"pos_tag_{n}_gram before: {len(pos_tags_n_gram.columns)}")
pos_tags_n_gram = trim_df_by_occurrence(pos_tags_n_gram, 1)
print(f"pos_tag_{n}_gram after: {len(pos_tags_n_gram.columns)}")
pos_tags_n_gram.to_csv(f"daten/2_filter_low_occurrence/pos_tag_{n}_gram.csv", index=False)
# Chapter 7.1.2. method to filter words based on document frequency
def trim_df_by_doc_freq(par_df, par_doc_freq):
df_masked = par_df.notnull().astype('int')
word_rate = df_masked.sum() / len(par_df)
columns = []
filtered_bow = pd.DataFrame()
for i in range(0, len(word_rate)):
if word_rate[i] < par_doc_freq:
columns.append(word_rate.index[i])
for c in columns:
filtered_bow[c] = par_df[c]
return filtered_bow
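# Hedged usage sketch (made-up values): here a column is kept only if its document frequency stays below
# the threshold, e.g. with par_doc_freq = 0.5
#     demo = pd.DataFrame({'der': [1, 2, 3, 4], 'rar': [1, None, None, None]})
#     trim_df_by_doc_freq(demo, 0.5)   # drops 'der' (document frequency 1.0), keeps 'rar' (0.25)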
# Chapter 7.1.2 Process of filtering out features with a high document frequency and saving the filtered features to new files
def filter_high_document_frequency():
# Filter words with high document frequency
df_bow = pd.read_csv("daten/2_filter_low_occurrence/bow.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"BOW before: {len(df_bow.columns)}")
df_bow = trim_df_by_doc_freq(df_bow, 0.5)
print(f"BOW after: {len(df_bow.columns)}")
df_bow.to_csv(f"daten/3_fiter_high_frequency/bow.csv", index=False)
df_f_word = pd.read_csv("daten/2_filter_low_occurrence/function_words.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"Function Word before: {len(df_f_word.columns)}")
df_f_word = trim_df_by_doc_freq(df_f_word, 0.5)
print(f"Function Word after: {len(df_f_word.columns)}")
df_f_word.to_csv(f"daten/3_fiter_high_frequency/function_words.csv", index=False)
for n in range(2, 7):
word_n_gram = pd.read_csv(f"daten/2_filter_low_occurrence/word_{n}_gram.csv", sep=',', encoding="utf-8",
nrows=2500)
print(f"Word_{n}_gram before: {len(word_n_gram.columns)}")
word_n_gram = trim_df_by_doc_freq(word_n_gram, 0.5)
print(f"Word_{n}_gram after: {len(word_n_gram.columns)}")
word_n_gram.to_csv(f"daten/3_fiter_high_frequency/word_{n}_gram.csv", index=False)
# Chapter 7.1.4. get the relative frequency based on a length metric (char, word, sentence)
def get_rel_frequency(par_df_count, par_df_len_metric_vector):
df_rel_freq = pd.DataFrame(columns=par_df_count.columns)
for index, row in par_df_count.iterrows():
df_rel_freq = df_rel_freq.append(row.div(par_df_len_metric_vector[index]))
return df_rel_freq
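# Hedged note (assuming the count frame and the length-metric vector share the same index): the row-wise
# append above can be written as one vectorized call with the same result, avoiding the growing DataFrame:
#     df_rel_freq = par_df_count.div(par_df_len_metric_vector, axis=0)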
# Chapter 7.1.4. whole process of the chapter: compute the individual relative frequency of each feature,
# compare the correlation with the article length for the absolute and the relative variant, and save the
# features with the relative frequency to new files
def individual_relative_frequency():
df_len_metrics = pd.read_csv(f"daten/1_raw/length_metrics.csv", sep=',', encoding="utf-8", nrows=2500)
# different metrics for individual relative frequencies
metrics = ['word_count', 'char_count', 'sentence_count']
for m in metrics:
# The csv is placed in a folder based on the metric for the individual relative frequency
path = f'daten/4_relative_frequency/{m}'
files = [f for f in listdir(path) if isfile(join(path, f))]
for f in files:
x = pd.read_csv(f"daten/4_relative_frequency/{m}/{f}",
sep=',', encoding="utf-8", nrows=2500).fillna(value=0)
x_rel = get_rel_frequency(x, df_len_metrics[m])
# Save the CSV with relative frequency
x_rel.to_csv(
f"daten/4_relative_frequency/{f.split('.')[0]}"
f"_rel.csv", index=False)
# Correlation is always between the metrics and the word_count
x['word_count'] = df_len_metrics['word_count']
x_rel['word_count'] = df_len_metrics['word_count']
            # correlation is computed only on the training part of the 60/40 split
x_train, x_test = train_test_split(x, test_size=0.4, random_state=42)
x_train_rel, x_test_rel = train_test_split(x_rel, test_size=0.4, random_state=42)
            # Calculate the mean correlation with the word count
print(f"{f}_abs: {x_train.corr(method='pearson', min_periods=1)['word_count'].iloc[:-1].mean()}")
print(f"{f}_rel: {x_train_rel.corr(method='pearson', min_periods=1)['word_count'].iloc[:-1].mean()}")
# Chapter 7.2.1 First step of the iterative filter: Rank the features
def sort_features_by_score(par_x, par_y, par_select_metric):
# Get a sorted ranking of all features by the selected metric
selector = SelectKBest(par_select_metric, k='all')
selector.fit(par_x, par_y)
# Sort the features by their score
return pd.DataFrame(dict(feature_names=par_x.columns, scores=selector.scores_)).sort_values('scores',
ascending=False)
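# Hedged usage sketch: the ranking is consumed downstream by slicing the top-k feature names, e.g.
#     ranked = sort_features_by_score(x_train, y_train, mutual_info_classif)
#     top_names = ranked['feature_names'][:100].tolist()
#     x_top = x_train[top_names]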
# Chapter 7.2.1 method to get the best percentile for GNB
def get_best_percentile_gnb(par_x_train, par_y_train, par_iter, par_df_sorted_features, step):
result_list = []
gnb = GaussianNB()
best_perc_round = par_iter - 1 # If no other point is found, highest amount of features (-1 starts to count from 0)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if len(par_y_train.index) / len(np.unique(par_y_train.values).tolist()) < 10:
cv = int(len(par_y_train.index) / len(np.unique(par_y_train.values).tolist())) - 1
else:
cv = 10
for perc_features in np.arange(step, par_iter + 1, step):
start_time = datetime.now()
# 1%*i best features to keep and create new dataframe with those only
number_of_features = int(perc_features * (len(par_x_train.columns) / 100))
# minimum one feature
number_of_features = 1 if number_of_features < 1 else number_of_features
feature_list = par_df_sorted_features['feature_names'][: number_of_features].tolist()
x_new_training = copy.deepcopy(par_x_train[feature_list])
# GNB Training
result_list.append(
cross_val_score(gnb, x_new_training, par_y_train, cv=cv, n_jobs=-1, scoring='accuracy').mean())
# Compares the accuracy with the 5 following points => needs 6 points minimum
if len(result_list) > 5:
# list starts to count at 0, subtract one more from len
difference_list_p2p = [result_list[p + 1] - result_list[p] for p in
range(len(result_list) - 6, len(result_list) - 1)]
difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6] for p in
range(len(result_list) - 6, len(result_list) - 1)]
            # Find the best percent if the 5 following points were lower than the point before or had a deviation <= 0.5%
            # or if all points are 2% lower than the first point
if all(point_y <= 0 for point_y in difference_list_p2p) or \
all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p) or \
all(point_y < -0.02 for point_y in difference_list_1p):
# the best perc is the results - 6 point in the result list
best_perc_round = len(result_list) - 6
break
# Console Output
print(f"GNB Round {perc_features / step}: {datetime.now() - start_time}")
# Optimization of the best percent
# If any point with a lower percent is higher, it is the new optimum
if any(point_y > result_list[best_perc_round] for point_y in result_list[:len(result_list) - 5]):
best_perc_round = result_list.index(max(result_list[:len(result_list) - 5]))
    # Tradeoff: accept up to 1% lower accuracy in exchange for a smaller percentage of features
    # As long as an earlier maximum exists whose accuracy is within 1% of the best and which uses at least 2% fewer features
better_perc_exists = True
best_accuracy_tradeoff = result_list[best_perc_round] - 0.01
    # If the best percentage is already within the first 2%, there is no room for the tradeoff
if best_perc_round - int(2 / step) < 0:
better_perc_exists = False
while better_perc_exists:
earliest_pos = best_perc_round - int(2 / step)
        # if it is less than 0 the slice would count from the end, so clamp it to 0
earliest_pos = 0 if earliest_pos < 0 else earliest_pos
if any(point_y > best_accuracy_tradeoff for point_y in result_list[:earliest_pos]):
best_perc_round = result_list.index(max(result_list[:earliest_pos]))
else:
better_perc_exists = False
# the best percent of the features is calculated by the percent start plus the rounds * step
best_perc = step + step * best_perc_round
print(best_perc)
return best_perc, best_perc_round, result_list
# Chapter 7.2.1 method to get the best percentile for SVC
def get_best_percentile_svc(par_x_train, par_y_train, par_iter, par_df_sorted_features, step):
result_list = []
# Parameter for SVC
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if len(par_y_train.index) / len(np.unique(par_y_train.values).tolist()) < 10:
cv = int(len(par_y_train.index) / len(np.unique(par_y_train.values).tolist())) - 1
else:
cv = 10
best_perc_round = par_iter - 1 # If no other point is found, highest amount of features (-1 starts to count from 0)
for perc_features in np.arange(step, par_iter + 1, step):
start_time = datetime.now()
# 1%*i best features to keep and create new dataframe with those only
number_of_features = int(perc_features * (len(par_x_train.columns) / 100))
# minimum one feature
number_of_features = 1 if number_of_features < 1 else number_of_features
feature_list = par_df_sorted_features['feature_names'][: number_of_features].tolist()
x_new_training = copy.deepcopy(par_x_train[feature_list])
# SVC Test
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=cv, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_new_training, par_y_train)
result_list.append(grid_results.best_score_)
# Compares the accuracy with the 5 following points => needs 6 points minimum
if len(result_list) > 5:
# list starts to count at 0, subtract one more from len
difference_list_p2p = [result_list[p + 1] - result_list[p] for p in
range(len(result_list) - 6, len(result_list) - 1)]
difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6] for p in
range(len(result_list) - 6, len(result_list) - 1)]
            # Find the best percent if the 5 following points were lower than the point before or had a deviation <= 0.5%
            # or if all points are 2% lower than the first point
if all(point_y <= 0 for point_y in difference_list_p2p) or \
all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p) or \
all(point_y < -0.02 for point_y in difference_list_1p):
# the best perc is the results - 6 point in the result list
best_perc_round = len(result_list) - 6
break
# Console Output
print(f"SVC Round {perc_features / step}: {datetime.now() - start_time}")
# Optimization of the best percent
# If any point with a lower percent is higher, it is the new optimum
if any(point_y > result_list[best_perc_round] for point_y in result_list[:len(result_list) - 5]):
best_perc_round = result_list.index(max(result_list[:len(result_list) - 5]))
    # Tradeoff: accept up to 1% lower accuracy in exchange for a smaller percentage of features
    # As long as an earlier maximum exists whose accuracy is within 1% of the best and which uses at least 2% fewer features
better_perc_exists = True
best_accuracy_tradeoff = result_list[best_perc_round] - 0.01
    # If the best percentage is already within the first 2%, there is no room for the tradeoff
if best_perc_round - int(2 / step) < 0:
better_perc_exists = False
while better_perc_exists:
earliest_pos = best_perc_round - int(2 / step)
        # if it is less than 0 the slice would count from the end, so clamp it to 0
earliest_pos = 0 if earliest_pos < 0 else earliest_pos
if any(point_y > best_accuracy_tradeoff for point_y in result_list[:earliest_pos]):
best_perc_round = result_list.index(max(result_list[:earliest_pos]))
else:
better_perc_exists = False
# the best percent of the features is calculated by the percent start plus the rounds * step
best_perc = step + step * best_perc_round
print(best_perc)
return best_perc, best_perc_round, result_list
# Chapter 7.2.1 method to get the best percentile for KNN
def get_best_percentile_knn(par_x_train, par_y_train, par_iter, par_df_sorted_features, step):
result_list = []
best_perc_round = par_iter - 1 # If no other point is found, highest amount of features (-1 starts to count from 0)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if len(par_y_train.index) / len(np.unique(par_y_train.values).tolist()) < 10:
cv = int(len(par_y_train.index) / len(np.unique(par_y_train.values).tolist())) - 1
else:
cv = 10
for perc_features in np.arange(step, par_iter + 1, step):
start_time = datetime.now()
# 1%*i best features to keep and create new dataframe with those only
number_of_features = int(perc_features * (len(par_x_train.columns) / 100))
# minimum one feature
number_of_features = 1 if number_of_features < 1 else number_of_features
feature_list = par_df_sorted_features['feature_names'][: number_of_features].tolist()
x_new_training = copy.deepcopy(par_x_train[feature_list])
# Parameter for KNN
        # some values from 3 up to the square root of the number of samples
neighbors = [i for i in range(3, int(math.sqrt(len(x_new_training.index))), 13)]
neighbors += [1, 3, 5, 11, 19, 36]
        if int(math.sqrt(len(x_new_training.index))) not in neighbors:
neighbors.append(int(math.sqrt(len(x_new_training.index))))
# Not more neighbors then samples-2
neighbors = [x for x in neighbors if x < len(x_new_training.index) - 2]
# remove duplicates
neighbors = list(set(neighbors))
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN Training
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=cv, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_new_training, par_y_train)
result_list.append(grid_results.best_score_)
# Compares the accuracy with the 5 following points => needs 6 points minimum
if len(result_list) > 5:
# list starts to count at 0, subtract one more from len
difference_list_p2p = [result_list[p + 1] - result_list[p] for p in
range(len(result_list) - 6, len(result_list) - 1)]
difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6] for p in
range(len(result_list) - 6, len(result_list) - 1)]
            # Find the best percent if the 5 following points were lower than the point before or had a deviation <= 0.5%
            # or if all points are 2% lower than the first point
if all(point_y <= 0 for point_y in difference_list_p2p) or \
all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p) or \
all(point_y < -0.02 for point_y in difference_list_1p):
# the best perc is the results - 6 point in the result list
best_perc_round = len(result_list) - 6
break
# Console Output
print(f"KNN Round {perc_features / step}: {datetime.now() - start_time}")
# Optimization of the best percent
# If any point with a lower percent is higher, it is the new optimum
if any(point_y > result_list[best_perc_round] for point_y in result_list[:len(result_list) - 5]):
best_perc_round = result_list.index(max(result_list[:len(result_list) - 5]))
    # Tradeoff: accept up to 1% lower accuracy in exchange for a smaller percentage of features
    # As long as an earlier maximum exists whose accuracy is within 1% of the best and which uses at least 2% fewer features
better_perc_exists = True
best_accuracy_tradeoff = result_list[best_perc_round] - 0.01
    # If the best percentage is already within the first 2%, there is no room for the tradeoff
if best_perc_round - int(2 / step) < 0:
better_perc_exists = False
while better_perc_exists:
earliest_pos = best_perc_round - int(2 / step)
        # if it is less than 0 the slice would count from the end, so clamp it to 0
earliest_pos = 0 if earliest_pos < 0 else earliest_pos
if any(point_y >= best_accuracy_tradeoff for point_y in result_list[:earliest_pos]):
best_perc_round = result_list.index(max(result_list[:earliest_pos]))
else:
better_perc_exists = False
# the best percent of the features is calculated by the percent start plus the rounds * step
best_perc = step + step * best_perc_round
print(best_perc)
return best_perc, best_perc_round, result_list
# Chapter 7.2.1 Filter the feature based on the estimated best percentile and save it into a new file
def print_filter_feature_percentile(par_path, par_df_sorted_features, par_percent, par_x, par_file_name):
    # select (len(par_x.columns) / 100) * par_percent features, i.e. the top par_percent percent
number_features = round(par_percent * (len(par_x.columns) / 100))
    # use at least one feature if the percentage works out to less than 1
number_features = 1 if number_features < 1 else number_features
feature_list = par_df_sorted_features['feature_names'][:number_features].tolist()
# print the name of the features in a file
original_stdout = sys.stdout
with open(f'{par_path}selected_features/{par_file_name}_filtered.txt', 'w', encoding="utf-8") as f:
sys.stdout = f
print(f"Features: {len(feature_list)}")
print(f"{feature_list}")
sys.stdout = original_stdout
# select the best features from the original dataset
par_x[feature_list].to_csv(f"{par_path}csv_after_filter/{par_file_name}_filtered.csv", index=False)
# Chapter 7.2.1 Complete process of the iterative Filter
def iterative_filter_process(par_path, par_df, par_num_texts, par_num_authors):
y = par_df['label_encoded']
path = f'{par_path}csv_before_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
# Filter the files for author and text numbers if 'all' is not set.
if par_num_authors != "all":
r = re.compile(f"a{par_num_authors}_")
files = list(filter(r.match, files))
    if par_num_texts != "all":
r = re.compile(f".*t{par_num_texts}_")
files = list(filter(r.match, files))
step_perc = 1.0
for f in files:
filename = f.split(".")[0]
print(f)
x = pd.read_csv(f"{par_path}csv_before_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# Get sorted features
df_sorted_features = sort_features_by_score(x_train, y_train, mutual_info_classif)
# Calculate the best percentiles of the data for the different classifier
best_perc_gnb, best_round_gnb, result_list_gnb = get_best_percentile_gnb(x_train, y_train, 50,
df_sorted_features, step_perc)
best_perc_svc, best_round_svc, result_list_svc = get_best_percentile_svc(x_train, y_train, 50,
df_sorted_features, step_perc)
best_perc_knn, best_round_knn, result_list_knn = get_best_percentile_knn(x_train, y_train, 50,
df_sorted_features, step_perc)
        # select the best features from the original dataset
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_gnb, x, "gnb_" + filename)
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_svc, x, "svc_" + filename)
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_knn, x, "knn_" + filename)
# print best perc to a file
original_stdout = sys.stdout
with open(f'{par_path}best_perc/{filename}.txt', 'w') as f:
sys.stdout = f
print(f"best_perc_gnb: ({best_perc_gnb}|{result_list_gnb[best_round_gnb]})\n"
f"best_perc_svc: ({best_perc_svc}|{result_list_svc[best_round_svc]})\n"
f"best_perc_knn: ({best_perc_knn}|{result_list_knn[best_round_knn]})")
sys.stdout = original_stdout
# draw diagram
len_list = [len(result_list_gnb), len(result_list_svc), len(result_list_knn)]
plt.plot([i * step_perc for i in range(1, len(result_list_gnb) + 1)], result_list_gnb, 'r-', label="gnb")
plt.plot(best_perc_gnb, result_list_gnb[best_round_gnb], 'rx')
plt.plot([i * step_perc for i in range(1, len(result_list_svc) + 1)], result_list_svc, 'g-', label="svc")
plt.plot(best_perc_svc, result_list_svc[best_round_svc], 'gx')
plt.plot([i * step_perc for i in range(1, len(result_list_knn) + 1)], result_list_knn, 'b-', label="knn")
plt.plot(best_perc_knn, result_list_knn[best_round_knn], 'bx')
plt.axis([step_perc, (max(len_list) + 1) * step_perc, 0, 1])
plt.xlabel('Daten in %')
plt.ylabel('Genauigkeit')
plt.legend()
plt.savefig(f"{par_path}/diagrams/{filename}")
plt.cla()
# print accuracy to file
df_percent = pd.DataFrame(data=[i * step_perc for i in range(1, max(len_list) + 1)], columns=['percent'])
df_gnb = pd.DataFrame(data=result_list_gnb, columns=['gnb'])
df_svc = pd.DataFrame(data=result_list_svc, columns=['svc'])
df_knn = pd.DataFrame(data=result_list_knn, columns=['knn'])
df_accuracy = pd.concat([df_percent, df_gnb, df_svc, df_knn], axis=1)
df_accuracy = df_accuracy.fillna(value="")
df_accuracy.to_csv(f'{par_path}accuracy/{filename}_filtered.csv', index=False)
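# Hedged usage sketch (folder layout taken from the paths used above; the label column is 'label_encoded'):
#     df_articles = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
#     iterative_filter_process("daten/5_iterative_filter/", df_articles, "all", "all")
# par_path is expected to contain csv_before_filter/, csv_after_filter/, selected_features/, best_perc/,
# diagrams/ and accuracy/ sub-folders.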
# Chapter 8.1. and later, basically the process of the iterative filter only with the svc classifier
def iterative_filter_process_svm(par_path, par_df, par_num_texts, par_num_authors):
y = par_df['label_encoded']
path = f'{par_path}csv_before_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
# Filter the files for author and text numbers if 'all' is not set.
if par_num_authors != "all":
r = re.compile(f"a{par_num_authors}_")
files = list(filter(r.match, files))
    if par_num_texts != "all":
r = re.compile(f".*t{par_num_texts}_")
files = list(filter(r.match, files))
step_perc = 1.0
for f in files:
filename = f.split(".")[0]
print(f)
x = pd.read_csv(f"{par_path}csv_before_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# Get sorted features
df_sorted_features = sort_features_by_score(x_train, y_train, mutual_info_classif)
# Calculate the best percentiles of the data for svc
best_perc_svc, best_round_svc, result_list_svc = get_best_percentile_svc(x_train, y_train, 50,
df_sorted_features, step_perc)
        # select the best features from the original dataset
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_svc, x, filename)
# print best perc to a file
original_stdout = sys.stdout
with open(f'{par_path}best_perc/{filename}.txt', 'w') as out_f:
sys.stdout = out_f
print(f"best_perc_svc: ({best_perc_svc}|{result_list_svc[best_round_svc]})\n")
sys.stdout = original_stdout
# draw diagram
plt.plot([i * step_perc for i in range(1, len(result_list_svc) + 1)], result_list_svc, 'g-', label="svc")
plt.plot(best_perc_svc, result_list_svc[best_round_svc], 'gx')
plt.axis([step_perc, (len(result_list_svc) + 1) * step_perc, 0, 1])
plt.xlabel('Daten in %')
plt.ylabel('Genauigkeit')
plt.legend()
plt.savefig(f"{par_path}/diagrams/{filename}")
plt.cla()
# print accuracy to file
df_percent = pd.DataFrame(data=[i * step_perc for i in range(1, len(result_list_svc) + 1)], columns=['percent'])
df_svc = pd.DataFrame(data=result_list_svc, columns=['svc'])
df_accuracy = pd.concat([df_percent, df_svc], axis=1)
df_accuracy = df_accuracy.fillna(value="")
df_accuracy.to_csv(f'{par_path}accuracy/{filename}_filtered.csv', index=False)
# Chapter 7.2.1. Get the accuracy of the features before the iterative filter, results in table 18
def get_accuracy_before_iterative_filter():
gnb_result_list, svc_result_list, knn_result_list, gnb_time_list, svc_time_list, knn_time_list \
= [], [], [], [], [], []
y = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8", nrows=2500)['label_encoded']
path = f'daten/5_iterative_filter/csv_before_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
gnb = GaussianNB()
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# Get the feature names for the table
feature_list = [re.search("(.+?(?=_rel))", f).group(1) for f in files]
for f in files:
print(f)
x = pd.read_csv(f"daten/5_iterative_filter/csv_before_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
# GNB fit
start_time = datetime.now()
gnb.fit(x_train, y_train)
# score on test data
score = accuracy_score(gnb.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"GNB test score for {f}: {score}")
print(f"GNB time for {f}: {time_taken}")
gnb_result_list.append(score)
gnb_time_list.append(time_taken)
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
start_time = datetime.now()
# fit on train data
svc.fit(x_train, y_train)
# predict test data
score = accuracy_score(svc.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"SVC test score for {f}: {score}")
print(f"SVC time for {f}: {time_taken}")
svc_result_list.append(score)
svc_time_list.append(time_taken)
# Parameter for KNN
        # some values from 3 up to the square root of the number of features
neighbors = [i for i in range(3, int(math.sqrt(len(x.columns))), 13)]
neighbors += [5, 11, 19, 36]
if int(math.sqrt(len(x.columns))) not in neighbors:
neighbors.append(int(math.sqrt(len(x.columns))))
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN parameter optimization
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
knn = KNeighborsClassifier(n_neighbors=grid_results.best_params_['n_neighbors'],
metric=grid_results.best_params_['metric'],
weights=grid_results.best_params_['weights'])
# fit on train data
knn.fit(x_train, y_train)
# KNN predict test data
start_time = datetime.now()
# predict test data
score = accuracy_score(knn.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"KNN test score for {f}: {score}")
print(f"KNN time for {f}: {time_taken}")
knn_result_list.append(score)
knn_time_list.append(time_taken)
# create dataframe with the scores and times
df_results = pd.DataFrame()
df_results['feature'] = feature_list
df_results['score_gnb'] = gnb_result_list
df_results['time_gnb'] = gnb_time_list
df_results['score_svc'] = svc_result_list
df_results['time_svc'] = svc_time_list
df_results['score_knn'] = knn_result_list
df_results['time_knn'] = knn_time_list
return df_results
# Chapter 7.2.1. Get the accuracy of the features after the iterative filter, results in table 18
def get_accuracy_after_iterative_filter():
df_gnb_result = pd.DataFrame(columns=['feature', 'score_gnb', 'time_gnb'])
df_svc_result = pd.DataFrame(columns=['feature', 'score_svc', 'time_svc'])
df_knn_result = pd.DataFrame(columns=['feature', 'score_knn', 'time_knn'])
y = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8", nrows=2500)['label_encoded']
# path = f'daten/5_iterative_filter/csv_after_filter'
path = f'daten/5_iterative_filter/5_iterative_filter/csv_after_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
gnb = GaussianNB()
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
for f in files:
print(f)
# Get the feature name for the table
feature = re.search(".{4}(.+?(?=_rel))", f).group(1)
# x = pd.read_csv(f"daten/5_iterative_filter/csv_after_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x = pd.read_csv(f"daten/5_iterative_filter/5_iterative_filter/csv_after_filter/{f}", sep=',', encoding="utf-8",
nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
# Select the classifier by the start of the filename
if f.split("_")[0] == "gnb":
# GNB fit
start_time = datetime.now()
gnb.fit(x_train, y_train)
# score on test data
score = accuracy_score(gnb.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"GNB test score for {f}: {score}")
print(f"GNB time for {f}: {time_taken}")
df_gnb_result = df_gnb_result.append(pd.DataFrame(data={'feature': [feature], 'score_gnb': [score],
'time_gnb': [time_taken]}), ignore_index=True)
elif f.split("_")[0] == "svc":
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
start_time = datetime.now()
# fit on train data
svc.fit(x_train, y_train)
# predict test data
score = accuracy_score(svc.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"SVC test score for {f}: {score}")
print(f"SVC training time for {f}: {time_taken}")
df_svc_result = df_svc_result.append(pd.DataFrame(data={'feature': [feature], 'score_svc': [score],
'time_svc': [time_taken]}), ignore_index=True)
elif f.split("_")[0] == "knn":
# Parameter for KNN
            # some values from 3 up to the square root of the number of features
neighbors = [i for i in range(3, int(math.sqrt(len(x.columns))), 13)]
neighbors += [5, 11, 19, 36]
if int(math.sqrt(len(x.columns))) not in neighbors:
neighbors.append(int(math.sqrt(len(x.columns))))
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN parameter optimization
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
knn = KNeighborsClassifier(n_neighbors=grid_results.best_params_['n_neighbors'],
metric=grid_results.best_params_['metric'],
weights=grid_results.best_params_['weights'])
start_time = datetime.now()
# fit on train data
knn.fit(x_train, y_train)
# KNN predict test data
start_time = datetime.now()
# predict test data
score = accuracy_score(knn.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"KNN test score for {f}: {score}")
print(f"KNN test time for {f}: {time_taken}")
df_knn_result = df_knn_result.append(pd.DataFrame(data={'feature': [feature], 'score_knn': [score],
'time_knn': [time_taken]}), ignore_index=True)
df_merge = pd.merge(df_gnb_result, df_knn_result, on="feature", how='outer')
df_merge = pd.merge(df_merge, df_svc_result, on="feature", how='outer')
return df_merge
# Get n articles per author for a given number of authors. Required for setups with different numbers of authors and articles
def get_n_article_by_author(par_df, par_label_count, par_article_count):
df_articles = pd.DataFrame(columns=['label_encoded', 'text'])
# only keep entries of the "par_label_count" first labels
par_df = par_df.where(par_df['label_encoded'] <= par_label_count).dropna()
labels = np.unique(par_df['label_encoded'].values).tolist()
list_article_count = [par_article_count for i in labels]
for index, row in par_df.iterrows():
if list_article_count[labels.index(row['label_encoded'])] != 0:
d = {'label_encoded': [row['label_encoded']], 'text': [row['text']]}
df_articles = df_articles.append(pd.DataFrame.from_dict(d), ignore_index=True)
list_article_count[labels.index(row['label_encoded'])] -= 1
if sum(list_article_count) == 0:
break
return df_articles
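# Hedged equivalent (assuming the rows are already ordered the way the loop reads them): the same
# "first m articles of the first n authors" selection can be expressed with groupby, which avoids
# appending row by row:
#     subset = par_df[par_df['label_encoded'] <= par_label_count]
#     subset = subset.groupby('label_encoded', sort=False).head(par_article_count)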
# Return the indices of n articles per author for a given number of authors. Required for setups with
# different numbers of authors and articles
def get_n_article_index_by_author(par_df, par_label_count, par_article_count):
index_list = []
# only keep entries of the "par_label_count" first labels
par_df = par_df.where(par_df['label_encoded'] <= par_label_count).dropna()
labels = np.unique(par_df['label_encoded'].values).tolist()
list_article_count = [par_article_count for i in labels]
for index, row in par_df.iterrows():
if row['label_encoded'] in labels:
if list_article_count[labels.index(row['label_encoded'])] != 0:
index_list.append(index)
list_article_count[labels.index(row['label_encoded'])] -= 1
if sum(list_article_count) == 0:
break
return index_list
# Method to estimate the f1 score of the test data for GNB
def get_f1_for_gnb(par_x_train, par_x_test, par_y_train, par_y_test):
gnb = GaussianNB()
# GNB fit
gnb.fit(par_x_train, par_y_train)
# score on test data
gnb_score = f1_score(gnb.predict(par_x_test), par_y_test, average='micro')
return gnb_score
# Method to estimate the f1 score of the test data for SVC
def get_f1_for_svc(par_x_train, par_x_test, par_y_train, par_y_test, par_cv):
# Param Grid SVC
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=par_cv, n_jobs=-1, scoring='f1_micro')
grid_results = grid_search.fit(par_x_train, par_y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
# fit on train data
svc.fit(par_x_train, par_y_train)
# predict test data
svc_score = f1_score(svc.predict(par_x_test), par_y_test, average='micro')
return svc_score
# Method to estimate the f1 score of the test data for KNN
def get_f1_for_knn(par_x_train, par_x_test, par_y_train, par_y_test, par_cv):
    # define the param grid for knn; the number of neighbors has to be lower than the number of samples
neighbors = [1, 3, 5, 11, 19, 36, 50]
# number of neighbors must be less than number of samples
neighbors = [x for x in neighbors if x < len(par_x_test)]
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN parameter optimization
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=par_cv, n_jobs=-1, scoring='f1_micro')
grid_results = grid_search.fit(par_x_train, par_y_train)
knn = KNeighborsClassifier(n_neighbors=grid_results.best_params_['n_neighbors'],
metric=grid_results.best_params_['metric'],
weights=grid_results.best_params_['weights'])
# fit on train data
knn.fit(par_x_train, par_y_train)
# predict test data
knn_score = f1_score(knn.predict(par_x_test), par_y_test, average='micro')
return knn_score
# Method to estimate the accuracy of the test data for SVC
def get_accuracy_for_svc(par_x_train, par_x_test, par_y_train, par_y_test, par_cv):
# Param Grid SVC
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=par_cv, n_jobs=-1, scoring='f1_micro')
grid_results = grid_search.fit(par_x_train, par_y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
# fit on train data
svc.fit(par_x_train, par_y_train)
# predict test data
svc_score = accuracy_score(svc.predict(par_x_test), par_y_test)
return svc_score
# Chapter 7.3.1. comparison of the word length feature alternatives
def compare_word_length_features():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
# Different values for the texts by authors
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'wl_matrix_gnb': [], 'wl_matrix_svc': [], 'wl_matrix_knn': [],
'wl_matrix_bins_20_30_gnb': [], 'wl_matrix_bins_20_30_svc': [], 'wl_matrix_bins_20_30_knn': [],
'wl_matrix_bins_10_20_gnb': [], 'wl_matrix_bins_10_20_svc': [], 'wl_matrix_bins_10_20_knn': [],
'wl_matrix_20_gnb': [], 'wl_matrix_20_svc': [], 'wl_matrix_20_knn': [],
'wl_avg_gnb': [], 'wl_avg_svc': [], 'wl_avg_knn': []}
for author_texts in list_author_texts:
# get article for n authors with number of author texts
df_article = get_n_article_by_author(df_all_texts, 25, author_texts)
# Get the word count for the individual relative frequency
word_count = get_word_count(df_article)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
else:
cv = 10
# Get the scores for every feature
for feature in ["wl_matrix", "wl_matrix_bins_20_30", "wl_matrix_bins_10_20", "wl_avg", "wl_matrix_20"]:
# select the test/train data by the feature name and calculate the individual relative frequency
if feature == "wl_matrix":
x = get_rel_frequency(get_word_length_matrix(df_article).fillna(value=0), word_count['word_count'])
elif feature == "wl_matrix_bins_20_30":
x = get_rel_frequency(get_word_length_matrix_with_interval(df_article, 20, 30).fillna(value=0),
word_count['word_count'])
elif feature == "wl_matrix_bins_10_20":
x = get_rel_frequency(get_word_length_matrix_with_interval(df_article, 10, 20).fillna(value=0),
word_count['word_count'])
elif feature == "wl_avg":
x = get_average_word_length(df_article)
elif feature == "wl_matrix_20":
x = get_word_length_matrix_with_margin(df_article, 20)
            # Scale the data, otherwise large counts in wl_matrix can dominate and the hyperparameter
            # optimization for svc takes a long time because of small differences from the average
scaler = StandardScaler()
scaler.fit(x)
x = scaler.transform(x)
y = df_article['label_encoded']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# calculate scores
gnb_score = get_f1_for_gnb(x_train, x_test, y_train, y_test)
svc_score = get_f1_for_svc(x_train, x_test, y_train, y_test, cv)
knn_score = get_f1_for_knn(x_train, x_test, y_train, y_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
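# Hedged usage sketch (the output path is hypothetical): the comparison is meant to be run once and the
# resulting score table written out, e.g.
#     df_wl = compare_word_length_features()
#     df_wl.to_csv("daten/6_feature_analysis/word_length_comparison.csv", index=False)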
# Chapter 7.3.2. comparison of the digit feature alternatives
def compare_digit_features():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
# Different values for the texts by authors
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'digit_sum_gnb': [], 'digit_sum_svc': [], 'digit_sum_knn': [],
'digits_gnb': [], 'digits_svc': [], 'digits_knn': []}
for author_texts in list_author_texts:
# get article for n authors with number of author texts
df_article = get_n_article_by_author(df_all_texts, 25, author_texts)
        # Get the char count for the individual relative frequency
char_count = get_char_count(df_article)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
else:
cv = 10
# Get the scores for every feature
for feature in ["digit_sum", "digits"]:
# select the test/train data by the feature name and calculate the individual relative frequency
if feature == "digit_sum":
x = get_rel_frequency(get_sum_digits(df_article).fillna(value=0), char_count['char_count'])
elif feature == "digits":
x = get_rel_frequency(get_digits(df_article).fillna(value=0), char_count['char_count'])
y = df_article['label_encoded']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# calculate scores
gnb_score = get_f1_for_gnb(x_train, x_test, y_train, y_test)
svc_score = get_f1_for_svc(x_train, x_test, y_train, y_test, cv)
knn_score = get_f1_for_knn(x_train, x_test, y_train, y_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
# Chapter 7.3.3. comparison of the word ngrams with n 4-6
def compare_word_4_6_grams():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
# Different values for the texts by authors
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'w4g_gnb': [], 'w4g_svc': [], 'w4g_knn': [],
'w5g_gnb': [], 'w5g_svc': [], 'w5g_knn': [],
'w6g_gnb': [], 'w6g_svc': [], 'w6g_knn': []}
# load the data
df_w4g = pd.read_csv("daten/6_feature_analysis/input_data/word_4_gram_rel.csv", sep=',', encoding="utf-8")
df_w5g = pd.read_csv("daten/6_feature_analysis/input_data/word_5_gram_rel.csv", sep=',', encoding="utf-8")
df_w6g = pd.read_csv("daten/6_feature_analysis/input_data/word_6_gram_rel.csv", sep=',', encoding="utf-8")
for author_texts in list_author_texts:
# indices for article for n authors with m texts
index_list = get_n_article_index_by_author(df_all_texts, 25, author_texts)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
# Get the scores for every feature
for feature in ["w4g", "w5g", "w6g"]:
# select the indices from the article rows by the given indices
if feature == "w4g":
x = df_w4g.iloc[index_list]
elif feature == "w5g":
x = df_w5g.iloc[index_list]
elif feature == "w6g":
x = df_w6g.iloc[index_list]
# Delete features which only occur once
x = trim_df_by_occurrence(x, 1)
            # reset the indices so they run from 0 to authors * texts per author - 1
x = x.reset_index(drop=True)
y = df_all_texts.iloc[index_list]['label_encoded']
y = y.reset_index(drop=True)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# calculate scores
gnb_score = get_f1_for_gnb(x_train, x_test, y_train, y_test)
svc_score = get_f1_for_svc(x_train, x_test, y_train, y_test, cv)
knn_score = get_f1_for_knn(x_train, x_test, y_train, y_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
# Chapter 7.3.3. comparison of the word ngrams with n 2-3
def compare_word_2_3_grams():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
# Different values for the texts by authors
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'w2g_gnb': [], 'w2g_svc': [], 'w2g_knn': [],
'w3g_gnb': [], 'w3g_svc': [], 'w3g_knn': []}
for author_texts in list_author_texts:
        print(f"Texts per author: {author_texts}")
# indices for article for n authors with m texts
        index_list = get_n_article_index_by_author(df_all_texts, 25, author_texts)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
# select the indices from the article rows by the given indices
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
        print(f"Articles: {len(df_balanced.index)}")
# extract the features
df_w2g = get_word_n_grams(df_balanced, 2)
df_w3g = get_word_n_grams(df_balanced, 3)
# Preprocessing steps
word_count = get_word_count(df_balanced)
df_w2g = preprocessing_steps_pos_tag_n_grams(df_w2g, word_count['word_count'])
df_w3g = preprocessing_steps_pos_tag_n_grams(df_w3g, word_count['word_count'])
        # Scale the features, otherwise the SVM takes a long time with very small values.
scaler = StandardScaler()
df_w2g[df_w2g.columns] = scaler.fit_transform(df_w2g[df_w2g.columns])
df_w3g[df_w3g.columns] = scaler.fit_transform(df_w3g[df_w3g.columns])
label = df_balanced['label_encoded']
# Train/Test 60/40 split
df_w2g_train, df_w2g_test, df_w3g_train, df_w3g_test, label_train, label_test = \
train_test_split(df_w2g, df_w3g, label, test_size=0.4, random_state=42, stratify=label)
# Get the scores for every feature
for feature in ["w2g", "w3g"]:
# select the indices from the article rows by the given indices
# iterative filter
# returns df_x_train_gnb, df_x_test_gnb, df_x_train_svc, df_x_test_svc, df_x_train_knn, df_x_test_knn
if feature == "w2g":
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test = \
feature_selection_iterative_filter(df_w2g_train, df_w2g_test, label_train, 1.0, mutual_info_classif)
elif feature == "w3g":
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test = \
feature_selection_iterative_filter(df_w3g_train, df_w3g_test, label_train, 1.0, mutual_info_classif)
            # Do not use the iterative filter for the GNB split because it gave poor results;
            # use the unfiltered features of the current n-gram size instead
            x_gnb_train, x_gnb_test, label_train, label_test = \
                train_test_split(df_w2g if feature == "w2g" else df_w3g, label,
                                 test_size=0.4, random_state=42, stratify=label)
print(f"cv: {cv}")
print(f"Train Labels: {label_train.value_counts()}")
print(f"Test Labels: {label_test.value_counts()}")
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
# Chapter 7.3.4. comparison of the different lengths of char ngrams
# Chapter 7.3.4. whole process of the comparison of the char-n-gram features
def compare_char_n_grams_process(par_base_path):
df_all_texts = pd.read_csv(f"musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
author_counts = [25]
text_counts = [10, 15, 25, 50, 75, 100]
for number_authors in author_counts:
for number_texts in text_counts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
extract_n_gram_features_to_csv(df_balanced, par_base_path, number_authors, number_texts)
iterative_filter_process(par_base_path, df_balanced, number_texts, number_authors)
compare_char_affix_ngrams(text_counts, author_counts, par_base_path, df_all_texts) \
.to_csv(f"{par_base_path}results/char_affix_n_grams.csv", index=False)
compare_char_word_ngrams(text_counts, author_counts, par_base_path, df_all_texts) \
.to_csv(f"{par_base_path}results/char_word_n_grams.csv", index=False)
compare_char_punct_ngrams(text_counts, author_counts, par_base_path, df_all_texts) \
.to_csv(f"{par_base_path}results/char_punct_n_grams.csv", index=False)
# Chapter 7.3.4. char-affix-ngrams
def compare_char_affix_ngrams(par_author_texts, par_authors, par_base_path, par_df):
# save the results in a dictionary
dic_f1_results = {'c_affix_2_gnb': [], 'c_affix_2_svc': [], 'c_affix_2_knn': [],
'c_affix_3_gnb': [], 'c_affix_3_svc': [], 'c_affix_3_knn': [],
'c_affix_4_gnb': [], 'c_affix_4_svc': [], 'c_affix_4_knn': [],
'c_affix_5_gnb': [], 'c_affix_5_svc': [], 'c_affix_5_knn': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(par_df, number_authors, number_texts)
df_balanced = par_df.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Get the scores for every feature
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
for feature in ["c_affix_2", "c_affix_3", "c_affix_4", "c_affix_5"]:
# read the data based on n, texts and authors
if feature == "c_affix_2":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_affix_2_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_affix_2_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_affix_2_gram_filtered.csv")
elif feature == "c_affix_3":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_affix_3_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_affix_3_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_affix_3_gram_filtered.csv")
elif feature == "c_affix_4":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_affix_4_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_affix_4_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_affix_4_gram_filtered.csv")
elif feature == "c_affix_5":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_affix_5_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_affix_5_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_affix_5_gram_filtered.csv")
                # Scale the features, otherwise the SVM takes a long time with very small values.
scaler = StandardScaler()
df_gnb[df_gnb.columns] = scaler.fit_transform(df_gnb[df_gnb.columns])
df_svc[df_svc.columns] = scaler.fit_transform(df_svc[df_svc.columns])
df_knn[df_knn.columns] = scaler.fit_transform(df_knn[df_knn.columns])
# Train/Test 60/40 split
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test, label_train, label_test = \
train_test_split(df_gnb, df_svc, df_knn, label, test_size=0.4, random_state=42, stratify=label)
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {number_authors} authors and {number_texts} texts: {gnb_score}")
print(f"SVC-Score for {feature} with {number_authors} authors and {number_texts} texts: {svc_score}")
print(f"KNN-Score for {feature} with {number_authors} authors and {number_texts} texts: {knn_score}")
return pd.DataFrame(dic_f1_results)
# Chapter 7.3.4. char-word-ngrams
def compare_char_word_ngrams(par_author_texts, par_authors, par_base_path, par_df):
# save the results in a dictionary
dic_f1_results = {'c_word_2_gnb': [], 'c_word_2_svc': [], 'c_word_2_knn': [],
'c_word_3_gnb': [], 'c_word_3_svc': [], 'c_word_3_knn': [],
'c_word_4_gnb': [], 'c_word_4_svc': [], 'c_word_4_knn': [],
'c_word_5_gnb': [], 'c_word_5_svc': [], 'c_word_5_knn': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(par_df, number_authors, number_texts)
df_balanced = par_df.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Get the scores for every feature
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
for feature in ["c_word_2", "c_word_3", "c_word_4", "c_word_5"]:
# read the data based on n, texts and authors
if feature == "c_word_2":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_word_2_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_word_2_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_word_2_gram_filtered.csv")
elif feature == "c_word_3":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_word_3_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_word_3_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_word_3_gram_filtered.csv")
elif feature == "c_word_4":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_word_4_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_word_4_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_word_4_gram_filtered.csv")
elif feature == "c_word_5":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_word_5_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_word_5_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_word_5_gram_filtered.csv")
                # Scale the features, otherwise the SVM takes a long time with very small values.
scaler = StandardScaler()
df_gnb[df_gnb.columns] = scaler.fit_transform(df_gnb[df_gnb.columns])
df_svc[df_svc.columns] = scaler.fit_transform(df_svc[df_svc.columns])
df_knn[df_knn.columns] = scaler.fit_transform(df_knn[df_knn.columns])
# Train/Test 60/40 split
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test, label_train, label_test = \
train_test_split(df_gnb, df_svc, df_knn, label, test_size=0.4, random_state=42, stratify=label)
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {number_authors} authors and {number_texts} texts: {gnb_score}")
print(f"SVC-Score for {feature} with {number_authors} authors and {number_texts} texts: {svc_score}")
print(f"KNN-Score for {feature} with {number_authors} authors and {number_texts} texts: {knn_score}")
return pd.DataFrame(dic_f1_results)
# Chapter 7.3.4. char-punct-ngrams
def compare_char_punct_ngrams(par_author_texts, par_authors, par_base_path, par_df):
# save the results in a dictionary
dic_f1_results = {'c_punct_2_gnb': [], 'c_punct_2_svc': [], 'c_punct_2_knn': [],
'c_punct_3_gnb': [], 'c_punct_3_svc': [], 'c_punct_3_knn': [],
'c_punct_4_gnb': [], 'c_punct_4_svc': [], 'c_punct_4_knn': [],
'c_punct_5_gnb': [], 'c_punct_5_svc': [], 'c_punct_5_knn': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(par_df, number_authors, number_texts)
df_balanced = par_df.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Get the scores for every feature
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
for feature in ["c_punct_2", "c_punct_3", "c_punct_4", "c_punct_5"]:
# read the data based on n, texts and authors
if feature == "c_punct_2":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_punct_2_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_punct_2_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_punct_2_gram_filtered.csv")
elif feature == "c_punct_3":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_punct_3_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_punct_3_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_punct_3_gram_filtered.csv")
elif feature == "c_punct_4":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_punct_4_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_punct_4_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_punct_4_gram_filtered.csv")
elif feature == "c_punct_5":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_punct_5_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_punct_5_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_punct_5_gram_filtered.csv")
                # Scale the features, otherwise the SVM takes a long time with very small values.
scaler = StandardScaler()
df_gnb[df_gnb.columns] = scaler.fit_transform(df_gnb[df_gnb.columns])
df_svc[df_svc.columns] = scaler.fit_transform(df_svc[df_svc.columns])
df_knn[df_knn.columns] = scaler.fit_transform(df_knn[df_knn.columns])
# Train/Test 60/40 split
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test, label_train, label_test = \
train_test_split(df_gnb, df_svc, df_knn, label, test_size=0.4, random_state=42, stratify=label)
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {number_authors} authors and {number_texts} texts: {gnb_score}")
print(f"SVC-Score for {feature} with {number_authors} authors and {number_texts} texts: {svc_score}")
print(f"KNN-Score for {feature} with {number_authors} authors and {number_texts} texts: {knn_score}")
return pd.DataFrame(dic_f1_results)
# Chapter 7.3.4. Print the char-n-gram features in different files
def extract_n_gram_features_to_csv(par_df, par_base_path, par_number_authors, par_number_texts):
char_count = get_char_count(par_df)
# n from 2-5
for n in range(2, 6):
ca_ng = get_char_affix_n_grams(par_df, n)
preprocessing_steps_char_n_grams(ca_ng, char_count['char_count'])\
.to_csv(f"{par_base_path}csv_before_filter/a{par_number_authors}_t{par_number_texts}"
f"_char_affix_{n}_gram.csv", index=False)
cw_ng = get_char_word_n_grams(par_df, n)
preprocessing_steps_char_n_grams(cw_ng, char_count['char_count'])\
.to_csv(f"{par_base_path}csv_before_filter/a{par_number_authors}_t{par_number_texts}"
f"_char_word_{n}_gram.csv", index=False)
cp_ng = get_char_punct_n_grams(par_df, n)
preprocessing_steps_char_n_grams(cp_ng, char_count['char_count'])\
.to_csv(f"{par_base_path}csv_before_filter/a{par_number_authors}_t{par_number_texts}"
f"_char_punct_{n}_gram.csv", index=False)
print(f"Extraction Round {n - 1} done")
return True
# combined preprocessing steps of the pos-tag-n-grams
def preprocessing_steps_pos_tag_n_grams(par_feature, length_metric):
# Filter features which only occur once
par_feature = trim_df_by_occurrence(par_feature, 1)
# Individual relative frequency
par_feature = get_rel_frequency(par_feature.fillna(value=0), length_metric)
return par_feature
# combined preprocessing steps of the char-n-grams
def preprocessing_steps_char_n_grams(par_feature, length_metric):
# Filter features which only occur once
par_feature = trim_df_sum_feature(par_feature, 5)
# Individual relative frequency
par_feature = get_rel_frequency(par_feature.fillna(value=0), length_metric)
return par_feature
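# Hedged usage sketch, not part of the original experiments: chaining the helpers above on a
# balanced article frame. get_char_count and get_char_word_n_grams are defined elsewhere in
# this module; "df_balanced" is a placeholder for any article dataframe.
def example_char_n_gram_preprocessing(df_balanced):
    char_count = get_char_count(df_balanced)
    # raw char-word 3-gram counts -> drop rare n-grams and convert to relative frequencies
    cw_ng = get_char_word_n_grams(df_balanced, 3)
    return preprocessing_steps_char_n_grams(cw_ng, char_count['char_count'])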
# Feature selection with the iterative filter without printing the results in a file
def feature_selection_iterative_filter(par_x_train, par_x_test, par_y_train, par_step, par_classif):
df_sorted_features = sort_features_by_score(par_x_train, par_y_train, par_classif)
# Calculate the best percentiles of the data for the different classifier
best_perc_gnb = get_best_percentile_gnb(par_x_train, par_y_train, 50, df_sorted_features, par_step)[0]
best_perc_svc = get_best_percentile_svc(par_x_train, par_y_train, 50, df_sorted_features, par_step)[0]
best_perc_knn = get_best_percentile_knn(par_x_train, par_y_train, 50, df_sorted_features, par_step)[0]
# select the 1 percent of the features (len/100) multiplied by par_best_percent
# select the best features from the original dataset
df_x_train_gnb = par_x_train[
df_sorted_features['feature_names'][: round(best_perc_gnb * (len(par_x_train.columns) / 100))].tolist()]
df_x_test_gnb = par_x_test[
df_sorted_features['feature_names'][: round(best_perc_gnb * (len(par_x_train.columns) / 100))].tolist()]
df_x_train_svc = par_x_train[
df_sorted_features['feature_names'][: round(best_perc_svc * (len(par_x_train.columns) / 100))].tolist()]
df_x_test_svc = par_x_test[
df_sorted_features['feature_names'][: round(best_perc_svc * (len(par_x_train.columns) / 100))].tolist()]
df_x_train_knn = par_x_train[
df_sorted_features['feature_names'][: round(best_perc_knn * (len(par_x_train.columns) / 100))].tolist()]
df_x_test_knn = par_x_test[
df_sorted_features['feature_names'][: round(best_perc_knn * (len(par_x_train.columns) / 100))].tolist()]
return df_x_train_gnb, df_x_test_gnb, df_x_train_svc, df_x_test_svc, df_x_train_knn, df_x_test_knn
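# Hedged usage sketch (assumption, not original code): wiring the iterative filter above into a
# 60/40 split before scoring. get_f1_for_svc and mutual_info_classif are used as elsewhere in
# this module; "df_features" and "label" are placeholders.
def example_iterative_filter_usage(df_features, label, cv):
    x_train, x_test, y_train, y_test = train_test_split(df_features, label, test_size=0.4,
                                                         random_state=42, stratify=label)
    _, _, x_svc_train, x_svc_test, _, _ = \
        feature_selection_iterative_filter(x_train, x_test, y_train, 1.0, mutual_info_classif)
    # score only the SVC split here; the GNB and KNN splits are used the same way
    return get_f1_for_svc(x_svc_train, x_svc_test, y_train, y_test, cv)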
# Chapter 7.3.5. function to compare the pos-tag-n-grams
def compare_pos_tag_ngrams(par_author_texts, par_authors, par_base_path, par_df):
# save the results in a dictionary
dic_f1_results = {'pos_2_gnb': [], 'pos_2_svc': [], 'pos_2_knn': [],
'pos_3_gnb': [], 'pos_3_svc': [], 'pos_3_knn': [],
'pos_4_gnb': [], 'pos_4_svc': [], 'pos_4_knn': [],
'pos_5_gnb': [], 'pos_5_svc': [], 'pos_5_knn': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(par_df, number_authors, number_texts)
df_balanced = par_df.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Get the scores for every feature
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
for feature in ["pos_2", "pos_3", "pos_4", "pos_5"]:
# read the data based on n, texts and authors
if feature == "pos_2":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_pos_tag_2_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_pos_tag_2_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_pos_tag_2_gram_filtered.csv")
elif feature == "pos_3":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_pos_tag_3_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_pos_tag_3_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_pos_tag_3_gram_filtered.csv")
elif feature == "pos_4":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_pos_tag_4_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_pos_tag_4_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_pos_tag_4_gram_filtered.csv")
elif feature == "pos_5":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_pos_tag_5_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_pos_tag_5_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_pos_tag_5_gram_filtered.csv")
                # Scale the features, otherwise the SVM takes a long time with very small values.
scaler = StandardScaler()
df_gnb[df_gnb.columns] = scaler.fit_transform(df_gnb[df_gnb.columns])
df_svc[df_svc.columns] = scaler.fit_transform(df_svc[df_svc.columns])
df_knn[df_knn.columns] = scaler.fit_transform(df_knn[df_knn.columns])
# Train/Test 60/40 split
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test, label_train, label_test = \
train_test_split(df_gnb, df_svc, df_knn, label, test_size=0.4, random_state=42, stratify=label)
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {number_authors} authors and {number_texts} texts: {gnb_score}")
print(f"SVC-Score for {feature} with {number_authors} authors and {number_texts} texts: {svc_score}")
print(f"KNN-Score for {feature} with {number_authors} authors and {number_texts} texts: {knn_score}")
return pd.DataFrame(dic_f1_results)
# Chapter 7.3.5. complete process of the pos-tag-n-grams comparison
def compare_pos_n_grams_process(par_base_path):
df_all_texts = pd.read_csv(f"musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
author_counts = [25]
text_counts = [10, 15, 25, 50, 75, 100]
for number_authors in author_counts:
for number_texts in text_counts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
word_count = get_word_count(df_balanced)
# extract features and preprocessing
for n in range(2, 6):
pt_ng = get_pos_tags_n_grams(df_balanced, n)
preprocessing_steps_pos_tag_n_grams(pt_ng, word_count['word_count']) \
.to_csv(f"{par_base_path}csv_before_filter/"
f"a{number_authors}_t{number_texts}_pos_tag_{n}_gram.csv", index=False)
iterative_filter_process(par_base_path, df_balanced, number_texts, number_authors)
            # the 2-grams for SVC are not filtered; overwrite them with the unfiltered version for SVC
pt_ng = get_pos_tags_n_grams(df_balanced, 2)
preprocessing_steps_pos_tag_n_grams(pt_ng, word_count['word_count']) \
.to_csv(f"{par_base_path}csv_after_filter/"
f"svc_a{number_authors}_t{number_texts}_pos_tag_2_gram_filtered.csv", index=False)
compare_pos_tag_ngrams(text_counts, author_counts, par_base_path, df_all_texts) \
.to_csv(f"{par_base_path}results/pos_tag_n_grams.csv", index=False)
# Method to print all features for different counts of authors and texts
# Including all Preprocessing steps and filtering
def print_all_features_svc(par_base_path, par_article_path):
df_all_texts = pd.read_csv(f"{par_article_path}", sep=',', encoding="utf-8")
author_counts = [2, 3, 4, 5, 10, 15, 25]
text_counts = [5, 10, 15, 25, 50, 75, 100]
for number_authors in author_counts:
for number_texts in text_counts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
# get all the features
df_bow = get_bow_matrix(df_balanced)
df_word_2g = get_word_n_grams(df_balanced, 2)
df_word_count = get_word_count(df_balanced)
df_word_length = get_word_length_matrix_with_margin(df_balanced, 20)
df_yules_k = get_yules_k(df_balanced)
sc_label_vector = ["!", "„", "“", "§", "$", "%", "&", "/", "(", ")", "=", "?", "{", "}", "[", "]", "\\",
"@", "#",
"‚", "‘", "-", "_", "+", "*", ".", ",", ";"]
special_char_matrix = get_special_char_matrix(df_balanced, sc_label_vector)
sc_label_vector = ["s_char:" + sc for sc in sc_label_vector]
df_special_char = pd.DataFrame(data=special_char_matrix, columns=sc_label_vector)
df_char_affix_4g = get_char_affix_n_grams(df_balanced, 4)
df_char_word_3g = get_char_word_n_grams(df_balanced, 3)
df_char_punct_3g = get_char_punct_n_grams(df_balanced, 3)
df_digits = get_sum_digits(df_balanced)
df_fwords = get_function_words(df_balanced)
df_pos_tags = get_pos_tags(df_balanced)
df_pos_tag_2g = get_pos_tags_n_grams(df_balanced, 2)
df_start_pos, df_end_pos = get_sentence_end_start(df_balanced)
df_start_end_pos = pd.concat([df_start_pos, df_end_pos], axis=1)
df_fre = get_flesch_reading_ease_vector(df_balanced)
# 7.1.1 Remove low occurrence
df_bow = trim_df_by_occurrence(df_bow, 1)
df_word_2g = trim_df_by_occurrence(df_word_2g, 1)
df_fwords = trim_df_by_occurrence(df_fwords, 1)
df_pos_tag_2g = trim_df_by_occurrence(df_pos_tag_2g, 1)
df_char_affix_4g = trim_df_sum_feature(df_char_affix_4g, 5)
df_char_word_3g = trim_df_sum_feature(df_char_word_3g, 5)
df_char_punct_3g = trim_df_sum_feature(df_char_punct_3g, 5)
# 7.1.2 Remove high frequency
df_bow = trim_df_by_doc_freq(df_bow, 0.5)
df_word_2g = trim_df_by_doc_freq(df_word_2g, 0.5)
df_fwords = trim_df_by_doc_freq(df_fwords, 0.5)
# 7.1.4 individual relative frequency
df_len_metrics = pd.concat([get_char_count(df_balanced), get_sentence_count(df_balanced),
df_word_count], axis=1)
df_bow = get_rel_frequency(df_bow.fillna(value=0), df_len_metrics['word_count'])
df_word_2g = get_rel_frequency(df_word_2g.fillna(value=0), df_len_metrics['word_count'])
df_word_length = get_rel_frequency(df_word_length.fillna(value=0), df_len_metrics['word_count'])
df_special_char = get_rel_frequency(df_special_char.fillna(value=0), df_len_metrics['char_count'])
df_char_affix_4g = get_rel_frequency(df_char_affix_4g.fillna(value=0), df_len_metrics['char_count'])
df_char_word_3g = get_rel_frequency(df_char_word_3g.fillna(value=0), df_len_metrics['char_count'])
df_char_punct_3g = get_rel_frequency(df_char_punct_3g.fillna(value=0), df_len_metrics['char_count'])
df_digits = get_rel_frequency(df_digits.fillna(value=0), df_len_metrics['char_count'])
df_fwords = get_rel_frequency(df_fwords.fillna(value=0), df_len_metrics['word_count'])
df_pos_tags = get_rel_frequency(df_pos_tags.fillna(value=0), df_len_metrics['word_count'])
df_pos_tag_2g = get_rel_frequency(df_pos_tag_2g.fillna(value=0), df_len_metrics['word_count'])
df_start_end_pos = get_rel_frequency(df_start_end_pos.fillna(value=0), df_len_metrics['sentence_count'])
# Print to CSV
# Files for iterative filter
df_bow.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}_bow.csv", index=False)
df_word_2g.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}"
f"_word_2_gram.csv", index=False)
df_char_affix_4g.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}"
f"_char_affix_4_gram.csv", index=False)
df_char_word_3g.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}"
f"_char_word_3_gram.csv", index=False)
df_char_punct_3g.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}"
f"_char_punct_3_gram.csv", index=False)
df_fwords.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}"
f"_function_words.csv", index=False)
# Files not for iterative filter directly in after filter folder
df_word_count.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_word_count.csv", index=False)
df_word_length.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_word_length.csv", index=False)
df_yules_k.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_yules_k.csv", index=False)
df_special_char.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_special_char.csv", index=False)
df_digits.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_digits.csv", index=False)
df_pos_tags.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_pos_tag.csv", index=False)
df_pos_tag_2g.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_pos_tag_2_gram.csv", index=False)
df_start_end_pos.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_pos_tag_start_end.csv", index=False)
df_fre.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}_fre.csv", index=False)
print(f"Extraction for {number_authors} authors with {number_texts} texts done. Starting iterative filter")
# Run the iterative filter
iterative_filter_process_svm(par_base_path, df_balanced, number_texts, number_authors)
# create a dataframe with the combined features for a specific number of authors and texts
# features can be excluded by name
def create_df_combined_features(par_path, par_num_texts, par_num_authors, par_exclude):
path = f'{par_path}csv_after_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
# Filter the files for author and text numbers
r = re.compile(f"a{par_num_authors}_")
files = list(filter(r.match, files))
r = re.compile(f".*t{par_num_texts}_")
files = list(filter(r.match, files))
# exclude a feature by regex
regex = re.compile(f'.*{par_exclude}')
files = [i for i in files if not regex.match(i)]
df_all = pd.DataFrame()
# combine all features
for feature in files:
df_feature = pd.read_csv(f"{par_path}csv_after_filter/{feature}", sep=',', encoding="utf-8")
df_all = pd.concat([df_all, df_feature], axis=1)
return df_all
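# Hedged usage example (assumption): the base path and counts below are placeholders.
# df_combined = create_df_combined_features("daten/8_model/", 100, 25, "nothing")
# df_wo_bow = create_df_combined_features("daten/8_model/", 100, 25, "bow")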
# Chapter 8.4. comparison of normalization and standardization
def compare_normalization_standardization(par_article_path, par_feature_path, par_author_texts, par_authors):
df_all_texts = pd.read_csv(f"{par_article_path}", sep=',', encoding="utf-8")
dic_f1_results = {'without': [], 'standard': [], 'normal': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
df_features = create_df_combined_features(par_feature_path, number_texts, number_authors, "nothing")
# standardization of features
df_features_stand = copy.deepcopy(df_features)
scaler = StandardScaler()
df_features_stand[df_features_stand.columns] = \
scaler.fit_transform(df_features_stand[df_features_stand.columns])
# normalization of features
df_features_norm = copy.deepcopy(df_features)
normalizer = Normalizer()
df_features_norm[df_features_norm.columns] = \
normalizer.fit_transform(df_features_norm[df_features_norm.columns])
x_train, x_test, x_train_stand, x_test_stand, x_train_norm, x_test_norm, label_train, label_test = \
train_test_split(df_features, df_features_stand, df_features_norm, label,
test_size=0.4, random_state=42, stratify=label)
# append the results
dic_f1_results['without'].append(get_f1_for_svc(x_train, x_test, label_train, label_test, cv))
dic_f1_results['standard'].append(get_f1_for_svc(x_train_stand, x_test_stand, label_train,
label_test, cv))
dic_f1_results['normal'].append(get_f1_for_svc(x_train_norm, x_test_norm, label_train,
label_test, cv))
print(f"Scores for {number_authors} authors with {number_texts} texts created.")
return pd.DataFrame(dic_f1_results)
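# Illustrative sketch, not part of the original experiments: StandardScaler standardizes each
# column to zero mean and unit variance, while Normalizer rescales each row to unit norm; this
# is why the two preprocessing variants compared above can yield different SVC scores.
def example_standard_vs_normal():
    import numpy as np
    demo = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
    standardized = StandardScaler().fit_transform(demo)  # column-wise z-scores
    normalized = Normalizer().fit_transform(demo)        # row-wise unit vectors
    return standardized, normalized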
# Chapter 8.5.1. Comparison of the individual features, data for table 21
def compare_single_features(par_article_path, par_feature_path, par_author_texts, par_authors):
df_all_texts = pd.read_csv(f"{par_article_path}", sep=',', encoding="utf-8")
dic_results = {'number_authors': [], 'number_texts': []}
path = f'{par_feature_path}csv_after_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
# get unique values for the list of the features
feature_list = list(set([re.search(r"a\d+_t\d+_(.+?(?=$))", f).group(1) for f in files]))
for feature in feature_list:
dic_results[feature] = []
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Append authors and texts
dic_results['number_authors'].append(number_authors)
dic_results['number_texts'].append(number_texts)
for feature in feature_list:
df_feature = pd.read_csv(
f"{par_feature_path}csv_after_filter/a{number_authors}_t{number_texts}_{feature}")
# standardization of features
scaler = StandardScaler()
df_feature[df_feature.columns] = \
scaler.fit_transform(df_feature[df_feature.columns])
x_train, x_test, label_train, label_test = \
train_test_split(df_feature, label, test_size=0.4, random_state=42, stratify=label)
dic_results[feature].append(
get_f1_for_svc(x_train, x_test, label_train, label_test, cv))
print(f"Scores for {number_authors} authors with {number_texts} texts created.")
return pd.DataFrame(dic_results)
# Chapter 8.5.2. Get the values of the difference functions, data for table 22
def get_feature_function_difference(par_article_path, par_feature_path, par_author_texts, par_authors):
df_all_texts = pd.read_csv(f"{par_article_path}", sep=',', encoding="utf-8")
dic_f1_wo_feature = {'wo_bow': [], 'wo_word_2_gram': [], 'wo_word_count': [], 'wo_word_length': [],
'wo_yules_k': [], 'wo_special_char': [], 'wo_char_affix': [], 'wo_char_word': [],
'wo_char_punct': [], 'wo_digits': [], 'wo_function_words': [], 'wo_pos_tag.csv': [],
'wo_pos_tag_2_gram': [], 'wo_pos_tag_start_end': [], 'wo_fre': [], 'number_authors': [],
'number_texts': []}
dic_f1_diff_feature = {'diff_bow': [], 'diff_word_2_gram': [], 'diff_word_count': [], 'diff_word_length': [],
'diff_yules_k': [], 'diff_special_char': [], 'diff_char_affix': [], 'diff_char_word': [],
'diff_char_punct': [], 'diff_digits': [], 'diff_function_words': [], 'diff_pos_tag.csv': [],
'diff_pos_tag_2_gram': [], 'diff_pos_tag_start_end': [], 'diff_fre': [],
'number_authors': [],
'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Append authors and texts
dic_f1_wo_feature['number_authors'].append(number_authors)
dic_f1_wo_feature['number_texts'].append(number_texts)
dic_f1_diff_feature['number_authors'].append(number_authors)
dic_f1_diff_feature['number_texts'].append(number_texts)
# Read the f1 Score from the previous calculations
df_score_all = pd.read_csv(f"{par_feature_path}/results/compared_stand_normal.csv")
f1_score_all = df_score_all.loc[(df_score_all['number_authors'] == number_authors) &
(df_score_all['number_texts'] == number_texts)]['standard'].iloc[0]
for key in dic_f1_diff_feature:
if key != "number_authors" and key != "number_texts":
key = re.search(r'.+?(?=_)_(.*)', key).group(1)
# exclude the specific feature
df_features = create_df_combined_features(par_feature_path, number_texts, number_authors, key)
# standardization of features
scaler = StandardScaler()
df_features[df_features.columns] = \
scaler.fit_transform(df_features[df_features.columns])
x_train, x_test, label_train, label_test = \
train_test_split(df_features, label, test_size=0.4, random_state=42, stratify=label)
# append the results
score_wo = get_f1_for_svc(x_train, x_test, label_train, label_test, cv)
dic_f1_wo_feature[f'wo_{key}'].append(score_wo)
dic_f1_diff_feature[f'diff_{key}'].append(f1_score_all - score_wo)
print(f"{key} done for {number_authors} authors and {number_texts} texts.")
return pd.DataFrame(dic_f1_wo_feature), pd.DataFrame(dic_f1_diff_feature)
# Chapter 8.5.3. Comparison of the model with or without content features, picture 28
def compare_content_features(par_article_path, par_feature_path, par_author_texts, par_authors):
df_all_texts = pd.read_csv(f"{par_article_path}", sep=',', encoding="utf-8")
dic_results = {'wo_content_features': [], 'with_content_features': [], 'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Append authors and texts
dic_results['number_authors'].append(number_authors)
dic_results['number_texts'].append(number_texts)
# calculate f1 with all features
df_all = create_df_combined_features(par_feature_path, number_texts, number_authors, "nothing")
# standardization of features
scaler = StandardScaler()
df_all[df_all.columns] = \
scaler.fit_transform(df_all[df_all.columns])
x_train, x_test, label_train, label_test = \
train_test_split(df_all, label, test_size=0.4, random_state=42, stratify=label)
dic_results['with_content_features'].append(get_f1_for_svc(x_train, x_test, label_train, label_test, cv))
# calculate f1 without content features
df_wo_content = create_df_combined_features(par_feature_path, number_texts, number_authors,
"(word_count|word_2_gram|char_word_3_gram|bow)")
# standardization of features
scaler = StandardScaler()
df_wo_content[df_wo_content.columns] = \
scaler.fit_transform(df_wo_content[df_wo_content.columns])
x_train, x_test, label_train, label_test = \
train_test_split(df_wo_content, label, test_size=0.4, random_state=42, stratify=label)
dic_results['wo_content_features'].append(
get_f1_for_svc(x_train, x_test, label_train, label_test, cv))
print(f"{number_authors} authors with {number_texts} texts compared.")
    return pd.DataFrame(dic_results)
import pandas as pd
import sys
# from urllib import urlopen # python2
from urllib.request import urlopen
#try:
# from rpy2.robjects.packages import importr
# try:
# biomaRt = importr("biomaRt")
# except:
# print "rpy2 could be loaded but 'biomaRt' could not be found.\nIf you want to use 'biomaRt' related functions please install 'biomaRt' in R.\n\n$ R\n> source('http://bioconductor.org/biocLite.R')\n> biocLite()\n> biocLite('biomaRt')\n> quit()"
# sys.stdout.flush()
#except:
# print "Failed to import rpy2 module.\nPlease make sure you are using the same version of R you had when AGEpy was installed."
# sys.stdout.flush()
import biomart
from biomart import BiomartServer
def organismsKEGG():
"""
Lists all organisms present in the KEGG database.
:returns: a dataframe containing one organism per row.
"""
organisms=urlopen("http://rest.kegg.jp/list/organism").read()
organisms=organisms.decode().split("\n")
#for o in organisms:
# print o
# sys.stdout.flush()
organisms=[ s.split("\t") for s in organisms ]
organisms=pd.DataFrame(organisms)
return organisms
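# Hedged usage example (assumption): column 1 of the returned frame holds the KEGG organism
# code (e.g. 'hsa', 'cel'), column 2 the organism name.
# >>> orgs = organismsKEGG()
# >>> orgs[orgs[2].str.contains("elegans", na=False)]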
def databasesKEGG(organism,ens_ids):
"""
    Finds the KEGG-linked database identifier for a given organism from example Ensembl ids.
:param organism: an organism as listed in organismsKEGG()
:param ens_ids: a list of ensenbl ids of the respective organism
:returns: nothing if no database was found, or a string if a database was found
"""
all_genes=urlopen("http://rest.kegg.jp/list/"+organism).read()
all_genes=all_genes.decode().split("\n")
dbs=[]
while len(dbs) == 0:
for g in all_genes:
if len(dbs) == 0:
kid = g.split("\t")[0]
gene=urlopen("http://rest.kegg.jp/get/"+kid).read()
DBLINKS=gene.decode().split("\n")
DBLINKS=[ s for s in DBLINKS if ":" in s ]
for d in DBLINKS:
test=d.split(" ")
test=test[len(test)-1]
if test in ens_ids:
DBLINK=[ s for s in DBLINKS if test in s ]
DBLINK=DBLINK[0].split(":")
DBLINK=DBLINK[len(DBLINK)-2]
dbs.append(DBLINK)
else:
break
ens_db=dbs[0].split(" ")
ens_db=ens_db[len(ens_db)-1]
test_db=urlopen("http://rest.genome.jp/link/"+ens_db+"/"+organism).read()
test_db=test_db.decode().split("\n")
if len(test_db) == 1:
print("For "+organism+" the following db was found: "+ens_db)
        print("This database does not seem to be a valid KEGG-linked database identifier")
print("For \n'hsa' use 'ensembl-hsa'\n'mmu' use 'ensembl-mmu'\n'cel' use 'EnsemblGenomes-Gn'\n'dme' use 'FlyBase'")
sys.stdout.flush()
ens_db = None
else:
print("For "+organism+" the following db was found: "+ens_db)
sys.stdout.flush()
return ens_db
def ensembl_to_kegg(organism,kegg_db):
"""
Looks up KEGG mappings of KEGG ids to ensembl ids
    :param organism: an organism as listed in organismsKEGG()
:param kegg_db: a matching KEGG db as reported in databasesKEGG
    :returns: a Pandas dataframe with 'KEGGid' and 'ENSid'.
"""
print("KEGG API: http://rest.genome.jp/link/"+kegg_db+"/"+organism)
sys.stdout.flush()
kegg_ens=urlopen("http://rest.genome.jp/link/"+kegg_db+"/"+organism).read()
kegg_ens=kegg_ens.decode().split("\n")
final=[]
for i in kegg_ens:
final.append(i.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
ens_id=pd.DataFrame(df[1].str.split(":").tolist())[1]
df=pd.concat([df,ens_id],axis=1)
df.columns=['KEGGid','ensDB','ENSid']
df=df[['KEGGid','ENSid']]
return df
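# Hedged usage sketch (assumption): resolving the KEGG-linked database for C. elegans from a
# few Ensembl/WormBase gene ids and then mapping KEGG ids to Ensembl ids; the example ids are
# placeholders.
def example_kegg_to_ensembl_mapping():
    ens_ids = ["WBGene00000001", "WBGene00000002"]  # placeholder ids, assumption
    kegg_db = databasesKEGG("cel", ens_ids)
    return ensembl_to_kegg("cel", kegg_db) if kegg_db else None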
def ecs_idsKEGG(organism):
"""
Uses KEGG to retrieve all ids and respective ecs for a given KEGG organism
    :param organism: an organism as listed in organismsKEGG()
    :returns: a Pandas dataframe with 'ec' and 'KEGGid'.
"""
kegg_ec=urlopen("http://rest.kegg.jp/link/"+organism+"/enzyme").read()
kegg_ec=kegg_ec.decode().split("\n")
final=[]
for k in kegg_ec:
final.append(k.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
df.columns=['ec','KEGGid']
return df
def idsKEGG(organism):
"""
Uses KEGG to retrieve all ids for a given KEGG organism
:param organism: an organism as listed in organismsKEGG()
    :returns: a Pandas dataframe with 'gene_name' and 'KEGGid'.
"""
ORG=urlopen("http://rest.kegg.jp/list/"+organism).read()
ORG=ORG.decode().split("\n")
final=[]
for k in ORG:
final.append(k.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
df.columns=['KEGGid','description']
field = pd.DataFrame(df['description'].str.split(';',1).tolist())[0]
field = pd.DataFrame(field)
df = pd.concat([df[['KEGGid']],field],axis=1)
df.columns=['KEGGid','gene_name']
df=df[['gene_name','KEGGid']]
return df
def pathwaysKEGG(organism):
"""
Retrieves all pathways for a given organism.
:param organism: an organism as listed in organismsKEGG()
:returns df: a Pandas dataframe with the columns 'KEGGid','pathIDs', and 'pathName'.
:returns df_: a Pandas dataframe with a columns for 'KEGGid', and one column for each pathway with the corresponding gene ids below
"""
print("KEGG API: http://rest.kegg.jp/list/pathway/"+organism)
sys.stdout.flush()
kegg_paths=urlopen("http://rest.kegg.jp/list/pathway/"+organism).read()
kegg_paths=kegg_paths.decode().split("\n")
final=[]
for k in kegg_paths:
final.append(k.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
df.columns=['pathID','pathName']
print("KEGG API: http://rest.kegg.jp/link/"+organism+"/pathway/")
sys.stdout.flush()
kegg_paths_genes=urlopen("http://rest.kegg.jp/link/"+organism+"/pathway/").read()
kegg_paths_genes=kegg_paths_genes.decode().split("\n")
kegg_paths_genes=[ s.split("\t") for s in kegg_paths_genes ]
kegg_paths_genes=pd.DataFrame(kegg_paths_genes)
kegg_paths_genes.columns=['pathID','KEGGid']
df=pd.merge(kegg_paths_genes,df,on=["pathID"],how="outer")
def CombineAnn(df):
return pd.Series(dict(KEGGid = ', '.join([ s for s in list(set(df['KEGGid'])) if str(s) != "nan" ] ) ,
pathIDs = ', '.join([ s for s in list(set(df['pathID'])) if str(s) != "nan" ]),
pathName = ', '.join([ s for s in list(set(df['pathName'])) if str(s) != "nan" ] ) ) )
df=df.groupby('KEGGid',as_index=True).apply(CombineAnn)
df.reset_index(inplace=True, drop=True)
df_=kegg_paths_genes[['KEGGid']].drop_duplicates()
for c in list(set(kegg_paths_genes["pathID"].tolist())):
tmp=kegg_paths_genes[kegg_paths_genes["pathID"]==c][["KEGGid"]].drop_duplicates().dropna()
tmp.columns=[c]
df_=pd.merge(df_,tmp,left_on=["KEGGid"],right_on=[c],how="outer")
return df, df_
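# Hedged usage example (assumption): the first frame maps each KEGG id to its pathway ids and
# names, the second is a wide table with one column per pathway.
# >>> df_paths, df_paths_wide = pathwaysKEGG("cel")
# >>> df_paths.head()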
def biomaRtTOkegg(df):
"""
Transforms a pandas dataframe with the columns 'ensembl_gene_id','kegg_enzyme'
    to a dataframe ready for use in ...
:param df: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
:returns: a pandas dataframe with the following columns: 'ensembl_gene_id','kegg_enzyme'
"""
df=df.dropna()
ECcols=df.columns.tolist()
df.reset_index(inplace=True,drop=True)
# field = ECsb[['kegg_enzyme']]
field = pd.DataFrame(df['kegg_enzyme'].str.split('+',1).tolist())[1]
field = pd.DataFrame(field)
df=pd.concat([df[['ensembl_gene_id']],field],axis=1)
df.columns=ECcols
df.drop_duplicates(inplace=True)
df.reset_index(inplace=True,drop=True)
plus=df['kegg_enzyme'].tolist()
plus=[ s for s in plus if "+" in s ]
noPlus=df[~df['kegg_enzyme'].isin(plus)]
plus=df[df['kegg_enzyme'].isin(plus)]
noPlus.reset_index(inplace=True, drop=True)
plus.reset_index(inplace=True, drop=True)
for p in range(0,len(plus)):
        enz=plus.loc[p]['kegg_enzyme']  # .loc instead of the removed .ix indexer
enz=enz.split("+")
        enz=pd.DataFrame(enz)
# Data handling
import numpy as np
import pandas as pd
# Plotting
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# Various models and data processing utilities
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score, StratifiedKFold, learning_curve
from sklearn.linear_model import LogisticRegression, Perceptron, SGDClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from sklearn.metrics import precision_score
import warnings
data = r'E:\OpenSourceDatasetCode\Dataset\Titiannic Disaster'
train_df = pd.read_csv(data + r'\train.csv')
test_df = pd.read_csv(data + r'\test.csv')
combine_df = pd.concat([train_df, test_df])
# NameLength
train_df.groupby(train_df.Name.apply(lambda x: len(x)))['Survived'].mean().plot()
combine_df['Name_Len'] = combine_df['Name'].apply(lambda x: len(x))
combine_df['Name_Len'] = pd.qcut(combine_df['Name_Len'], 5)
combine_df.groupby(combine_df['Name'].apply(lambda x: x.split(', ')[1]).apply(lambda x: x.split('.')[0]))[
'Survived'].mean().plot()
# Title
combine_df['Title'] = combine_df['Name'].apply(lambda x: x.split(', ')[1]).apply(lambda x: x.split('.')[0])
combine_df['Title'] = combine_df['Title'].replace(
['Don', 'Dona', 'Major', 'Capt', 'Jonkheer', 'Rev', 'Col', 'Sir', 'Dr'], 'Mr')
combine_df['Title'] = combine_df['Title'].replace(['Mlle', 'Ms'], 'Miss')
combine_df['Title'] = combine_df['Title'].replace(['the Countess', 'Mme', 'Lady', 'Dr'], 'Mrs')
df = pd.get_dummies(combine_df['Title'], prefix='Title')
combine_df = pd.concat([combine_df, df], axis=1)
combine_df['Fname'] = combine_df['Name'].apply(lambda x: x.split(',')[0])
combine_df['Familysize'] = combine_df['SibSp'] + combine_df['Parch']
# Families in which a female (age >= 12) died
dead_female_Fname = list(set(combine_df[(combine_df.Sex == 'female') & (combine_df.Age >= 12)
& (combine_df.Survived == 0) & (combine_df.Familysize >= 1)]['Fname'].values))
# Families in which a male (age >= 12) survived
survive_male_Fname = list(set(combine_df[(combine_df.Sex == 'male') & (combine_df.Age >= 12)
& (combine_df.Survived == 1) & (combine_df.Familysize >= 1)]['Fname'].values))
combine_df['Dead_female_family'] = np.where(combine_df['Fname'].isin(dead_female_Fname), 0, 1)
combine_df['Survive_male_family'] = np.where(combine_df['Fname'].isin(survive_male_Fname), 0, 1)
# Name->Title
combine_df = combine_df.drop(['Name', 'Fname'], axis=1)
# Add a flag for children
group = combine_df.groupby(['Title', 'Pclass'])['Age']
combine_df['Age'] = group.transform(lambda x: x.fillna(x.median()))
combine_df = combine_df.drop('Title', axis=1)
combine_df['IsChild'] = np.where(combine_df['Age'] <= 12, 1, 0)
combine_df['Age'] = pd.cut(combine_df['Age'], 5)
combine_df = combine_df.drop('Age', axis=1)
# Discretize the Familysize feature created above
combine_df['Familysize'] = np.where(combine_df['Familysize'] == 0, 'ALone',
np.where(combine_df['Familysize'] <= 3, 'Normal', 'Big'))
df = pd.get_dummies(combine_df['Familysize'], prefix='Familysize')
combine_df = pd.concat([combine_df, df], axis=1).drop(['SibSp', 'Parch', 'Familysize'], axis=1)
# ticket
combine_df['Ticket_Lett'] = combine_df['Ticket'].apply(lambda x: str(x)[0])
combine_df['Ticket_Lett'] = combine_df['Ticket_Lett'].apply(lambda x: str(x))
combine_df['High_Survival_Ticket'] = np.where(combine_df['Ticket_Lett'].isin(['1', '2', 'P']), 1, 0)
combine_df['Low_Survival_Ticket'] = np.where(combine_df['Ticket_Lett'].isin(['A', 'W', '3', '7']), 1, 0)
combine_df = combine_df.drop(['Ticket', 'Ticket_Lett'], axis=1)
# Fill missing Embarked values with 'S'
combine_df.Embarked = combine_df.Embarked.fillna('S')
df = pd.get_dummies(combine_df['Embarked'], prefix='Embarked')
combine_df = pd.concat([combine_df, df], axis=1).drop('Embarked', axis=1)
# Cabin
combine_df['Cabin_isNull'] = np.where(combine_df['Cabin'].isnull(), 0, 1)
combine_df = combine_df.drop('Cabin', axis=1)
# Pclass
df = pd.get_dummies(combine_df['Pclass'], prefix='Pclass')
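# Assumed continuation (not in the original excerpt), following the same pattern as Embarked:
combine_df = pd.concat([combine_df, df], axis=1).drop('Pclass', axis=1)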
from functools import reduce
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
# set jupyter's max row display
pd.set_option("display.max_row", 1000)
# set jupyter's max column width to 50
pd.set_option("display.max_columns", 50)
matplotlib.use("agg")
def load(kommune):
df_confirmed_raw = pd.read_csv(
"data/time_series/time_series_covid-19_nrw_confirmed.csv"
)
df_confirmed = (
df_confirmed_raw[df_confirmed_raw.Kommune == kommune]
.transpose()
.reset_index()
.drop([0])
)
df_confirmed.columns = ["date", "confirmed"]
# df_confirmed.dropna(subset=["confirmed"], inplace=True)
df_confirmed["date"] = pd.to_datetime(df_confirmed["date"])
df_confirmed["confirmed_yesterday"] = (
df_confirmed["confirmed"] - df_confirmed["confirmed"].diff()
)
df_confirmed["confirmed_new"] = df_confirmed["confirmed"].diff()
df_confirmed["confirmed_change_rate"] = df_confirmed["confirmed"].pct_change()
df_recovered_raw = pd.read_csv(
"data/time_series/time_series_covid-19_nrw_recovered.csv"
)
df_recovered = (
df_recovered_raw[df_recovered_raw.Kommune == kommune]
.transpose()
.reset_index()
.drop([0])
)
df_recovered.columns = ["date", "recovered"]
df_recovered.dropna(subset=["recovered"], inplace=True)
df_recovered["date"] = pd.to_datetime(df_recovered["date"])
df_recovered["recovered_delta"] = df_recovered["recovered"].diff()
df_recovered["recovered_change_rate"] = df_recovered["recovered"].pct_change()
df_deaths_raw = pd.read_csv("data/time_series/time_series_covid-19_nrw_deaths.csv")
df_deaths = (
df_deaths_raw[df_deaths_raw.Kommune == kommune]
.transpose()
.reset_index()
.drop([0])
)
df_deaths.columns = ["date", "deaths"]
df_deaths.dropna(subset=["deaths"], inplace=True)
df_deaths["date"] = pd.to_datetime(df_deaths["date"])
df_deaths["deaths_delta"] = df_deaths["deaths"].diff()
df_deaths["deaths_change_rate"] = df_deaths["deaths"].pct_change()
dfs = [df_confirmed, df_recovered, df_deaths]
df = reduce(lambda left, right: pd.merge(left, right, on="date"), dfs)
from datetime import datetime
import pandas as pd
from botocore.exceptions import ClientError
from fbprophet import Prophet
from flask import request
from flask_restx import Namespace, Resource, fields
from core.data import ReturnDocument
from db import Expense, RepositoryException, User
from db.factory import create_repository
from settings import REPOSITORY_NAME, REPOSITORY_SETTINGS
# Database
repository = create_repository(REPOSITORY_NAME, REPOSITORY_SETTINGS)
api = Namespace('expenses', description='For managing expenses')
expense_fields = api.model(
'AddExpense', {
'email': fields.String(description="Email ID of payee", required=True),
'amount': fields.String(description="Description of expense", required=True, min=1),
'date': fields.Date(description='Date of expense in ISO format(yy-mm-dd)'),
'description': fields.String(description='Description of expense'),
'comments': fields.String(description='Additional comments'),
"payor": fields.String(description='Email ID of payor'),
},
)
@api.route('/add/')
class AddExpense(Resource):
@api.expect(expense_fields, validate=False)
def post(self):
"""
Add expense
"""
try:
data = request.get_json(force=True)
email = data['email']
amount = data['amount']
date = data['date']
description = data['description']
comments = data['comments']
payor = data['payor']
exp: Expense = Expense(user_id=email, amount=amount, date=date, description=description, comments=comments,
payor=payor)
repository.add_expense(exp)
return ReturnDocument(exp.id, "success").asdict()
except RepositoryException as err:
return ReturnDocument(err.__doc__, "error").asdict()
except KeyError as err:
return ReturnDocument(f"{err.__str__()}-{err.__doc__}", "error").asdict()
except ValueError as err:
return ReturnDocument(f"{err.__str__()}-{err.__doc__}", "error").asdict()
@api.route('/delete/')
class DeleteExpense(Resource):
model = api.model(
"DeleteExpense", {
"id": fields.String(description="Expense to be deleted")
}
)
@api.expect(model, validate=True)
def post(self):
"""
Delete expense
"""
try:
data = request.get_json(force=True)
id = data['id']
repository.delete_expense(id)
return ReturnDocument(id, "success").asdict()
except RepositoryException as err:
return ReturnDocument(err.__doc__, "error").asdict()
except (KeyError, ValueError) as err:
return ReturnDocument(f"{err.__str__()}-{err.__doc__}", "error").asdict()
@api.route('/')
class GetExpense(Resource):
model = api.model(
"GetExpense", {"id": fields.String()}
)
@api.expect(model)
def post(self):
"""Get details of a particular expense"""
try:
data = request.get_json()
id = data['id']
exp: Expense = repository.get_expense(id)
return ReturnDocument(exp.to_dict(), "success").asdict()
except RepositoryException as err:
return ReturnDocument(err.__doc__, "error").asdict()
except (KeyError, ValueError) as err:
return ReturnDocument(f"{err.__str__()}-{err.__doc__}", "error").asdict()
@api.route('/stats/')
class ExpenseStats(Resource):
model = api.model(
"GetStats", {
'email': fields.String(description="User email ID", required=True),
}
)
@api.expect(model)
def post(self):
exp_list = []
data = request.get_json(force=True)
email_id = data['email']
try:
usr: User = repository.get_user(email_id)
for exp in usr.expense_ids:
exp_obj: Expense = repository.get_expense(exp)
exp_list.append(exp_obj.to_dict())
if not exp_list:
data = {
"exp_list": [],
"area_chart": {},
"bar_chart": {},
"pie_chart": {},
"new_expenses": 0,
"monthly_expenses": 0,
"friends_amount": 0,
"owed_amount": 0
}
else:
df = pd.DataFrame(exp_list).sort_values('date')
df['amount'] = pd.to_numeric(df['amount'])
df['month'] = pd.to_numeric(df["date"].apply(lambda x: x[5:7]))
df['year'] = pd.to_numeric(df["date"].apply(lambda x: x[0:4]))
df['day'] = pd.to_numeric(df["date"].apply(lambda x: x[8:10]))
now = datetime.now()
area_chart = df[df['year'] == now.year].groupby(['date'])['amount'].sum()
bar_chart = df.groupby(['month'])['amount'].sum()
new_expenses = df[(df['year'] == now.year) & (df['month'] == now.month) & (df['day'] == now.day)][
'amount'].sum()
monthly_expenses = df[(df['year'] == now.year) & (df['month'] == now.month)]['amount'].sum()
friends_amount = df[(df['payor'] != email_id) & (df['user_id'] == email_id)]['amount'].sum()
owed_amount = df[(df['payor'] == email_id) & (df['user_id'] != email_id)]['amount'].sum()
pie_chart = df[(df['payor'] != email_id) & (df['user_id'] == email_id)].groupby(['payor'])[
'amount'].sum()
data = {
"exp_list": exp_list,
"area_chart": area_chart.to_dict(),
"bar_chart": bar_chart.to_dict(),
"pie_chart": pie_chart.to_dict(),
"new_expenses": new_expenses,
"monthly_expenses": monthly_expenses,
"friends_amount": friends_amount,
"owed_amount": owed_amount
}
return ReturnDocument(data, "success").asdict()
except RepositoryException as err:
return ReturnDocument(err.__doc__, "error").asdict()
except ClientError as err:
return ReturnDocument(err.__str__(), "error").asdict()
@api.route('/stats/predict/')
class PredictStats(Resource):
model = api.model(
"PredictStats", {
'email': fields.String(description="User email ID", required=True),
}
)
@api.expect(model)
def post(self):
exp_list = []
data = request.get_json(force=True)
email_id = data['email']
try:
usr: User = repository.get_user(email_id)
for exp in usr.expense_ids:
exp_obj: Expense = repository.get_expense(exp)
exp_list.append(exp_obj.to_dict())
if not exp_list or len(exp_list) <= 10:
data = {
"predict_chart": "Insufficient Data (atleast 10 entries required)"
}
else:
df = pd.DataFrame(exp_list).sort_values('date')
df['amount'] = pd.to_numeric(df['amount'])
df['ds'] = pd.to_datetime(df['date'])
"""
Removes non-linear ground reaction force signal drift in a stepwise manner. It is intended
for running ground reaction force data commonly analyzed in the field of Biomechanics. The aerial phase before and after
a given stance phase are used to tare the signal instead of assuming an overall linear trend or signal offset.
Licensed under an MIT License (c) <NAME> 2019
Distributed here: https://github.com/alcantarar/dryft
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def detrend(force_f, aerial, aerial_loc):
"""Remove drift from running ground reaction force signal based on aerial phases.
Parameters
----------
force_f : `ndarray`
Filtered ground reaction force signal [n,]. Using unfiltered signal may cause unreliable results.
aerial : `ndarray`
Array of force signal measured at middle of each aerial phase.
aerial_loc : `ndarray`
Array of frame indexes for values in aerial. output from `aerialforce()`
Returns
-------
force_fd : `ndarray`
Array with shape of force_f, but with drift removed (detrended).
Examples
--------
from dryft import signal
force_fd = signal.detrend(GRF_filt, aerial_vals, aerial_loc)
"""
force_f = force_f.flatten()
# Create NaN array with aerial values at respective frame locations
drift_signal = np.full(force_f.shape, np.nan)
drift_signal[aerial_loc] = aerial
# Use 3rd order spline to fill NaNs, creating the underlying drift of the signal.
drift_signal_p = pd.Series(drift_signal)
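# (Illustrative sketch, not part of the original module: following the comment above, the
# remaining steps would plausibly spline-interpolate the NaNs into a full-length drift
# curve, subtract it from the filtered signal, and return the result, e.g.:)
# drift_filled = drift_signal_p.interpolate(method='spline', order=3, limit_direction='both')
# force_fd = force_f - drift_filled.to_numpy()
# return force_fd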
import os, sys
import numpy as np
import pandas as pd
import pickle
from tqdm import tqdm
import argparse
from sklearn.utils import shuffle
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
#from nltk.stem import PorterStemmer
from pyspark.sql.types import *
from pyspark import SparkFiles
from pyspark.context import SparkContext
from pyspark.sql.session import SparkSession
##################################################################################################
#home = str(Path.home())
home = str('.')
##################################################################################################
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dataset", help="Name of the dataset.", default='darknet')
parser.add_argument("-v", "--vocab_size", type=int, default=10000, help="The number of vocabs.")
parser.add_argument("--num_train", type=int, default=0, help="The number of training samples.")
parser.add_argument("--num_test", type=int, default=0, help="The number of testing and cv samples.")
parser.add_argument("--max_df", default=0.8, type=float)
parser.add_argument("--min_df", default=3, type=int)
parser.add_argument('--remove_short_docs', dest='remove_short_docs', action='store_true', help='Remove any document that has a length less than 5 words.')
parser.add_argument('--remove_long_docs', dest='remove_long_docs', action='store_true', help='Remove any document that has a length more than 500 words.')
parser.set_defaults(remove_short_docs=True)
parser.set_defaults(remove_long_docs=True)
args = parser.parse_args()
if not args.dataset:
parser.error("Need to provide the dataset.")
##################################################################################################
remove_short_document = args.remove_short_docs
remove_long_document = args.remove_long_docs
if args.dataset == 'ng20':
train = fetch_20newsgroups(subset='train', remove=('headers', 'footers', 'quotes'))
test = fetch_20newsgroups(subset='test', remove=('headers', 'footers', 'quotes'))
train_docs = train.data
train_tags = train.target
test_docs = test.data
test_tags = test.target
elif args.dataset == 'dbpedia':
root_dir = os.path.join(home, 'datasets/dbpedia')
train_fn = os.path.join(root_dir, 'train.csv')
df = pd.read_csv(train_fn, header=None)
df.columns = ['label', 'title', 'body']
train_docs = list(df.body)
train_tags = list(df.label - 1)
del df
test_fn = os.path.join(root_dir, 'test.csv')
df = pd.read_csv(test_fn, header=None)
import lehd
import pandas as pd
import geopandas as gpd
import urllib.request
import gzip
from shapely import wkt
class to_geo:
"""
Takes downloaded LEHD data and converts it to GeoDataFrames which can be used for spatial analysis and visualization
"""
def od(df):
gtype = lehd.utils.infer_geog_input(df[df.columns[0]][0])
if gtype == "B":
raise Exception('Joining to blocks is currently not supported')
return None
elif gtype == "BG":
df["state_o"] = df[df.columns[0]].str[:2]
df["state_d"] = df[df.columns[1]].str[:2]
states_for_dl = list(df["state_o"].unique())
gdfo = []
for state in states_for_dl:
dl_url = "https://www2.census.gov/geo/docs/reference/cenpop2010/blkgrp/CenPop2010_Mean_BG" + state + ".txt"
gdf = pd.read_csv(
urllib.request.urlopen(dl_url),
dtype={
'TRACTCE': 'str',
'BLKGRPCE': 'str',
'COUNTYFP': 'str',
'STATEFP': 'str'
}, encoding="ISO-8859-1")
gdf["geoid"] = gdf["STATEFP"] + gdf["COUNTYFP"] + gdf["TRACTCE"] + gdf["BLKGRPCE"]
del gdf["STATEFP"], gdf["COUNTYFP"], gdf["TRACTCE"], gdf["BLKGRPCE"], gdf["POPULATION"]
gdfo.append(gdf)
gdf = pd.concat(gdfo)
# merge the output, and delete an excess ID column
df = pd.merge(df, gdf, how = "left", left_on = "h_geoid_BG", right_on = "geoid")
del df["geoid"]
df["LATITUDE"] = df["LATITUDE"].astype(str)
df["LONGITUDE"] = df["LONGITUDE"].astype(str)
df["geometry"] = "LINESTRING (" + df["LONGITUDE"] + " " + df["LATITUDE"] + ","
del df["LATITUDE"], df["LONGITUDE"]
states_for_dl = list(df["state_d"].unique())
gdfo = []
for state in states_for_dl:
dl_url = "https://www2.census.gov/geo/docs/reference/cenpop2010/blkgrp/CenPop2010_Mean_BG" + state + ".txt"
gdf = pd.read_csv(
urllib.request.urlopen(dl_url),
dtype={
'TRACTCE': 'str',
'BLKGRPCE': 'str',
'COUNTYFP': 'str',
'STATEFP': 'str'
}, encoding="ISO-8859-1")
gdf["geoid"] = gdf["STATEFP"] + gdf["COUNTYFP"] + gdf["TRACTCE"] + gdf["BLKGRPCE"]
del gdf["STATEFP"], gdf["COUNTYFP"], gdf["TRACTCE"], gdf["BLKGRPCE"], gdf["POPULATION"]
gdfo.append(gdf)
gdf = pd.concat(gdfo)
# merge the output, and delete an excess ID column
df = pd.merge(df, gdf, how = "left", left_on = "w_geoid_BG", right_on = "geoid")
del df["geoid"]
df["LATITUDE"] = df["LATITUDE"].astype(str)
df["LONGITUDE"] = df["LONGITUDE"].astype(str)
df["geometry"] = df["geometry"] + df["LONGITUDE"] + " " + df["LATITUDE"] + ")"
del df["LATITUDE"], df["LONGITUDE"]
df['geometry'] = df['geometry'].apply(wkt.loads)
gdf = gpd.GeoDataFrame(df, geometry='geometry')
elif gtype == "CT":
df["state_o"] = df[df.columns[0]].str[:2]
df["state_d"] = df[df.columns[1]].str[:2]
states_for_dl = list(df["state_o"].unique())
gdfo = []
for state in states_for_dl:
dl_url = "https://www2.census.gov/geo/docs/reference/cenpop2010/tract/CenPop2010_Mean_TR" + state + ".txt"
gdf = pd.read_csv(
urllib.request.urlopen(dl_url),
dtype={
'TRACTCE': 'str',
'COUNTYFP': 'str',
'STATEFP': 'str'
}, encoding="ISO-8859-1")
gdf["geoid"] = gdf["STATEFP"] + gdf["COUNTYFP"] + gdf["TRACTCE"]
del gdf["STATEFP"], gdf["COUNTYFP"], gdf["TRACTCE"], gdf["POPULATION"]
gdfo.append(gdf)
gdf = pd.concat(gdfo)
# merge the output, and delete an excess ID column
df = pd.merge(df, gdf, how = "left", left_on = df.columns[0], right_on = "geoid")
del df["geoid"]
df["LATITUDE"] = df["LATITUDE"].astype(str)
df["LONGITUDE"] = df["LONGITUDE"].astype(str)
df["geometry"] = "LINESTRING (" + df["LONGITUDE"] + " " + df["LATITUDE"] + ","
del df["LATITUDE"], df["LONGITUDE"]
states_for_dl = list(df["state_d"].unique())
gdfo = []
for state in states_for_dl:
dl_url = "https://www2.census.gov/geo/docs/reference/cenpop2010/tract/CenPop2010_Mean_TR" + state + ".txt"
gdf = pd.read_csv(
urllib.request.urlopen(dl_url),
dtype={
'TRACTCE': 'str',
'COUNTYFP': 'str',
'STATEFP': 'str'
}, encoding="ISO-8859-1")
gdf["geoid"] = gdf["STATEFP"] + gdf["COUNTYFP"] + gdf["TRACTCE"]
del gdf["STATEFP"], gdf["COUNTYFP"], gdf["TRACTCE"], gdf["POPULATION"]
gdfo.append(gdf)
gdf = pd.concat(gdfo)
# merge the output, and delete an excess ID column
df = pd.merge(df, gdf, how = "left", left_on = df.columns[1], right_on = "geoid")
del df["geoid"]
df["LATITUDE"] = df["LATITUDE"].astype(str)
df["LONGITUDE"] = df["LONGITUDE"].astype(str)
df["geometry"] = df["geometry"] + df["LONGITUDE"] + " " + df["LATITUDE"] + ")"
del df["LATITUDE"], df["LONGITUDE"]
df['geometry'] = df['geometry'].apply(wkt.loads)
gdf = gpd.GeoDataFrame(df, geometry='geometry')
return gdf
def wac(df, geo = "pts"):
"""
df : input data frame from dl_lodes class
geo : str indicating whether to link data to points "pts" or polygons "poly". The default is "pts" since points take up less storage
"""
df["state"] = df[df.columns[0]].str[:2]
states_for_dl = list(df["state"].unique())
gtype = lehd.utils.infer_geog_input(df[df.columns[0]][0])
if geo == "pts":
if gtype == "B":
raise Exception('Joining to blocks is currently not supported')
return None
elif gtype == "BG":
gdfo = []
for state in states_for_dl:
dl_url = "https://www2.census.gov/geo/docs/reference/cenpop2010/blkgrp/CenPop2010_Mean_BG" + state + ".txt"
#print(dl_url)
gdf = pd.read_csv(
urllib.request.urlopen(dl_url),
dtype={
'TRACTCE': 'str',
'BLKGRPCE': 'str',
'COUNTYFP': 'str',
'STATEFP': 'str'
}, encoding="ISO-8859-1")
gdf["geoid"] = gdf["STATEFP"] + gdf["COUNTYFP"] + gdf["TRACTCE"] + gdf["BLKGRPCE"]
del gdf["STATEFP"], gdf["COUNTYFP"], gdf["TRACTCE"], gdf["BLKGRPCE"], gdf["POPULATION"]
gdf = gpd.GeoDataFrame(gdf, geometry=gpd.points_from_xy(x=gdf.LONGITUDE, y=gdf.LATITUDE))
del gdf["LATITUDE"], gdf["LONGITUDE"]
gdfo.append(gdf)
gdf = pd.concat(gdfo)
elif gtype == "CT":
gdfo = []
for state in states_for_dl:
dl_url = "https://www2.census.gov/geo/docs/reference/cenpop2010/tract/CenPop2010_Mean_TR" + state + ".txt"
print(dl_url)
gdf = pd.read_csv(
urllib.request.urlopen(dl_url),
dtype={
'TRACTCE': 'str',
'COUNTYFP': 'str',
'STATEFP': 'str'
}, encoding="ISO-8859-1")
gdf["geoid"] = gdf["STATEFP"] + gdf["COUNTYFP"] + gdf["TRACTCE"]
del gdf["STATEFP"], gdf["COUNTYFP"], gdf["TRACTCE"], gdf["POPULATION"]
gdf = gpd.GeoDataFrame(gdf, geometry=gpd.points_from_xy(x=gdf.LONGITUDE, y=gdf.LATITUDE))
del gdf["LATITUDE"], gdf["LONGITUDE"]
gdfo.append(gdf)
gdf = pd.concat(gdfo)
elif gtype == "C":
gdfo = []
for state in states_for_dl:
dl_url = "https://www2.census.gov/geo/docs/reference/cenpop2010/county/CenPop2010_Mean_CO" + state + ".txt"
print(dl_url)
gdf = pd.read_csv(
urllib.request.urlopen(dl_url),
dtype={
'COUNTYFP': 'str',
'STATEFP': 'str'
}, encoding="ISO-8859-1")
gdf["geoid"] = gdf["STATEFP"] + gdf["COUNTYFP"]
del gdf["STATEFP"], gdf["COUNTYFP"], gdf["POPULATION"],
gdf = gpd.GeoDataFrame(gdf, geometry=gpd.points_from_xy(x=gdf.LONGITUDE, y=gdf.LATITUDE))
del gdf["LATITUDE"], gdf["LONGITUDE"]
gdfo.append(gdf)
gdf = pd.concat(gdfo)
# Packages
# Basic packages
import numpy as np
from scipy import integrate, stats, spatial
from scipy.special import expit, binom
import pandas as pd
import xlrd # help read excel files directly from source into pandas
import copy
import warnings
# Building parameter/computation graph
import inspect
from collections import OrderedDict
# OS/filesystem tools
import time
from datetime import datetime
import random
import string
import os
import shutil
import sys
import cloudpickle
# Distributed computing tools
import dask
import distributed
from dask.distributed import Client
from dask.distributed import as_completed
import itertools
# State Dimensions
# Health states (S, E and D are fixed to 1 dimension)
nI_symp = 2 # number of symptomatic infected states
nI = 2+nI_symp # number of total infected states (disease stages), the +2 are Exposed and I_nonsymptomatic
nR = 2 # number of recovery states (antibody development post-disease, IgM and IgG are two stages)
nHS = 2+nI+nR # number of total health states, the +2: S, D are suspectible and dead
# Age groups (risk groups)
nAge = 9 # In accordance w Imperial #13 report (0-9, 10-19, ... 70-79, 80+)
# Isolation states
nIso = 4 # None/distancing, Case isolation, Hospitalised, Hospital staff
# Testing states
nTest = 4 # untested/negative, Virus positive, Antibody positive, Both positive
stateTensor = np.ones((nAge, nHS, nIso, nTest))
# Population (data from Imperial #13 ages.csv/UK)
agePopulationTotal = 1000.*np.array([8044.056,7642.473,8558.707,9295.024,8604.251,9173.465,7286.777,5830.635,3450.616])
#agePopulationTotal = 1000.*pd.read_csv("https://raw.githubusercontent.com/ImperialCollegeLondon/covid19model/master/data/ages.csv").iloc[3].values[2:]
# Currently: let's work with england population only instead of full UK, as NHS England + CHESS data is much clearer than other regions
agePopulationTotal *= 55.98/66.27 # (google england/uk population 2018, assuming age dist is similar)
agePopulationRatio = agePopulationTotal/np.sum(agePopulationTotal)
# Helper function to adjust average rates to age-aware rates
def adjustRatesByAge_KeepAverageRate(rate, ageRelativeAdjustment, agePopulationRatio=agePopulationRatio, maxOutRate=10):
"""This is a helper function and wont be picked up as a model parameter!"""
if rate == 0:
return np.zeros_like(ageRelativeAdjustment)
if rate >= maxOutRate:
warnings.warn("covidTesting::adjustRatesByAge_KeepAverageRate Input rate {} > maxOutRate {}, returning input rates".format(rate, maxOutRate))
return rate*np.ones_like(ageRelativeAdjustment)
out = np.zeros_like(ageRelativeAdjustment)
out[0] = maxOutRate+1 # just to start the while loop below
while np.sum(out>=maxOutRate)>0:
corrFactor = np.sum(agePopulationRatio/(1+ageRelativeAdjustment))
out = rate * (1+ageRelativeAdjustment) * corrFactor
if np.sum(out>=maxOutRate)>0:
warnings.warn("covidTesting::adjustRatesByAge_KeepAverageRate Adjusted rate larger than {} encountered, reducing ageAdjustment variance by 10%".format(maxOutRate))
tmp_mean = np.mean(ageRelativeAdjustment)
ageRelativeAdjustment = tmp_mean + np.sqrt(0.9)*(ageRelativeAdjustment-tmp_mean)
return out
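# Example (illustrative): redistribute an average rate of 0.1 across the age groups while
# keeping the population-weighted mean fixed, using the age-relative death risk defined below:
# adjustRatesByAge_KeepAverageRate(0.1, ageRelativeAdjustment=relativeDeathRisk_given_COVID_by_age)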
# For calculations see data_cleaning_py.ipynb, calculations from NHS England dataset as per 05 Apr
relativeDeathRisk_given_COVID_by_age = np.array([-0.99742186, -0.99728639, -0.98158438, -0.9830432 , -0.82983414,
-0.84039294, 0.10768979, 0.38432409, 5.13754904])
#ageRelativeDiseaseSeverity = np.array([-0.8, -0.6, -0.3, -0.3, -0.1, 0.1, 0.35, 0.4, 0.5]) # FIXED (above) - this is a guess, find data and fix
#ageRelativeRecoverySpeed = np.array([0.2]*5+[-0.1, -0.2, -0.3, -0.5]) # TODO - this is a guess, find data and fix
ageRelativeRecoverySpeed = np.array([0.]*9) # For now we make it same for everyone, makes calculations easier
# For calculations see data_cleaning_py.ipynb, calculations from NHS England dataset as per 05 Apr
caseFatalityRatioHospital_given_COVID_by_age = np.array([0.00856164, 0.03768844, 0.02321319, 0.04282494, 0.07512237,
0.12550367, 0.167096 , 0.37953452, 0.45757006])
def trFunc_diseaseProgression(
# Basic parameters to adhere to
nonsymptomatic_ratio = 0.86,
# number of days between measurable events
infect_to_symptoms = 5.,
#symptom_to_death = 16.,
symptom_to_recovery = 10., # 20.5, #unrealiticly long for old people
symptom_to_hospitalisation = 5.76,
hospitalisation_to_recovery = 14.51,
IgG_formation = 15.,
# Age related parameters
# for now we'll assume that all hospitalised cases are known (overall 23% of hospitalised COVID patients die. 9% overall case fatality ratio)
caseFatalityRatioHospital_given_COVID_by_age = caseFatalityRatioHospital_given_COVID_by_age,
ageRelativeRecoverySpeed = ageRelativeRecoverySpeed,
# Unknown rates to estimate
nonsymp_to_recovery = 15.,
inverse_IS1_IS2 = 4.,
**kwargs
):
# Now we have all the information to build the age-aware multistage SIR model transition matrix
# The full transition tensor is a sparse map from the Age x HealthState x isolation state to HealthState,
# and thus is a 4th order tensor itself, representing a linear mapping
# from "number of people aged A in health state B and isolation state C to health state D.
trTensor_diseaseProgression = np.zeros((nAge, nHS, nIso, nHS))
# Use basic parameters to regularise inputs
E_IS1 = 1./infect_to_symptoms
# The nonsymptomatic fraction (default 86%) sets the branching ratio E->IN / E->IS1 = 0.86/0.14
E_IN = nonsymptomatic_ratio/(1.-nonsymptomatic_ratio) * E_IS1
# Nonsymptomatic recovery
IN_R1 = 1./nonsymp_to_recovery
IS1_IS2 = 1./inverse_IS1_IS2
IS2_R1 = 1./(symptom_to_recovery-inverse_IS1_IS2)
R1_R2 = 1./IgG_formation
# Disease progression matrix # TODO - calibrate (together with transmissionInfectionStage)
# rows: from-state, cols: to-state (non-symmetric!)
# - this represent excess deaths only, doesn't contain baseline deaths!
# Calculate all non-serious cases that do not end up in hospitals.
# Note that we only have reliable death data from hospitals (NHS England), so we do not model people dying outside hospitals
diseaseProgBaseline = np.array([
# to: E, IN, IS1, IS2, R1, R2, D
[ 0 , E_IN, E_IS1, 0, 0, 0, 0 ], # from E
[ 0, 0, 0, 0, IN_R1, 0, 0 ], # from IN
[ 0 , 0, 0, IS1_IS2, 0, 0, 0 ], # from IS1
[ 0 , 0, 0, 0, IS2_R1, 0, 0 ], # from IS2
[ 0 , 0, 0, 0, 0, R1_R2, 0 ], # from R1
[ 0 , 0, 0, 0, 0, 0, 0 ], # from R2
[ 0 , 0, 0, 0, 0, 0, 0 ] # from D
])
ageAdjusted_diseaseProgBaseline = copy.deepcopy(np.repeat(diseaseProgBaseline[np.newaxis],nAge,axis=0))
# Modify all death and R1 rates:
for ii in range(ageAdjusted_diseaseProgBaseline.shape[1]):
# Adjust death rate by age dependent disease severity
ageAdjusted_diseaseProgBaseline[:,ii,-1] = adjustRatesByAge_KeepAverageRate(
ageAdjusted_diseaseProgBaseline[0,ii,-1],
ageRelativeAdjustment=relativeDeathRisk_given_COVID_by_age
)
# Adjust recovery rate by age dependent recovery speed
ageAdjusted_diseaseProgBaseline[:,ii,-3] = adjustRatesByAge_KeepAverageRate(
ageAdjusted_diseaseProgBaseline[0,ii,-3],
ageRelativeAdjustment=ageRelativeRecoverySpeed,
agePopulationRatio=agePopulationRatio
)
ageAdjusted_diseaseProgBaseline_Hospital = copy.deepcopy(ageAdjusted_diseaseProgBaseline)
# Calculate hospitalisation based rates, for which we do have data. Hospitalisation can end up with deaths
# Make sure that the ratio of recoveries in hospital honour the case fatality ratio appropriately
# IS2 -> death
ageAdjusted_diseaseProgBaseline_Hospital[:,3,-1] = (
# IS2 -> recovery
ageAdjusted_diseaseProgBaseline_Hospital[:,3,-3] * (
# multiply by cfr / (1-cfr) to get correct rate towards death
caseFatalityRatioHospital_given_COVID_by_age/(
1 - caseFatalityRatioHospital_given_COVID_by_age)
)
)
# TODO - time to death might be incorrect overall without an extra delay state, especially for young people
# Non-hospitalised disease progression
for i1 in [0,1,3]:
trTensor_diseaseProgression[:,1:,i1,1:] = ageAdjusted_diseaseProgBaseline
# hospitalised disease progression
trTensor_diseaseProgression[:,1:,2,1:] = ageAdjusted_diseaseProgBaseline_Hospital
return trTensor_diseaseProgression
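# Example (illustrative): with the default arguments the full transition tensor can be built
# once per parameter set and reused at every simulation step:
# trTensor = trFunc_diseaseProgression()  # shape (nAge, nHS, nIso, nHS)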
# Larger data driver approaches, with age distribution, see data_cleaning_R.ipynb for details
ageHospitalisationRateBaseline = pd.read_csv('../data/clean_hosp-epis-stat-admi-summ-rep-2015-16-rep_table_6.csv', sep=',').iloc[:,-1].values
ageHospitalisationRecoveryRateBaseline = 1./pd.read_csv('../data/clean_10641_LoS_age_provider_suppressed.csv', sep=',').iloc[:,-1].values
# Calculate initial hospitalisation (occupancy), that will be used to initialise the model
initBaselineHospitalOccupancyEquilibriumAgeRatio = ageHospitalisationRateBaseline/(ageHospitalisationRateBaseline+ageHospitalisationRecoveryRateBaseline)
# Take into account the NHS work-force in hospitals that for our purposes count as "hospitalised S" population,
# also unaffected by quarantine measures
ageNhsClinicalStaffPopulationRatio = pd.read_csv('../data/clean_nhsclinicalstaff.csv', sep=',').iloc[:,-1].values
# Extra rate of hospitalisation due to COVID-19 infection stages
# TODO - find / estimate data on this (unfortunately true rates are hard to get due to many unknown cases)
# Symptom to hospitalisation is 5.76 days on average (Imperial #8)
infToHospitalExtra = np.array([1e-4, 1e-3, 2e-2, 1e-2])
# We do know at least how age affects these risks:
# For calculations see data_cleaning_py.ipynb, calculations from CHESS dataset as per 05 Apr
relativeAdmissionRisk_given_COVID_by_age = np.array([-0.94886625, -0.96332087, -0.86528671, -0.79828999, -0.61535305,
-0.35214767, 0.12567034, 0.85809052, 3.55950368])
riskOfAEAttandance_by_age = np.array([0.41261361, 0.31560648, 0.3843979 , 0.30475704, 0.26659415,
0.25203475, 0.24970244, 0.31549102, 0.65181376])
# Build the transition tensor from any non-hospitalised state to a hospitalised state
# (being in home quarantine is assumed to affect only the infection probability [below], not the hospitalisation probability)
# caseIsolationHospitalisationRateAdjustment = 1.
# This function takes as input the number of people in given age and health state, and in any non-hospitalised state
# and returns the number of people staying in the same age and health state,
# but now hospitalised (the rest of people remain in whatever state they were in)
def trFunc_HospitalAdmission(
ageHospitalisationRateBaseline = ageHospitalisationRateBaseline,
infToHospitalExtra = infToHospitalExtra,
ageRelativeExtraAdmissionRiskToCovid = relativeAdmissionRisk_given_COVID_by_age * riskOfAEAttandance_by_age,
**kwargs
):
# This tensor will pointwise multiply an nAge x nHS slice of the stateTensor
trTensor_HospitalAdmission = np.zeros((nAge, nHS))
ageAdjusted_infToHospitalExtra = copy.deepcopy(np.repeat(infToHospitalExtra[np.newaxis],nAge,axis=0))
for ii in range(ageAdjusted_infToHospitalExtra.shape[1]):
# Adjust death rate by age dependent disease severity
ageAdjusted_infToHospitalExtra[:,ii] = adjustRatesByAge_KeepAverageRate(
infToHospitalExtra[ii],
ageRelativeAdjustment=ageRelativeExtraAdmissionRiskToCovid
)
# Add baseline hospitalisation to all non-dead states
trTensor_HospitalAdmission[:,:-1] += np.expand_dims(ageHospitalisationRateBaseline,-1)
# Add COVID-caused hospitalisation to all infected states (TODO: This is summation of rates for independent processes, should be correct, but check)
trTensor_HospitalAdmission[:,1:(nI+1)] += ageAdjusted_infToHospitalExtra
return trTensor_HospitalAdmission
def trFunc_HospitalDischarge(
ageHospitalisationRecoveryRateBaseline = ageHospitalisationRecoveryRateBaseline,
dischargeDueToCovidRateMultiplier = 3.,
**kwargs
):
trTensor_HospitalDischarge = np.zeros((nAge, nHS))
# Baseline discharges apply to all non-symptomatic patients (TODO: take into account testing state!)
trTensor_HospitalDischarge[:, :3] += ageHospitalisationRecoveryRateBaseline[:,np.newaxis]
# No discharges for COVID symptomatic people from the hospital until they recover
# TODO - check with health experts if this is correct assumption; probably also depends on testing state
trTensor_HospitalDischarge[:, 3:5] = 0.
trTensor_HospitalDischarge[:, 5:7] = dischargeDueToCovidRateMultiplier * ageHospitalisationRecoveryRateBaseline[:,np.newaxis]
return trTensor_HospitalDischarge
ageSocialMixingBaseline = pd.read_csv('../data/socialcontactdata_UK_Mossong2008_social_contact_matrix.csv', sep=',').iloc[:,1:].values
ageSocialMixingBaseline = (ageSocialMixingBaseline+ageSocialMixingBaseline.T)/2.
ageSocialMixingDistancing = pd.read_csv('../data/socialcontactdata_UK_Mossong2008_social_contact_matrix_with_distancing.csv', sep=',')
from __future__ import print_function
# from: https://github.com/asap-report/carla/blob/racetrack/PythonClient/racetrack/client_controller.py
import os
import argparse
import logging
import random
import time
import pandas as pd
import numpy as np
from scipy.interpolate import splprep, splev
# I need to prepend `sys.path` with '..' to get to the carla module there.
# I'm pre-pending `sys.path` because there are other carla modules specified
# in PYTHONPATH already
import sys
sys.path = ['..'] + sys.path
from carla.client import make_carla_client
from carla.sensor import Camera, Lidar
from carla.settings import CarlaSettings
from carla.tcp import TCPConnectionError
from config import (
IMAGE_SIZE,
IMAGE_DECIMATION,
MIN_SPEED,
DTYPE,
STEER_NOISE,
THROTTLE_NOISE,
STEER_NOISE_NN,
THROTTLE_NOISE_NN,
IMAGE_CLIP_LOWER,
IMAGE_CLIP_UPPER
)
from utils import clip_throttle, print_measurements
from model_predictive_control import MPCController
from proportion_derivative_control import PDController
from gamepad_controller import PadController
def run_carla_client(args):
frames_per_episode = 10000
spline_points = 10000
report = {
'num_episodes': args.num_episodes,
'controller_name': args.controller_name,
'distances': [],
'target_speed': args.target_speed,
}
track_DF = pd.read_csv('racetrack{}.txt'.format(args.racetrack), header=None)
# The track data are rescaled by 100x with relation to Carla measurements
track_DF = track_DF / 100
pts_2D = track_DF.loc[:, [0, 1]].values
tck, u = splprep(pts_2D.T, u=None, s=2.0, per=1, k=3)
u_new = np.linspace(u.min(), u.max(), spline_points)
x_new, y_new = splev(u_new, tck, der=0)
pts_2D = np.c_[x_new, y_new]
steer = 0.0
throttle = 0.5
depth_array = None
if args.controller_name == 'mpc':
weather_id = 2
controller = MPCController(args.target_speed)
elif args.controller_name == 'pd':
weather_id = 1
controller = PDController(args.target_speed)
elif args.controller_name == 'pad':
weather_id = 5
controller = PadController()
elif args.controller_name == 'nn':
# Import it here because importing TensorFlow is time consuming
from neural_network_controller import NNController # noqa
weather_id = 11
controller = NNController(
args.target_speed,
args.model_dir_name,
args.which_model,
args.throttle_coeff_A,
args.throttle_coeff_B,
args.ensemble_prediction,
)
report['model_dir_name'] = args.model_dir_name
report['which_model'] = args.which_model
report['throttle_coeff_A'] = args.throttle_coeff_A
report['throttle_coeff_B'] = args.throttle_coeff_B
report['ensemble_prediction'] = args.ensemble_prediction
with make_carla_client(args.host, args.port) as client:
print('CarlaClient connected')
episode = 0
num_fails = 0
while episode < args.num_episodes:
# Start a new episode
if args.store_data:
depth_storage = np.zeros((
(IMAGE_CLIP_LOWER-IMAGE_CLIP_UPPER) // IMAGE_DECIMATION,
IMAGE_SIZE[1] // IMAGE_DECIMATION,
frames_per_episode
)).astype(DTYPE)
log_dicts = frames_per_episode * [None]
else:
depth_storage = None
log_dicts = None
if args.settings_filepath is None:
# Create a CarlaSettings object. This object is a wrapper around
# the CarlaSettings.ini file. Here we set the configuration we
# want for the new episode.
settings = CarlaSettings()
settings.set(
SynchronousMode=True,
SendNonPlayerAgentsInfo=False,
NumberOfVehicles=0,
NumberOfPedestrians=0,
WeatherId=weather_id,
QualityLevel=args.quality_level
)
settings.randomize_seeds()
# Now we want to add a couple of cameras to the player vehicle.
# We will collect the images produced by these cameras every
# frame.
# Let's add another camera producing ground-truth depth.
camera = Camera('CameraDepth', PostProcessing='Depth', FOV=69.4)
# MD: I got the 69.4 from here: https://click.intel.com/intelr-realsensetm-depth-camera-d435.html
camera.set_image_size(IMAGE_SIZE[1], IMAGE_SIZE[0])
camera.set_position(2.30, 0, 1.30)
settings.add_sensor(camera)
else:
# Alternatively, we can load these settings from a file.
with open(args.settings_filepath, 'r') as fp:
settings = fp.read()
# Now we load these settings into the server. The server replies
# with a scene description containing the available start spots for
# the player. Here we can provide a CarlaSettings object or a
# CarlaSettings.ini file as string.
scene = client.load_settings(settings)
# Choose one player start at random.
num_of_player_starts = len(scene.player_start_spots)
player_start = random.randint(0, max(0, num_of_player_starts - 1))
# Notify the server that we want to start the episode at the
# player_start index. This function blocks until the server is ready
# to start the episode.
print('Starting new episode...', )
client.start_episode(player_start)
status, depth_storage, one_log_dict, log_dicts, distance_travelled = run_episode(
client,
controller,
pts_2D,
depth_storage,
log_dicts,
frames_per_episode,
args.controller_name,
args.store_data
)
if 'FAIL' in status:
num_fails += 1
print(status)
continue
else:
print('SUCCESS: ' + str(episode))
report['distances'].append(distance_travelled)
if args.store_data:
np.save('depth_data/{}_racetrack{}_depth_data{}.npy'.format(args.controller_name, args.racetrack, episode), depth_storage)
pd.DataFrame(log_dicts)
#Creates a BPT diagram for all objects, and a second figure that shows objects for which single lines are low
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
import sys, os, string
import pandas as pd
from astropy.io import fits
import collections
#Folder to save the figures
figout = '/Users/blorenz/COSMOS/Reports/2018/Images/'
#The location with the file for all of our data
fluxdatapath = '/Users/blorenz/COSMOS/COSMOSData/lineflux.txt'
#Location of the equivalent width data
ewdata = '/Users/blorenz/COSMOS/COSMOSData/lineew.txt'
#Read in the ew of the lines
ew_df = ascii.read(ewdata).to_pandas()
#The location to store the scale and its stddev of each line
qualdatapath = '/Users/blorenz/COSMOS/COSMOSData/dataqual.txt'
#Read in the scale of the lines
dataqual = ascii.read(qualdatapath).to_pandas()
#File with the error array
errdatapath = '/Users/blorenz/COSMOS/COSMOSData/errs.txt'
#Read in the error array
err_df = ascii.read(errdatapath,data_start=1,header_start=0,format='csv').to_pandas()
#Read the datafile:
fluxdata = ascii.read(fluxdatapath).to_pandas()
#Check if bpt correlates with stellar mass
#The location of the muzzin et al data:
mdatapath = '/Users/blorenz/COSMOS/muzzin_data/UVISTA_final_colors_sfrs_v4.1.dat'
#Read in the muzzin data
mdata = ascii.read(mdatapath).to_pandas()
mdata = mdata.rename(columns={'ID':'OBJID'})
fluxdata = pd.merge(fluxdata,mdata)
#Fontsizes for plotting
axisfont = 24
ticksize = 18
ticks = 8
titlefont = 24
legendfont = 16
textfont = 16
#Division function
def divz(X,Y):
return X/np.where(Y,Y,Y+1)*np.not_equal(Y,0)
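#Example: divz guards against division by zero, returning 0 where the denominator is 0,
#e.g. divz(np.array([1., 2.]), np.array([2., 0.])) -> array([0.5, 0. ])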
#BPT
#Strings of all of the lines needed for bpt
lines = ['4861','5007']
Hb = lines[0]
O3 = lines[1]
#fig2,axarr2 = plt.subplots(2,2,figsize=(15,12))
#ax1,ax2,ax3,ax4 = axarr2[0,0],axarr2[0,1],axarr2[1,0],axarr2[1,1]
#Takes the dataframe and the four lines to combine into the bpt
def getO3Hb(pd_df,err_df,N2,O3):
errHb = err_df[Hb]
errO3 = err_df[O3]
#Divide by the scale to calibrate the flux
calHb = divz(pd_df[Hb+'_flux'],pd_df[Hb+'_scale'])
calO3 = divz(pd_df[O3+'_flux'],pd_df[O3+'_scale'])
#Find the ratios
Hbrat = np.log10(divz(calO3,calHb))
#Find the errors
eHbrat = (1/np.log(10))*divz(calHb,calO3)*np.sqrt((divz(1,calHb) * errO3)**2 + (divz(-calO3,(calHb**2)) * errHb)**2)
return (Hbrat,eHbrat)
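#Example (illustrative) call on the full sample; note the third argument is not used inside the function body:
#O3Hb_rat, eO3Hb_rat = getO3Hb(fluxdata, err_df, Hb, O3)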
#Plotting parameters
ms = 3
lw=0.5
mark='o'
d = {'True': True, 'False': False}
#Filter the data
goodlines = [dataqual[line+'_good'].map(d) for line in lines]
#Needs to be good in all lines to be good
allgood = np.logical_and.reduce(goodlines)
#Needs to be bad in any line to be bad
badlines = [dataqual[line+'_bad'].map(d) for line in lines]
baddata = np.logical_or.reduce(badlines)
lowlines = [dataqual[line+'_low'].map(d) for line in lines]
#Needs to be low in any line to be low, and also not bad in a line
somelow = np.logical_and(np.logical_or.reduce(lowlines),np.logical_not(baddata))
plotframe = pd.DataFrame()
# -*- coding: utf-8 -*-
import os
import numpy as np
import statsmodels.api as sm # recommended import according to the docs
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats.mstats as mstats
from common import globals as glob
from datetime import datetime, timedelta
import seaborn as sb
sb.set_style('darkgrid')
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.arima_model import ARIMA
from dateutil.relativedelta import relativedelta
import plotly.plotly as py
from plotly.graph_objs import *
THRESHOLD_FOR_TOTAL_NUMBER_OF_STORES = 25
def plot_sparklines_for_countries_with_greatest_increase(countries, df):
data = []
i = 1
for c in countries:
dft, dates, overall_change, overall_change_in_percentage = get_timeseries(df, None, 'countries', None, [c], create_csv=False)
#glob.log.info(dft.columns)
xa='x' + str(i)
ya='y' + str(i)
print('country %s' %(c))
i += 1
trace = Scatter(
x=dates,
y=dft['count'],
fill='tozeroy',
line=Line(
shape='spline',
smoothing=1.3,
width=0.5
),
mode='lines',
name=c,
visible=True,
xaxis=xa,
yaxis=ya,
)
data.append(trace)
layout = Layout(
autosize=False,
height=1000,
showlegend=False,
title='<b>Timeseries for number of Starbucks stores 2013-2016</b><br>Countries with the maximum percentage increase in the number of Starbucks stores. <br><i>Only includes countries with at least 25 stores as of November 2016.</i>',
width=800)
i = 1
#xdomain and ydomain are divisions in which the plots will be displayed, we are
#looking for a 3x5 display
xdomain = [[0, 0.25], [0.33, 0.6], [0.7, 1.0]]
ydomain = [[0.8, 0.95], [0.6, 0.75], [0.4, 0.55], [0.2, 0.35], [0.0, 0.15]]
#we would like to replace the country code with the name; this mapping is available in the WDI dataset
fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.WDI_CSV_FILE_AFTER_CLEANING)
df_WB=pd.read_csv(fname)
df_WB = df_WB.set_index('country_code')
for c in countries:
xa = 'xaxis' + str(i)
ya = 'yaxis' + str(i)
layout[xa] = dict(XAxis(
anchor='y' + str(i),
autorange=True,
domain=xdomain[(i%3) - 1],
mirror=False,
showgrid=False,
showline=False,
showticklabels=False,
showticksuffix='none',
title=df_WB.ix[c]['name'],
titlefont=dict(
#family='Courier New, monospace',
size=12,
#color='#7f7f7f'
),
zeroline=False
))
layout[ya] = dict(YAxis(
#autorange=False,
#range=[0,3000],
autorange=True,
anchor='x' + str(i),
domain=ydomain[(i%5) - 1],
mirror=False,
showgrid=False,
showline=False,
showticklabels=True,
showticksuffix='last',
title='',
type='linear',
zeroline=False
))
#move to the next
i += 1
fig = Figure(data=data, layout=layout)
plot_url = py.plot(fig, filename='sparklines')
fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.TSA_DIR, 'countries_w_greatest_increase_in_starbucks_stores.png')
py.image.save_as(fig, filename=fname)
def plot_bar_graph_for_abs_increase(df):
import plotly.plotly as py
import plotly.graph_objs as go
df1 = df.sort_values(by='overall_change', ascending=False)
trace1 = go.Bar(
x=df1['country'],
y=df1['overall_change'],
name='Overall change in number of stores',
marker=dict(
#color='rgb(55, 83, 109)'
color='rgb(49,130,189)'
)
)
data = [trace1]
layout = go.Layout(
title='Overall increase in number of Starbucks stores from 2013 to 2016',
xaxis=dict(
tickfont=dict(
size=14,
color='rgb(107, 107, 107)'
)
),
yaxis=dict(
title='Increase in number of Starbucks stores',
titlefont=dict(
size=16,
color='rgb(107, 107, 107)'
),
tickfont=dict(
size=14,
color='rgb(107, 107, 107)'
)
),
legend=dict(
x=0,
y=1.0,
bgcolor='rgba(255, 255, 255, 0)',
bordercolor='rgba(255, 255, 255, 0)'
),
barmode='group',
bargap=0.15,
bargroupgap=0.1
)
fig = go.Figure(data=data, layout=layout)
py.plot(fig, filename='style-bar')
fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.TSA_DIR, 'bar_graph_for_increase_in_number_of_starbucks_stores.png')
py.image.save_as(fig, filename=fname)
def get_next_month_and_year(prev_date):
next_month = prev_date.month + 1
next_year = prev_date.year
if next_month == 13:
next_month = 1
next_year += 1
return next_year, next_month
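#Example: get_next_month_and_year(datetime(2016, 12, 1)) returns (2017, 1), rolling over the year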
def get_timeseries(df, dir_name, scope, scope_label, scope_list, create_csv=True):
#countries = ['US', 'CN', 'CA', 'IN', 'GB', 'JP', 'FR']
#create a new df based on scope
if scope == 'all':
glob.log.info('scope is all so no filtering needed....')
elif scope == 'continent':
glob.log.info('scope is continent...')
glob.log.info(scope_list)
df = df[df['continent'].isin(scope_list)]
elif scope == 'countries':
glob.log.info('scope is countries...')
glob.log.info(scope_list)
df = df[df['country'].isin(scope_list)]
elif scope == 'US_states':
glob.log.info('scope is US states...')
glob.log.info(scope_list)
df = df[(df['country'] == 'US') & (df['country_subdivision'].isin(scope_list))]
else:
glob.log.info('unknown scope -> %s, defaulting to scope=all' %(scope))
#add a new datetime field which holds the DateTime version of the first seent field
df['first_seen_as_dt'] = pd.to_datetime(df['first_seen'])
start_date = min(df['first_seen_as_dt'])
final_date = max(df['first_seen_as_dt'])
final_year = final_date.year
final_month = final_date.month
glob.log.info('start date: %s, final date %s' %(str(start_date), str(final_date)))
#create a new dataframe to hold the timeseries data
dft = pd.DataFrame(columns=['date', 'count'])
dates = []
counts = []
#add the first element
count = len(df[df['first_seen_as_dt'] == start_date])
dates.append(start_date)
counts.append(float(count))
prev_date = start_date
while True:
next_year, next_month = get_next_month_and_year(prev_date)
if (next_year > final_year) or (next_year == final_year and next_month > (final_month + 1)):
glob.log.info('reached end of timeseries data at year=%d, month=%d' %(next_year, next_month))
break
next_date = datetime(next_year, next_month, 1)
count += len(df[(df['first_seen_as_dt'] > prev_date) & (df['first_seen_as_dt'] <= next_date)])
#glob.log.info('date %s, count %d' %(next_date, count))
dates.append(next_date)
counts.append(float(count))
#move to the next date
prev_date = next_date
dft['date'] = dates
dft['count'] = counts
#add a rate parameter as well to see what is the rate of increase (or decrease) with time
dft['change'] = dft['count'] - dft['count'].shift()
overall_change = sum(dft['change'].dropna())
#2nd order differences, to see if there is an increase in the differences themselves
dft['change_in_percentage'] = 100*((dft['count'] - dft['count'].shift())/(dft['count']))
overall_change_in_percentage = sum(dft['change_in_percentage'].dropna())
if create_csv == True:
fname = os.path.join(dir_name, scope_label + '_timeseries.csv')
dft.to_csv(fname, index=False)
dft = dft.set_index('date')
return dft, dates, overall_change, overall_change_in_percentage
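#Example (illustrative; the output directory name is hypothetical): cumulative monthly store
#counts for a single country:
#dft, dates, chg, chg_pct = get_timeseries(df, 'output/tsa/US', 'countries', 'US', ['US'])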
def explore_timeseries(df, scope, scope_label, scope_list, order=(2, 1, 2)):
#create subdir for the scope so that all plots can be kept in that directory
dir_name = os.path.join(glob.OUTPUT_DIR_NAME, glob.TSA_DIR, scope_label)
os.makedirs(dir_name, exist_ok = True)
#get the series to be analyzed
dft, dates, overall_change, overall_change_in_percentage = get_timeseries(df, dir_name, scope, scope_label, scope_list)
#plot it
print(dft['count'])
dft['count'].plot(figsize=(16, 12))
fname = os.path.join(dir_name, 'num_stores.png')
plt.savefig(fname)
decomposition = seasonal_decompose(dft['count'], model='additive', freq=5)
fig = plt.figure()
fig = decomposition.plot()
fname = os.path.join(dir_name, 'decomposition.png')
plt.savefig(fname)
#store the df column as a time series, for easier processing
ts = dft['count']
#take a log of the series and then a difference of the logs, this is needed
#to make the series stationary
ts_log = np.log(dft['count'])
ts_log_diff = ts_log - ts_log.shift()
#we choose the ARIMA model on the log of the series
model = ARIMA(ts_log, order=order)
results_ARIMA = model.fit(disp=-1)
#plot the differences and overlay the fitted values to get a sense
#of how good the model is
fig = plt.figure()
plt.plot(ts_log_diff)
plt.plot(results_ARIMA.fittedvalues, color='red')
plt.title('RSS: %.4f'% sum((results_ARIMA.fittedvalues-ts_log_diff[1:])**2))
fname = os.path.join(dir_name, 'log_diff_and_fitted_values.png')
plt.savefig(fname)
#now begin converting the fitted values into the original scale
predictions_ARIMA_diff = pd.Series(results_ARIMA.fittedvalues, copy=True)
predictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum()
#retrieve the log of the predicted values by adding the cumulative sum to the original
#starting value
predictions_ARIMA_log = pd.Series(ts_log.ix[0], index=ts_log.index)
predictions_ARIMA_log = predictions_ARIMA_log.add(predictions_ARIMA_diff_cumsum,fill_value=0)
#all done, now recreate the entire series to the original scale
fig = plt.figure()
predictions_ARIMA = np.exp(predictions_ARIMA_log)
ax=plt.gca()
plt.plot(ts, label='Actual')
plt.ylabel('Number of stores')
ax = predictions_ARIMA.plot(ax=ax, style='r--', label='Predicted');
plt.title('RMSE: %.4f'% np.sqrt(sum((predictions_ARIMA-ts)**2)/len(ts)))
ax.legend()
fname = os.path.join(dir_name, 'orig_with_fitted_values.png')
plt.savefig(fname)
#create dates for the next one year
next_year, next_month = get_next_month_and_year(dates[-1])
start = datetime(next_year, next_month, 1)
date_list = [start + relativedelta(months=x) for x in range(0,12)]
future = pd.DataFrame(index=date_list, columns= dft.columns)
original_len_of_ts = len(dft)
dft = pd.concat([dft, future])
#for some reason we have to provide the start and end as integers
#and only then it works...dates as strings do not work, so we do this
#roundabout thing of providing integers as index and then changing the
#index to date strings once we have the predicted values..
#we predict next 12 months of data
predict_counts = results_ARIMA.predict(start=original_len_of_ts-1, end=original_len_of_ts+10, dynamic=True)
predict_counts.index = date_list
predict_counts = results_ARIMA.fittedvalues.append(predict_counts)
predictions_ARIMA_diff = pd.Series(predict_counts, copy=True)
predictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum()
ts_log = np.log(dft['count'])
predictions_ARIMA_log = pd.Series(ts_log.ix[0], index=ts_log.index)
import os
import sys
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, compat
from pandas.util import testing as tm
class TestToCSV:
@pytest.mark.xfail((3, 6, 5) > sys.version_info >= (3, 5),
reason=("Python csv library bug "
"(see https://bugs.python.org/issue32255)"))
def test_to_csv_with_single_column(self):
# see gh-18676, https://bugs.python.org/issue32255
#
# Python's CSV library adds an extraneous '""'
# before the newline when the NaN-value is in
# the first row. Otherwise, only the newline
# character is added. This behavior is inconsistent
# and was patched in https://bugs.python.org/pull_request4672.
df1 = DataFrame([None, 1])
expected1 = """\
""
1.0
"""
with tm.ensure_clean('test.csv') as path:
df1.to_csv(path, header=None, index=None)
with open(path, 'r') as f:
assert f.read() == expected1
df2 = DataFrame([1, None])
expected2 = """\
1.0
""
"""
with tm.ensure_clean('test.csv') as path:
df2.to_csv(path, header=None, index=None)
with open(path, 'r') as f:
assert f.read() == expected2
def test_to_csv_default_encoding(self):
# GH17097
df = DataFrame({'col': ["AAAAA", "ÄÄÄÄÄ", "ßßßßß", "聞聞聞聞聞"]})
with tm.ensure_clean('test.csv') as path:
# the default to_csv encoding is utf-8.
df.to_csv(path)
tm.assert_frame_equal(pd.read_csv(path, index_col=0), df)
def test_to_csv_quotechar(self):
df = DataFrame({'col': [1, 2]})
expected = """\
"","col"
"0","1"
"1","2"
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1) # 1=QUOTE_ALL
with open(path, 'r') as f:
assert f.read() == expected
expected = """\
$$,$col$
$0$,$1$
$1$,$2$
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, quotechar="$")
with open(path, 'r') as f:
assert f.read() == expected
with tm.ensure_clean('test.csv') as path:
with pytest.raises(TypeError, match='quotechar'):
df.to_csv(path, quoting=1, quotechar=None)
def test_to_csv_doublequote(self):
df = DataFrame({'col': ['a"a', '"bb"']})
expected = '''\
"","col"
"0","a""a"
"1","""bb"""
'''
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, doublequote=True) # QUOTE_ALL
with open(path, 'r') as f:
assert f.read() == expected
from _csv import Error
with tm.ensure_clean('test.csv') as path:
with pytest.raises(Error, match='escapechar'):
df.to_csv(path, doublequote=False) # no escapechar set
def test_to_csv_escapechar(self):
df = DataFrame({'col': ['a"a', '"bb"']})
expected = '''\
"","col"
"0","a\\"a"
"1","\\"bb\\""
'''
with tm.ensure_clean('test.csv') as path: # QUOTE_ALL
df.to_csv(path, quoting=1, doublequote=False, escapechar='\\')
with open(path, 'r') as f:
assert f.read() == expected
df = DataFrame({'col': ['a,a', ',bb,']})
expected = """\
,col
0,a\\,a
1,\\,bb\\,
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=3, escapechar='\\') # QUOTE_NONE
with open(path, 'r') as f:
assert f.read() == expected
def test_csv_to_string(self):
df = DataFrame({'col': [1, 2]})
expected_rows = [',col',
'0,1',
'1,2']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv() == expected
def test_to_csv_decimal(self):
# see gh-781
df = DataFrame({'col1': [1], 'col2': ['a'], 'col3': [10.1]})
expected_rows = [',col1,col2,col3',
'0,1,a,10.1']
expected_default = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv() == expected_default
expected_rows = [';col1;col2;col3',
'0;1;a;10,1']
expected_european_excel = tm.convert_rows_list_to_csv_str(
expected_rows)
assert df.to_csv(decimal=',', sep=';') == expected_european_excel
expected_rows = [',col1,col2,col3',
'0,1,a,10.10']
expected_float_format_default = tm.convert_rows_list_to_csv_str(
expected_rows)
assert df.to_csv(float_format='%.2f') == expected_float_format_default
expected_rows = [';col1;col2;col3',
'0;1;a;10,10']
expected_float_format = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv(decimal=',', sep=';',
float_format='%.2f') == expected_float_format
# see gh-11553: testing if decimal is taken into account for '0.0'
df = pd.DataFrame({'a': [0, 1.1], 'b': [2.2, 3.3], 'c': 1})
expected_rows = ['a,b,c',
'0^0,2^2,1',
'1^1,3^3,1']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv(index=False, decimal='^') == expected
# same but for an index
assert df.set_index('a').to_csv(decimal='^') == expected
# same for a multi-index
assert df.set_index(['a', 'b']).to_csv(decimal="^") == expected
def test_to_csv_float_format(self):
# testing if float_format is taken into account for the index
# GH 11553
df = pd.DataFrame({'a': [0, 1], 'b': [2.2, 3.3], 'c': 1})
expected_rows = ['a,b,c',
'0,2.20,1',
'1,3.30,1']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.set_index('a').to_csv(float_format='%.2f') == expected
# same for a multi-index
assert df.set_index(['a', 'b']).to_csv(
float_format='%.2f') == expected
def test_to_csv_na_rep(self):
# see gh-11553
#
# Testing if NaN values are correctly represented in the index.
df = DataFrame({'a': [0, np.NaN], 'b': [0, 1], 'c': [2, 3]})
expected_rows = ['a,b,c',
'0.0,0,2',
'_,1,3']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
# now with an index containing only NaNs
df = DataFrame({'a': np.NaN, 'b': [0, 1], 'c': [2, 3]})
expected_rows = ['a,b,c',
'_,0,2',
'_,1,3']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
# check if na_rep parameter does not break anything when no NaN
df = DataFrame({'a': 0, 'b': [0, 1], 'c': [2, 3]})
expected_rows = ['a,b,c',
'0,0,2',
'0,1,3']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
def test_to_csv_date_format(self):
# GH 10209
df_sec = DataFrame({'A': pd.date_range('20130101', periods=5, freq='s')
})
df_day = DataFrame({'A': pd.date_range('20130101', periods=5, freq='d')
})
expected_rows = [',A',
'0,2013-01-01 00:00:00',
'1,2013-01-01 00:00:01',
'2,2013-01-01 00:00:02',
'3,2013-01-01 00:00:03',
'4,2013-01-01 00:00:04']
expected_default_sec = tm.convert_rows_list_to_csv_str(expected_rows)
assert df_sec.to_csv() == expected_default_sec
expected_rows = [',A',
'0,2013-01-01 00:00:00',
'1,2013-01-02 00:00:00',
'2,2013-01-03 00:00:00',
'3,2013-01-04 00:00:00',
'4,2013-01-05 00:00:00']
expected_ymdhms_day = tm.convert_rows_list_to_csv_str(expected_rows)
assert (df_day.to_csv(date_format='%Y-%m-%d %H:%M:%S') ==
expected_ymdhms_day)
expected_rows = [',A',
'0,2013-01-01',
'1,2013-01-01',
'2,2013-01-01',
'3,2013-01-01',
'4,2013-01-01']
expected_ymd_sec = tm.convert_rows_list_to_csv_str(expected_rows)
assert df_sec.to_csv(date_format='%Y-%m-%d') == expected_ymd_sec
expected_rows = [',A',
'0,2013-01-01',
'1,2013-01-02',
'2,2013-01-03',
'3,2013-01-04',
'4,2013-01-05']
expected_default_day = tm.convert_rows_list_to_csv_str(expected_rows)
assert df_day.to_csv() == expected_default_day
assert df_day.to_csv(date_format='%Y-%m-%d') == expected_default_day
# see gh-7791
#
# Testing if date_format parameter is taken into account
# for multi-indexed DataFrames.
df_sec['B'] = 0
df_sec['C'] = 1
expected_rows = ['A,B,C',
'2013-01-01,0,1']
expected_ymd_sec = tm.convert_rows_list_to_csv_str(expected_rows)
df_sec_grouped = df_sec.groupby([pd.Grouper(key='A', freq='1h'), 'B'])
assert (df_sec_grouped.mean().to_csv(date_format='%Y-%m-%d') ==
expected_ymd_sec)
def test_to_csv_multi_index(self):
# see gh-6618
df = DataFrame([1], columns=pd.MultiIndex.from_arrays([[1], [2]]))
exp_rows = [',1',
',2',
'0,1']
exp = tm.convert_rows_list_to_csv_str(exp_rows)
# -*- coding: utf-8 -*-
###########################################################################
# we have searched for keywords in the original news
# for stemmed keywords in the stemmed news
# for lemmatized keywords int the lemmatized news
# now, want to merge all the results to see whats happening
###########################################################################
import pandas as pd
import numpy as np
from functions import add_stem
newsid_synonyms_origin=pd.read_csv('output/file1_keywords_original_keywords.csv') #input: output of solr_indexing_data
print(len(newsid_synonyms_origin))
#287199
newsid_synonyms_stem=pd.read_csv('output/file1_keywords_stemmed_keywords.csv') #input: output of solr_indexing_data
print(len(newsid_synonyms_stem))
#639888
newsid_synonyms_lemma=pd.read_csv('output/file1_keywords_lemmatized_keywords.csv') # input: output of solr_indexing_data
print(len(newsid_synonyms_lemma))
#484864
newsid_synonyms=newsid_synonyms_origin.copy()
newsid_synonyms=newsid_synonyms.append(newsid_synonyms_stem)
newsid_synonyms=newsid_synonyms.append(newsid_synonyms_lemma)
newsid_synonyms=newsid_synonyms.drop_duplicates()
print(len(newsid_synonyms))
#514806
newsid_synonyms.rename(index=str, columns={"disease_query":"synonyms"}, inplace=True)
####################################################
selected_news_origin= | pd.read_csv('output/file1output_origin_news.csv') | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Race-car Data Creation Class.
This script contains all utilities to create a proper dataset.
Revision History:
2020-05-10 (Animesh): Baseline Software.
2020-08-22 (Animesh): Updated Docstring.
Example:
from _data_handler import DataHandler
"""
#___Import Modules:
import os
import random
import pandas as pd
import matplotlib.pyplot as plt
from rc_nn_utility import ParseData
#___Global Variables:
SEED = 717
#__Classes:
class DataHandler:
"""Data Creation Utility Class.
    This class contains all methods needed to create datasets, such as a random
    dataset or a 5-fold cross-validation dataset.
"""
def __init__(self):
"""Constructor.
"""
pass
def merge_all(self, idir, output):
"""File Merger.
This method merges contents from multiple csv files.
Args:
idir (directory path): Directory path containing all csv files.
output (csv file): File containing all contents.
        Returns:
            None
"""
# read all files from provided folder
files = os.listdir(idir)
content = []
for ifile in files:
# collect contents from files in provided folder
if ifile[-4:] == ".csv":
content.extend(pd.read_csv(os.path.join(idir, \
ifile))['image'].to_list())
# write merged contents to output file
pd.DataFrame(content, columns =['image']).to_csv(output, index=False)
return None
def list_merge(self, lists):
"""List Merger.
This method merges contents from multiple lists.
Args:
lists (list): List of multiple lists to merge.
Returns:
data (list): Merged list.
"""
# loop over lists and put them all in one list
data = []
        for item_list in lists:
            data.extend(item_list)
return data
def refine_running(self, input, output, speed = 15):
"""Refine Running.
        This method removes samples with the provided motor value from a list.
Args:
input (csv file): File containing contents to refine.
output (csv file): File containing refined contents.
speed (int): Motor value to be removed.
"""
parsedata = ParseData()
# read file contents
data = pd.read_csv(input)
file = []
for index in range(len(data)):
            # parse motor data to verify speed
            _,_,mot = parsedata.parse_data(data["image"][index])
            # append data if car is running
if mot != speed:
file.append(data["image"][index])
# write merged contents to output file
pd.DataFrame(file, columns=["image"]).to_csv(output, index=False)
return None
def histogram(self, ilist, odir):
"""Plot Histogram.
This method plots histogram from servo and motor value parsed from a
list of images.
Args:
ilist (csv file): File containing list of images.
odir (directory path): Output directory.
"""
parsedata = ParseData()
# read file contents
data = pd.read_csv(ilist)
servo = []
motor = []
for index in range(len(data)):
# parse servo and motor data
_,ser,mot = parsedata.parse_data(data["image"][index])
servo.append(ser)
motor.append(mot)
# plot histogram of servo data
plt.figure()
plt.hist(servo, bins=11)
plt.title("Servo Data Histogram")
plt.savefig(os.path.join(odir,"Servo Data Histogram.png"))
# plot histogram of motor data
plt.figure()
plt.hist(motor, bins=11)
plt.title("Motor Data Histogram")
plt.savefig(os.path.join(odir,"Motor Data Histogram.png"))
return None
def devide_data(self, ilist, odir):
"""Dataset Devider.
This method devides dataset according to servo value.
Args:
ilist (csv file): File containing list of images.
odir (directory path): Output directory.
"""
parsedata = ParseData()
# read file contents
data = pd.read_csv(ilist)
data_10 = []
data_11 = []
data_12 = []
data_13 = []
data_14 = []
data_15 = []
data_16 = []
data_17 = []
data_18 = []
data_19 = []
data_20 = []
for index in range(len(data)):
# parse servo and motor data
_,servo,_ = parsedata.parse_data(data["image"][index])
# devide dataset
if servo == 10:
data_10.append(data["image"][index])
elif servo == 11:
data_11.append(data["image"][index])
elif servo == 12:
data_12.append(data["image"][index])
elif servo == 13:
data_13.append(data["image"][index])
elif servo == 14:
data_14.append(data["image"][index])
elif servo == 15:
data_15.append(data["image"][index])
elif servo == 16:
data_16.append(data["image"][index])
elif servo == 17:
data_17.append(data["image"][index])
elif servo == 18:
data_18.append(data["image"][index])
elif servo == 19:
data_19.append(data["image"][index])
elif servo == 20:
data_20.append(data["image"][index])
# write data
pd.DataFrame(data_10, columns=["image"]).to_csv(os.path.join(odir, \
"servo_10.csv"), index=False)
pd.DataFrame(data_11, columns=["image"]).to_csv(os.path.join(odir, \
"servo_11.csv"), index=False)
pd.DataFrame(data_12, columns=["image"]).to_csv(os.path.join(odir, \
"servo_12.csv"), index=False)
pd.DataFrame(data_13, columns=["image"]).to_csv(os.path.join(odir, \
"servo_13.csv"), index=False)
pd.DataFrame(data_14, columns=["image"]).to_csv(os.path.join(odir, \
"servo_14.csv"), index=False)
pd.DataFrame(data_15, columns=["image"]).to_csv(os.path.join(odir, \
"servo_15.csv"), index=False)
pd.DataFrame(data_16, columns=["image"]).to_csv(os.path.join(odir, \
"servo_16.csv"), index=False)
| pd.DataFrame(data_17, columns=["image"]) | pandas.DataFrame |
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from datetime import time
import joblib
import pickle
def time_to_seconds(t):
    return t.hour * 3600 + t.minute * 60 + t.second
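# quick check of the helper above:
#   time_to_seconds(time(1, 30, 15)) == 1*3600 + 30*60 + 15 == 5415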
df = | pd.read_csv('./data.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 26 22:55:37 2020
@author: <NAME> <EMAIL>
Data and Model from:
A conceptual model for the coronavirus disease 2019 (COVID-19)
outbreak in Wuhan, China with individual reaction and
governmental action
DOI:https://doi.org/10.1016/j.ijid.2020.02.058
https://www.ijidonline.com/article/S1201-9712(20)30117-X/fulltext
"""
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
import numpy as np
import math
import pandas as pd
import os
import time
start = time.time() #Real time when the program starts to run
clear = lambda: os.system('cls')
cwd = os.getcwd()
dir_path = os.path.dirname(os.path.realpath(__file__))
path_fol = "{}\SEIR Model for Spread of Disease".format(dir_path)
try:
os.mkdir(path_fol)
except:
pass
def R0(α, β, μ, γ):
# R_0 = (α/(μ + α))*(β/(μ + λ))
R_0 = (β/γ)*(α/(α + μ))
return R_0
def R0b(β, γ, σ, μ):
return (β*σ)/((γ + μ)*(μ + σ))
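# Note: R0b() is the standard basic reproduction number of an SEIR model with
# vital dynamics (birth/death rate μ):
#   R0 = (β·σ) / ((γ + μ)·(μ + σ))
# and with μ = 0, as used below, it reduces to the familiar R0 = β/γ.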
def aplha(day):
if int(day) <= 23:
return 0
elif int(day) > 23 and int(day) <= 29:
return 0.4239
else:
return 0.8478
def Beta(α,β0, D, N, k):
B = β0*(1 - α)*((1 - D/N)**k)
return B
def SEIR(t, y, *args):
σ, β, γ, μ, Λ, F, α, d, κ, λ = args
β_t = Beta(α, β, y[5], y[4], κ)
dsdt = Λ - μ*y[0] - ((β*F*y[0])/y[4]) - (β_t/y[4])*y[2]*y[0]
dedt = ((β*F*y[0])/y[4]) + (β_t/y[4])*y[2]*y[0] - (μ + σ)*y[1]
didt = σ*y[1] - (μ + γ)*y[2]
drdt = γ*y[2] - μ*y[3]
dndt = -μ*y[4]
dDdt = d*γ*y[2] - λ*y[5]
dcdt = σ*y[1]
return [dsdt, dedt, didt, drdt, dndt, dDdt, dcdt]
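# For reference, the state vector is y = [S, E, I, R, N, D, C] and SEIR() above
# integrates the system from the paper cited in the module docstring:
#   dS/dt = Λ - μS - βF·S/N - β(t)·S·I/N
#   dE/dt = βF·S/N + β(t)·S·I/N - (μ + σ)E
#   dI/dt = σE - (μ + γ)I
#   dR/dt = γI - μR
#   dN/dt = -μN
#   dD/dt = d·γ·I - λD      (risk-perception/death signal feeding β(t))
#   dC/dt = σE              (cumulative cases)
# where β(t) = β0·(1 - α)·(1 - D/N)^κ is computed by Beta() above.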
def jacobian(t, y, *args):
σ, β, γ, μ, Λ, F, α, d, κ, λ = args
β_t = Beta(α, β, y[5], y[4], κ)
return [[-F*β/y[4]- y[2]*β_t/y[4]- μ, 0, -y[0]*β_t/y[4], 0, F*y[0]*β/y[4]**2 + y[2]*y[0]*β_t/y[4]**2, 0, 0],
[ F*β/y[4]+ y[2]*β_t/y[4], -μ - σ, y[0]*β_t/y[4], 0, -F*y[0]*β/y[4]**2 - y[2]*y[0]*β_t/y[4]**2, 0, 0],
[ 0, σ, -γ - μ, 0, 0, 0, 0],
[ 0, 0, γ, -μ, 0, 0, 0],
[ 0, 0, 0, 0, -μ, 0, 0],
[ 0, 0, d*γ, 0, 0, -λ, 0],
[ 0, σ, 0, 0, 0, 0, 0]]
def roundup(x, places):
return int(math.ceil(x / int(places))) * int(places)
Λ = 0.0 # Birth rate
μ = 0.0 # Death rate
# Λ = 0.01 # Birth rate
# μ = 0.0205 # Death rate
Tc = 2.0 # Typical time between contacts
# β = 0.5944 #1.0/Tc
β = 1.68
# Tr = 11.1 # Typical time until recovery
Tinfs = [2.9, 2.3, 2.3, 2.9, 10.0, 1.5]
# Tr = sum(Tinfs)/len(Tinfs) #5.0
# Tr = 11.1
Tr = 14.0
γ = 1.0/Tr
Tincs = [5.2, 5.2, 6.1, 5.5, 4.8, 5.0, 6.5, 4.8]
Tinc = sum(Tincs)/len(Tincs)
σ = Tinc**-1
# σ = 3.0**-1
F = 10
α = 0.0
# α = 0.05
# α = 0.4239
# α = 0.8478
d = 0.05
# k = 1117.3
# k = 200
k = 0
λb = 11.2
λ = λb**-1
Infi = 10 # Initial infected
Daysnn = 150
NP = 329436928 # 1437904257
S0 = NP - Infi
its = 10000
itern = Daysnn/its
Days = [0.0, Daysnn]
Time = [i for i in range(0, int(Daysnn + 1), 1)]
tt = list(range(0,its,1))
Time_f = [i*itern for i in tt]
Y0 = [NP, 0.0, Infi, 0.0, NP, d, Infi]
Ro = R0b(β, γ, σ, μ)
# print(Ro)
# print('Λ')
# print('μ')
# print(α)
# print(β)
# print(Ro, 1.68**-1)
# print(λ)
# print(σ)
answer = solve_ivp(SEIR, Days, Y0, t_eval=Time_f, method = 'Radau', args=(σ, β, γ, μ, Λ, F, α, d, k, λ), jac=jacobian, rtol=1E-10, atol=1E-10)
ts = answer.t
# note: Beta's first argument is the governmental-action strength α, not σ
Bs = [Beta(α, β, i, j, k) for i,j in zip(answer.y[5],answer.y[4])]
Sn = answer.y[0]
En = answer.y[1]
In = answer.y[2]
Rn = answer.y[3]
Nn = answer.y[4]
Dn = answer.y[5]
Cn = answer.y[6]
Spb = answer.y[0]/NP
Epb = answer.y[1]/NP
Ipb = answer.y[2]/NP
Rpb = answer.y[3]/NP
Npb = answer.y[4]/NP
Dpb = answer.y[5]/NP
Cpb = answer.y[6]/NP
Sp = [i*100.0 for i in Spb]
Ep = [i*100.0 for i in Epb]
Ip = [i*100.0 for i in Ipb]
Rp = [i*100.0 for i in Rpb]
Np = [i*100.0 for i in Npb]
Dp = [i*100.0 for i in Dpb]
Cp = [i*100.0 for i in Cpb]
m = max(In)
mi = (In.tolist()).index(max(In))
mip = mi/its
peakn = round(Daysnn*mip)
my = max(Ip)
myi = (Ip).index(max(Ip))
myp = myi/its
peakyn = round(Daysnn*myp)
PEAK = [int(round(Daysnn*(mi/its)))]
nPEAK = np.array(PEAK, ndmin=2)
Tdata = np.array((Time_f, Sn, En, In, Rn))
TTdata = np.array((Time_f, Spb, Epb, Ipb, Rpb, Sp, Ep, Ip, Rp))
Tdatal = Tdata.tolist()
if its <= 16384:
    writer = pd.ExcelWriter(r'{}\SIR Population.xlsx'.format(path_fol), engine='xlsxwriter')
    writerp = pd.ExcelWriter(r'{}\SIR Percent.xlsx'.format(path_fol), engine='xlsxwriter')
indexes = ['Time [Days]', 'Susceptible', 'Exposed', 'Infected', 'Recovered', 'Peak [Day]']
indexest = ['Time [Days]', 'Susceptible', 'Exposed', 'Infected', 'Recovered' , 'Susceptible [%]', 'Exposed [%]', 'Infected [%]', 'Recovered [%]', 'Peak [Day]']
df = pd.DataFrame([Time_f, Sn, En, In, Rn, PEAK], index=[*indexes])
dft = pd.DataFrame([Time_f, Spb, Epb, Ipb, Rpb, Sp, Ep, Ip, Rp, PEAK], index=[*indexest])
df.to_excel(r"{}\SIR Population.xlsx".format(path_fol), sheet_name="SIR Population.xlsx", header=True, startrow=1)
dft.to_excel(r"{}\SIR Percent.xlsx".format(path_fol), sheet_name="SIR Percent.xlsx", header=True, startrow=1)
elif its > 16384 and its <= 1048576:
    writer = pd.ExcelWriter(r'{}\SIR Population.xlsx'.format(path_fol), engine='xlsxwriter')
    writerp = pd.ExcelWriter(r'{}\SIR Percent.xlsx'.format(path_fol), engine='xlsxwriter')
indexesb = ['Time [Days]', 'Susceptible', 'Exposed', 'Infected', 'Recovered']
indexestb = ['Time [Days]', 'Susceptible', 'Exposed', 'Infected', 'Recovered' , 'Susceptible [%]', 'Exposed [%]', 'Infected [%]', 'Recovered [%]']
df = | pd.DataFrame(Tdata.T, columns=[*indexesb]) | pandas.DataFrame |
#!/usr/bin/env python
import unittest
import os
import logging
import numpy as np
import filecmp
import pandas as pd
from vaws.model.house import House
from vaws.model.config import Config
# from model import zone
# from model import engine
def check_file_consistency(file1, file2, **kwargs):
try:
identical = filecmp.cmp(file1, file2)
except OSError:
print(f'{file2} does not exist')
else:
if not identical:
try:
data1 = pd.read_csv(file1, **kwargs)
data2 = pd.read_csv(file2, **kwargs)
except ValueError:
print(f'No columns to parse from {file2}')
else:
try:
pd.util.testing.assert_frame_equal(data1, data2)
except AssertionError:
print(f'{file1} and {file2} are different')
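# Example usage (hypothetical file names), with extra keyword arguments
# forwarded to pandas.read_csv:
#   check_file_consistency('reference/house_cpi.csv',
#                          'output/house_cpi.csv', index_col=0)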
def consistency_house_damage_idx(path_reference, path_output):
file1 = os.path.join(path_reference, 'house_dmg_idx.csv')
file2 = os.path.join(path_output, 'results_model.h5')
data1 = pd.read_csv(file1)
data2 = | pd.read_hdf(file2, 'di') | pandas.read_hdf |
import os
import sys
import requests
import logging
import json
import pandas as pd
from bs4 import BeautifulSoup
import pickle
from git import Git
class FPL_Review_Scraper:
""" Scrape FPL Review website """
def __init__(self, logger, season_data, team_id):
"""
Args:
logger (logging.logger): logging package
season_data (int): Season
team_id (int): Player team ID
"""
self.season = season_data['season']
self.root = f'data/fpl_review/{self.season}-{self.season % 2000 + 1}/gameweek/'
if not os.path.exists(self.root):
os.makedirs(self.root)
self.next_gw, self.players = self.get_fpl_metadata()
self.logger = logger
self.team_id = team_id
def get_fpl_metadata(self):
""" Request the FPL API
Returns:
(tuple): Next GW and player ids
"""
url = 'https://fantasy.premierleague.com/api/bootstrap-static/'
res = requests.get(url).json()
# Get current gameweek
next_gw = self.get_next_gw(res['events'])
if not os.path.exists(os.path.join(self.root, str(next_gw))):
os.mkdir(os.path.join(self.root, str(next_gw)))
# Get player ids
cols = ["id", "first_name", "second_name", "team"]
players = pd.DataFrame(res['elements'])[cols]
players = players.set_index("id")
return next_gw, players
def get_next_gw(self, events):
""" Get the next gameweek to be played in the EPL
Args:
events (json): FPL API response
Returns:
(int): Next gameweek
"""
for idx, gw in enumerate(events):
if gw['is_next']:
return idx + 1
def get_free_planner_data(self):
"""Get the FPL Review data"""
period = min(5, 39 - self.next_gw)
url = 'https://fplreview.com/free-planner/#forecast_table'
body = {
'HiveMind': 'Yes',
'Weeks': period,
'TeamID': self.team_id,
}
x = requests.post(url, data=body)
soup = BeautifulSoup(x.content, 'html.parser')
logger.info("Saving raw data.")
for fplr_api in soup.find(id="fplr_api"):
with open(
os.path.join(
os.path.join(self.root, str(self.next_gw)),
'raw_fplreview_fp.json'),
'w') as outfile:
json.dump(json.loads(fplr_api), outfile)
# Columns
csv_cols = ["id", "Pos", "Name", "BV", "SV", "Team"]
for gw in range(self.next_gw, self.next_gw + period):
csv_cols.append(str(gw) + '_xMins')
csv_cols.append(str(gw) + '_Pts')
logger.info("Saving processed data.")
pd.DataFrame(columns=csv_cols).to_csv(
os.path.join(
os.path.join(self.root, str(self.next_gw)),
'fplreview_fp.csv'),
index=False)
for fplr_api in soup.find(id="fplr_api"):
for idx, key in enumerate(json.loads(fplr_api).keys()):
try:
row = [
key,
json.loads(fplr_api)[key]['pos'],
json.loads(fplr_api)[key]['name'],
json.loads(fplr_api)[key]['def_cost'],
json.loads(fplr_api)[key]['now_cost'],
json.loads(fplr_api)[key]['team_abbrev']
]
for gw in range(self.next_gw, self.next_gw + period):
row.append(
json.loads(fplr_api)[key][str(gw)]['dmins'])
row.append(
json.loads(fplr_api)[key][str(gw)]['livpts'])
(
pd.DataFrame([row], columns=csv_cols)
.to_csv(
os.path.join(
os.path.join(self.root, str(self.next_gw)),
'fplreview_fp.csv'),
index=False, mode='a', header=False))
                except Exception:
self.logger.warning(f"Failed to save row {key}.")
continue
def get_free_planner_data_fast(self):
"""Get the FPL Review data"""
period = min(5, 39 - self.next_gw)
url = 'https://fplreview.com/free-planner/#forecast_table'
body = {
'HiveMind': 'Yes',
'Weeks': period,
'TeamID': self.team_id,
}
x = requests.post(url, data=body)
soup = BeautifulSoup(x.content, 'html.parser')
logger.info("Saving raw data.")
for fplr_api in soup.find(id="fplr_api"):
with open(
os.path.join(
os.path.join(self.root, str(self.next_gw)),
'raw_fplreview_fp.json'),
'w') as outfile:
json.dump(json.loads(fplr_api), outfile)
logger.info("Saving processed data.")
# Columns
csv_cols = ["id", "Pos", "Name", "BV", "SV", "Team"]
df = pd.DataFrame(columns=csv_cols)
df_json = pd.read_json(
os.path.join(
os.path.join(self.root, str(self.next_gw)),
'raw_fplreview_fp.json')
).T
df[['Pos', 'Name', 'BV', 'SV', 'Team']] = df_json[['pos', 'name', 'def_cost', 'now_cost', 'team_abbrev']]
df['id'] = df_json.index
df_json = df_json.reset_index()
for gw in range(self.next_gw, self.next_gw + period):
df_gw = pd.json_normalize(df_json[str(gw)]).join(df_json['index'])
df_gw = df_gw.rename(
columns={
'dmins': f'{gw}_xMins',
'livpts': f'{gw}_Pts',
'index': 'id'
})
df = pd.merge(
df,
df_gw[[f'{gw}_xMins', f'{gw}_Pts', 'id']],
left_on='id',
right_on='id',
)
df.to_csv(
os.path.join(
os.path.join(self.root, str(self.next_gw)),
'fplreview_fp.csv'),
index=False)
def get_premium_planner_data_fast(self):
"""Get the FPL Review data"""
period = min(8, 39 - self.next_gw)
url = 'https://fplreview.com/massive-data-planner/#forecast_table'
body = {
'HiveMind': 'Yes',
'Weeks': period,
'TeamID': self.team_id,
}
logger.info("Logging in with cookies.")
# Get the saved cookies.
cookies = pickle.load(open("cookies.pkl", "rb"))
# Set cookies
session = requests.Session()
session.cookies.set(cookies['name'], cookies['value'])
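        # Assumption: cookies.pkl holds a pickled dict with 'name' and 'value'
        # keys for a logged-in fplreview.com session cookie, e.g. created
        # elsewhere (hypothetically) with:
        #   pickle.dump({'name': '<cookie name>', 'value': '<cookie value>'},
        #               open('cookies.pkl', 'wb'))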
# Request url
x = session.post(url, data=body)
soup = BeautifulSoup(x.content, 'html.parser')
for fplr_api in soup.find(id="fplr_api"):
with open(
os.path.join(
os.path.join(self.root, str(self.next_gw)),
'raw_fplreview_mp.json'),
'w') as outfile:
json.dump(json.loads(fplr_api), outfile)
logger.info("Processing data.")
# Columns
csv_cols = ["id", "Pos", "Name", "BV", "SV", "Team"]
df = | pd.DataFrame(columns=csv_cols) | pandas.DataFrame |
import os
import argparse
from typing import List, Dict, Tuple, Optional, Iterable, Any, Union
from enum import Enum
import numpy as np
import pandas as pd
from . import BaseAddOn
from .. import GutenTAG
from ..generator import Overview, TimeSeries
from ..utils.global_variables import SUPERVISED_FILENAME, UNSUPERVISED_FILENAME, SEMI_SUPERVISED_FILENAME, \
BASE_OSCILLATIONS, ANOMALIES, PARAMETERS, BASE_OSCILLATION, BASE_OSCILLATION_NAMES
from ..utils.default_values import default_values
columns = [
"collection_name",
"dataset_name",
"train_path",
"test_path",
"dataset_type",
"datetime_index",
"split_at",
"train_type",
"train_is_normal",
"input_type",
"length",
"dimensions",
"contamination",
"num_anomalies",
"min_anomaly_length",
"median_anomaly_length",
"max_anomaly_length",
"mean",
"stddev",
"trend",
"stationarity",
"period_size"
]
class LearningType(Enum):
Unsupervised = "unsupervised"
Supervised = "supervised"
SemiSupervised = "semi-supervised"
def get_filename(self) -> Optional[str]:
if self == LearningType.Supervised:
return SUPERVISED_FILENAME
elif self == LearningType.SemiSupervised:
return SEMI_SUPERVISED_FILENAME
return None
class TimeEvalAddOn(BaseAddOn):
def process(self, overview: Overview, gutenTAG: GutenTAG, args: argparse.Namespace) -> Tuple[Overview, GutenTAG]:
for i, (generator, config) in enumerate(zip(gutenTAG.timeseries, overview.datasets)):
self._process_timeseries(config, i, generator, LearningType.Unsupervised)
if generator.supervised:
self._process_timeseries(config, i, generator, LearningType.Supervised)
if generator.semi_supervised:
self._process_timeseries(config, i, generator, LearningType.SemiSupervised)
self._set_global_vals()
if args.no_save:
return overview, gutenTAG
self.df.to_csv(os.path.join(args.output_dir, "datasets.csv"), index=False)
return overview, gutenTAG
def _set_global_vals(self):
self.df["collection_name"] = "GutenTAG"
self.df["dataset_type"] = "synthetic"
self.df["datetime_index"] = False
self.df["split_at"] = np.NAN
self.df["train_is_normal"] = True
self.df["stationarity"] = np.NAN
def _process_timeseries(self, config: Dict, i: int, generator: TimeSeries, tpe: LearningType):
dataset: Dict[str, Any] = dict()
dataset_name = generator.dataset_name or i
filename = tpe.get_filename()
if filename is not None:
dataset["train_path"] = f"{dataset_name}/{filename}"
ts = generator.timeseries
assert ts is not None, "Timeseries should not be None!"
dataset["dataset_name"] = f"{dataset_name}.{tpe.value}"
dataset["test_path"] = f"{dataset_name}/{UNSUPERVISED_FILENAME}"
dataset["input_type"] = "univariate" if ts.shape[1] == 1 else "multivariate"
dataset["length"] = config.get(PARAMETERS.LENGTH, 10000)
dataset["dimensions"] = ts.shape[1]
dataset["contamination"] = self._calc_contamination(config.get(ANOMALIES, []), dataset[PARAMETERS.LENGTH])
dataset["num_anomalies"] = len(config.get(ANOMALIES, []))
dataset["min_anomaly_length"] = min([anomaly.get("length") for anomaly in config.get(ANOMALIES, [])])
dataset["median_anomaly_length"] = np.median([anomaly.get(PARAMETERS.LENGTH) for anomaly in config.get(ANOMALIES, [])])
dataset["max_anomaly_length"] = max([anomaly.get(PARAMETERS.LENGTH) for anomaly in config.get(ANOMALIES, [])])
dataset["train_type"] = tpe.value
dataset["mean"] = None if ts is None else ts.mean()
dataset["stddev"] = None if ts is None else ts.std(axis=1).mean()
dataset["trend"] = config.get(BASE_OSCILLATION, {}).get(PARAMETERS.TREND, {}).get(PARAMETERS.KIND, np.NAN)
dataset["period_size"] = TimeEvalAddOn._calc_period_size(config.get(BASE_OSCILLATION, config.get(BASE_OSCILLATIONS, [{}])), dataset[PARAMETERS.LENGTH])
self.df: pd.DataFrame = self.df.append(dataset, ignore_index=True)
@staticmethod
def _calc_contamination(anomalies: Iterable[Dict], ts_length: int) -> float:
anomaly_lengths = [anomaly.get(PARAMETERS.LENGTH, default_values[ANOMALIES][PARAMETERS.LENGTH]) for anomaly in anomalies]
if len(anomaly_lengths) > 0:
return sum(anomaly_lengths) / ts_length
return 0
@staticmethod
def _calc_period_size(base: Union[Dict[str, Any], List[Dict[str, Any]]], length: int) -> float:
bases: List[Dict[str, Any]] = []
if type(base) == dict:
bases.append(base) # type: ignore # does not understand the condition before
elif type(base) == list:
bases = base # type: ignore # does not understand the condition before
periods = []
for dim in bases:
frequency = dim.get(PARAMETERS.FREQUENCY)
kind = dim.get(PARAMETERS.KIND)
if frequency is None or kind not in [BASE_OSCILLATION_NAMES.SINE, BASE_OSCILLATION_NAMES.ECG, BASE_OSCILLATION_NAMES.RANDOM_MODE_JUMP]:
periods.append(np.NAN)
elif kind in [BASE_OSCILLATION_NAMES.SINE, BASE_OSCILLATION_NAMES.ECG]:
periods.append(int(100 / frequency))
elif kind == BASE_OSCILLATION_NAMES.RANDOM_MODE_JUMP:
periods.append(int(length / frequency))
return float(np.nanmedian(periods))
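    # Illustrative values: a sine/ECG base oscillation with frequency 2 gives a
    # period of 100/2 = 50 samples; a random_mode_jump base with frequency 100
    # on a series of length 10000 gives 10000/100 = 100 samples; the median
    # over all dimensions is returned.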
def __init__(self):
self.df = | pd.DataFrame(columns=columns) | pandas.DataFrame |
# import modules
import bcolz
import pickle
import random
import argparse
import numpy as np
import pandas as pd
from os.path import dirname, realpath, join
from IPython.terminal.debugger import set_trace as keyboard
# function for tokenizing
def corpus_indexify(corpus_dict, word2idx):
# initialize corpus array
token_found = 0
token_count = 0
max_length = 20
n_items = len(corpus_dict)
corpus_arr = word2idx['PAD'] * np.ones((n_items, max_length))
# loop over the descriptions
for idx, key in enumerate(corpus_dict.keys()):
tokens = corpus_dict[key].split(' ')
        for tidx, token in enumerate(tokens):
            # only keep the first max_length tokens of each description
            if tidx >= max_length:
                break
            token_count += 1
            try:
                corpus_arr[idx, tidx] = word2idx[token]
                token_found += 1
            except KeyError:
                corpus_arr[idx, tidx] = word2idx['UNK']
    # compute coverage as the fraction of tokens found in the vocabulary
    coverage = token_found / token_count
return corpus_arr, coverage
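# Minimal illustration with a hypothetical vocabulary: 'red' and 'shoe' are in
# the vocabulary, 'leather' falls back to UNK, so coverage is 2/3:
#   word2idx = {'PAD': 0, 'UNK': 1, 'red': 2, 'shoe': 3}
#   arr, cov = corpus_indexify({'item1': 'red leather shoe'}, word2idx)
#   # arr[0, :3] -> [2., 1., 3.], cov -> 0.666...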
def main():
parser = argparse.ArgumentParser(description='generate datasets')
parser.add_argument('--embed_type', type=str, default='glove',
help='embedding type')
parser.add_argument('--embed_dim', type=int, default=300,
help='embedding dimension')
parser.add_argument('--seed', type=int, default=42,
help='seed dataset generation')
args = parser.parse_args()
print('Initialize environment')
# seed the environment
seed = args.seed
random.seed(seed)
np.random.seed(seed)
# list of levels
levels = ['level1', 'level2', 'level3', 'level4']
samples = {'level1': 25, 'level2': 10, 'level3': 5, 'level4': 5}
thresholds = {'level1': 500, 'level2': 100, 'level3': 50, 'level4': 20}
# obtain root dir
root_dir = dirname(realpath(__file__))
data_path = join(root_dir, '..', '..', 'data', 'amazon')
embed_path = join(root_dir, '..', '..', 'embeddings')
# load csv file
print('Load dataset')
with open(join(data_path, 'amazon.csv'), 'r', encoding='latin1') as f:
amazon_df = | pd.read_csv(f) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""Device curtailment plots.
This module creates plots related to the curtailment of generators.
@author: <NAME>
"""
import os
import logging
import pandas as pd
from collections import OrderedDict
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.ticker as mtick
import marmot.config.mconfig as mconfig
import marmot.plottingmodules.plotutils.plot_library as plotlib
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, DataSavedInModule,
UnderDevelopment, MissingZoneData)
class MPlot(PlotDataHelper):
"""curtailment MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The curtailment.py module contains methods that are
    related to the curtailment of generators.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
self.x = mconfig.parser("figure_size","xdimension")
self.y = mconfig.parser("figure_size","ydimension")
self.y_axes_decimalpt = mconfig.parser("axes_options","y_axes_decimalpt")
self.curtailment_prop = mconfig.parser("plot_data","curtailment_property")
def curt_duration_curve(self, prop: str = None,
start_date_range: str = None, end_date_range: str = None, **_):
"""Curtailment duration curve (line plot)
Displays curtailment sorted from highest occurrence to lowest
        over a given time period.
Args:
prop (str, optional): Controls type of re to include in plot.
Controlled through the plot_select.csv.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,f"generator_{self.curtailment_prop}",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f"{self.AGG_BY} = {zone_input}")
RE_Curtailment_DC = pd.DataFrame()
PV_Curtailment_DC = pd.DataFrame()
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
re_curt = self[f"generator_{self.curtailment_prop}"].get(scenario)
# Timeseries [MW] RE curtailment [MWh]
try: #Check for regions missing all generation.
re_curt = re_curt.xs(zone_input,level = self.AGG_BY)
except KeyError:
self.logger.info(f'No curtailment in {zone_input}')
continue
re_curt = self.df_process_gen_inputs(re_curt)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
re_curt = self.assign_curtailment_techs(re_curt)
# Timeseries [MW] PV curtailment [MWh]
pv_curt = re_curt[re_curt.columns.intersection(self.pv_gen_cat)]
re_curt = re_curt.sum(axis=1)
pv_curt = pv_curt.sum(axis=1)
re_curt = re_curt.squeeze() #Convert to Series
pv_curt = pv_curt.squeeze() #Convert to Series
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
re_curt = re_curt[start_date_range : end_date_range]
pv_curt = pv_curt[start_date_range : end_date_range]
if re_curt.empty is True and prop == "PV+Wind":
self.logger.warning('No data in selected Date Range')
continue
if pv_curt.empty is True and prop == "PV":
self.logger.warning('No data in selected Date Range')
continue
# Sort from larget to smallest
re_cdc = re_curt.sort_values(ascending=False).reset_index(drop=True)
pv_cdc = pv_curt.sort_values(ascending=False).reset_index(drop=True)
re_cdc.rename(scenario, inplace=True)
pv_cdc.rename(scenario, inplace=True)
RE_Curtailment_DC = pd.concat([RE_Curtailment_DC, re_cdc], axis=1, sort=False)
PV_Curtailment_DC = pd.concat([PV_Curtailment_DC, pv_cdc], axis=1, sort=False)
# Remove columns that have values less than 1
RE_Curtailment_DC = RE_Curtailment_DC.loc[:, (RE_Curtailment_DC >= 1).any(axis=0)]
PV_Curtailment_DC = PV_Curtailment_DC.loc[:, (PV_Curtailment_DC >= 1).any(axis=0)]
# Replace _ with white space
RE_Curtailment_DC.columns = RE_Curtailment_DC.columns.str.replace('_',' ')
PV_Curtailment_DC.columns = PV_Curtailment_DC.columns.str.replace('_',' ')
# Create Dictionary from scenario names and color list
colour_dict = dict(zip(RE_Curtailment_DC.columns, self.color_list))
fig2, ax = plt.subplots(figsize=(self.x,self.y))
if prop == "PV":
if PV_Curtailment_DC.empty:
out = MissingZoneData()
outputs[zone_input] = out
continue
# unit conversion return divisor and energy units
unitconversion = PlotDataHelper.capacity_energy_unitconversion(PV_Curtailment_DC.values.max())
PV_Curtailment_DC = PV_Curtailment_DC/unitconversion['divisor']
Data_Table_Out = PV_Curtailment_DC
Data_Table_Out = Data_Table_Out.add_suffix(f" ({unitconversion['units']})")
x_axis_lim = 1.25 * len(PV_Curtailment_DC)
for column in PV_Curtailment_DC:
ax.plot(PV_Curtailment_DC[column], linewidth=3, color=colour_dict[column],
label=column)
ax.legend(loc='lower left',bbox_to_anchor=(1,0),
facecolor='inherit', frameon=True)
ax.set_ylabel(f"PV Curtailment ({unitconversion['units']})", color='black', rotation='vertical')
if prop == "PV+Wind":
if RE_Curtailment_DC.empty:
out = MissingZoneData()
outputs[zone_input] = out
continue
# unit conversion return divisor and energy units
unitconversion = PlotDataHelper.capacity_energy_unitconversion(RE_Curtailment_DC.values.max())
RE_Curtailment_DC = RE_Curtailment_DC/unitconversion['divisor']
Data_Table_Out = RE_Curtailment_DC
Data_Table_Out = Data_Table_Out.add_suffix(f" ({unitconversion['units']})")
x_axis_lim = 1.25 * len(RE_Curtailment_DC)
for column in RE_Curtailment_DC:
ax.plot(RE_Curtailment_DC[column], linewidth=3, color=colour_dict[column],
label=column)
ax.legend(loc='lower left',bbox_to_anchor=(1,0),
facecolor='inherit', frameon=True)
ax.set_ylabel(f"PV + Wind Curtailment ({unitconversion['units']})", color='black', rotation='vertical')
ax.set_xlabel('Hours', color='black', rotation='horizontal')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(axis='y', which='major', length=5, width=1)
ax.tick_params(axis='x', which='major', length=5, width=1)
ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda x, p: format(x, f',.{self.y_axes_decimalpt}f')))
ax.margins(x=0.01)
#ax.set_xlim(0, 9490)
ax.set_xlim(0,x_axis_lim)
ax.set_ylim(bottom=0)
if mconfig.parser("plot_title_as_region"):
ax.set_title(zone_input)
outputs[zone_input] = {'fig': fig2, 'data_table': Data_Table_Out}
return outputs
def curt_pen(self, prop: str = None,
start_date_range: str = None, end_date_range: str = None, **_):
"""Plot of curtailment vs penetration.
        Each scenario is represented by a different symbol on an x, y axis.
Args:
prop (str, optional): Controls type of re to include in plot.
Controlled through the plot_select.csv.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, "generator_Generation", self.Scenarios),
(True, "generator_Available_Capacity", self.Scenarios),
(True, f"generator_{self.curtailment_prop}", self.Scenarios),
(True, "generator_Total_Generation_Cost", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
Penetration_Curtailment_out = pd.DataFrame()
self.logger.info(f"{self.AGG_BY } = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
gen = self["generator_Generation"].get(scenario)
try: #Check for regions missing all generation.
gen = gen.xs(zone_input,level = self.AGG_BY)
except KeyError:
self.logger.info(f'No generation in {zone_input}')
continue
avail_gen = self["generator_Available_Capacity"].get(scenario)
avail_gen = avail_gen.xs(zone_input,level=self.AGG_BY)
re_curt = self[f"generator_{self.curtailment_prop}"].get(scenario)
try:
re_curt = re_curt.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.info(f'No curtailment in {zone_input}')
continue
re_curt = self.df_process_gen_inputs(re_curt)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
re_curt = self.assign_curtailment_techs(re_curt)
# Finds the number of unique hours in the year
no_hours_year = len(gen.index.unique(level="timestamp"))
# Total generation across all technologies [MWh]
total_gen = float(gen.sum())
# Timeseries [MW] and Total VRE generation [MWh]
vre_gen = (gen.loc[(slice(None), self.vre_gen_cat),:])
total_vre_gen = float(vre_gen.sum())
# Timeseries [MW] and Total RE generation [MWh]
re_gen = (gen.loc[(slice(None), self.re_gen_cat),:])
total_re_gen = float(re_gen.sum())
# Timeseries [MW] and Total PV generation [MWh]
pv_gen = (gen.loc[(slice(None), self.pv_gen_cat),:])
total_pv_gen = float(pv_gen.sum())
# % Penetration of generation classes across the year
VRE_Penetration = (total_vre_gen/total_gen)*100
RE_Penetration = (total_re_gen/total_gen)*100
PV_Penetration = (total_pv_gen/total_gen)*100
# Timeseries [MW] and Total RE available [MWh]
re_avail = (avail_gen.loc[(slice(None), self.re_gen_cat),:])
total_re_avail = float(re_avail.sum())
# Timeseries [MW] and Total PV available [MWh]
pv_avail = (avail_gen.loc[(slice(None), self.pv_gen_cat),:])
total_pv_avail = float(pv_avail.sum())
# Total RE curtailment [MWh]
total_re_curt = float(re_curt.sum().sum())
# Timeseries [MW] and Total PV curtailment [MWh]
pv_curt = re_curt[re_curt.columns.intersection(self.pv_gen_cat)]
total_pv_curt = float(pv_curt.sum().sum())
# % of hours with curtailment
Prct_hr_RE_curt = (len((re_curt.sum(axis=1)).loc[(re_curt.sum(axis=1))>0])/no_hours_year)*100
Prct_hr_PV_curt = (len((pv_curt.sum(axis=1)).loc[(pv_curt.sum(axis=1))>0])/no_hours_year)*100
# Max instantaneous curtailment
if re_curt.empty == True:
continue
else:
Max_RE_Curt = max(re_curt.sum(axis=1))
if pv_curt.empty == True:
continue
else:
Max_PV_Curt = max(pv_curt.sum(axis=1))
# % RE and PV Curtailment Capacity Factor
if total_pv_curt > 0:
RE_Curt_Cap_factor = (total_re_curt/Max_RE_Curt)/no_hours_year
PV_Curt_Cap_factor = (total_pv_curt/Max_PV_Curt)/no_hours_year
else:
RE_Curt_Cap_factor = 0
PV_Curt_Cap_factor = 0
# % Curtailment across the year
if total_re_avail == 0:
continue
else:
Prct_RE_curt = (total_re_curt/total_re_avail)*100
if total_pv_avail == 0:
continue
else:
Prct_PV_curt = (total_pv_curt/total_pv_avail)*100
# Total generation cost
Total_Gen_Cost = self["generator_Total_Generation_Cost"].get(scenario)
Total_Gen_Cost = Total_Gen_Cost.xs(zone_input,level=self.AGG_BY)
Total_Gen_Cost = float(Total_Gen_Cost.sum())
vg_out = pd.Series([PV_Penetration ,RE_Penetration, VRE_Penetration, Max_PV_Curt,
Max_RE_Curt, Prct_PV_curt, Prct_RE_curt, Prct_hr_PV_curt,
Prct_hr_RE_curt, PV_Curt_Cap_factor, RE_Curt_Cap_factor, Total_Gen_Cost],
index=["% PV Penetration", "% RE Penetration", "% VRE Penetration",
"Max PV Curtailment [MW]", "Max RE Curtailment [MW]",
"% PV Curtailment", '% RE Curtailment',"% PV hrs Curtailed",
"% RE hrs Curtailed", "PV Curtailment Capacity Factor",
"RE Curtailment Capacity Factor", "Gen Cost"])
vg_out = vg_out.rename(scenario)
Penetration_Curtailment_out = pd.concat([Penetration_Curtailment_out, vg_out], axis=1, sort=False)
Penetration_Curtailment_out = Penetration_Curtailment_out.T
# Data table of values to return to main program
Data_Table_Out = Penetration_Curtailment_out
VG_index = pd.Series(Penetration_Curtailment_out.index)
# VG_index = VG_index.str.split(n=1, pat="_", expand=True)
# VG_index.rename(columns = {0:"Scenario"}, inplace=True)
VG_index.rename("Scenario", inplace=True)
# VG_index = VG_index["Scenario"]
Penetration_Curtailment_out.loc[:, "Scenario"] = VG_index[:,].values
marker_dict = dict(zip(VG_index.unique(), self.marker_style))
colour_dict = dict(zip(VG_index.unique(), self.color_list))
Penetration_Curtailment_out["colour"] = [colour_dict.get(x, '#333333') for x in Penetration_Curtailment_out.Scenario]
Penetration_Curtailment_out["marker"] = [marker_dict.get(x, '.') for x in Penetration_Curtailment_out.Scenario]
if Penetration_Curtailment_out.empty:
self.logger.warning(f'No Generation in {zone_input}')
out = MissingZoneData()
outputs[zone_input] = out
continue
fig1, ax = plt.subplots(figsize=(self.x,self.y))
for index, row in Penetration_Curtailment_out.iterrows():
if prop == "PV":
ax.scatter(row["% PV Penetration"], row["% PV Curtailment"],
marker=row["marker"], c=row["colour"], s=100, label = row["Scenario"])
ax.set_ylabel('% PV Curtailment', color='black', rotation='vertical')
ax.set_xlabel('% PV Penetration', color='black', rotation='horizontal')
elif prop == "PV+Wind":
ax.scatter(row["% RE Penetration"], row["% RE Curtailment"],
marker=row["marker"], c=row["colour"], s=40, label = row["Scenario"])
ax.set_ylabel('% PV + Wind Curtailment', color='black', rotation='vertical')
ax.set_xlabel('% PV + Wind Penetration', color='black', rotation='horizontal')
ax.set_ylim(bottom=0)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(axis='y', which='major', length=5, width=1)
ax.tick_params(axis='x', which='major', length=5, width=1)
ax.margins(x=0.01)
if mconfig.parser("plot_title_as_region"):
ax.set_title(zone_input)
handles, labels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys(), loc = 'lower right')
outputs[zone_input] = {'fig': fig1, 'data_table': Data_Table_Out}
return outputs
def curt_total(self, start_date_range: str = None, end_date_range: str = None, **_):
"""Creates stacked barplots of total curtailment by technology.
A separate bar is created for each scenario.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, f"generator_{self.curtailment_prop}", self.Scenarios),
(True, "generator_Available_Capacity", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f"{self.AGG_BY} = {zone_input}")
Total_Curtailment_out = pd.DataFrame()
Total_Available_gen = pd.DataFrame()
vre_curt_chunks = []
avail_gen_chunks = []
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
vre_collection = {}
avail_vre_collection = {}
vre_curt = self[f"generator_{self.curtailment_prop}"].get(scenario)
try:
vre_curt = vre_curt.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.info(f'No curtailment in {zone_input}')
continue
vre_curt = self.df_process_gen_inputs(vre_curt)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
vre_curt = self.assign_curtailment_techs(vre_curt)
avail_gen = self["generator_Available_Capacity"].get(scenario)
try: #Check for regions missing all generation.
avail_gen = avail_gen.xs(zone_input,level = self.AGG_BY)
except KeyError:
self.logger.info(f'No available generation in {zone_input}')
continue
avail_gen = self.df_process_gen_inputs(avail_gen)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
avail_gen = self.assign_curtailment_techs(avail_gen)
all_empty = True
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
for vre_type in self.vre_gen_cat:
try:
vre_curt_type = vre_curt[vre_type]
# vre_curt_type = vre_curt.xs(vre_type,level='tech')
except KeyError:
self.logger.info(f'No {vre_type} in {zone_input}')
continue
avail_gen_type = avail_gen[vre_type]
# Code to index data by date range, if a date range is listed in marmot_plot_select.csv
if pd.notna(start_date_range):
avail_gen_type = avail_gen_type.groupby(['timestamp']).sum()
vre_curt_type = vre_curt_type.groupby(['timestamp']).sum()
vre_curt_type = vre_curt_type[start_date_range : end_date_range]
avail_gen_type = avail_gen_type[start_date_range : end_date_range]
if vre_curt_type.empty is False and avail_gen_type.empty is False:
all_empty = False
vre_collection[vre_type] = float(vre_curt_type.sum())
avail_vre_collection[vre_type] = float(avail_gen_type.sum())
if all_empty:
self.logger.warning('No data in selected Date Range')
continue
vre_table = pd.DataFrame(vre_collection,index=[scenario])
avail_gen_table = pd.DataFrame(avail_vre_collection,index=[scenario])
vre_curt_chunks.append(vre_table)
avail_gen_chunks.append(avail_gen_table)
if not vre_curt_chunks:
outputs[zone_input] = MissingZoneData()
continue
Total_Curtailment_out = pd.concat(vre_curt_chunks, axis=0, sort=False)
Total_Available_gen = pd.concat(avail_gen_chunks, axis=0, sort=False)
vre_pct_curt = Total_Curtailment_out.sum(axis=1)/Total_Available_gen.sum(axis=1)
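            # Share of available VRE energy that was curtailed per scenario;
            # annotated as a percentage above each bar further below.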
Total_Curtailment_out.index = Total_Curtailment_out.index.str.replace('_',' ')
if Total_Curtailment_out.empty == True:
outputs[zone_input] = MissingZoneData()
continue
# unit conversion return divisor and energy units
unitconversion = PlotDataHelper.capacity_energy_unitconversion(max(Total_Curtailment_out.sum()))
Total_Curtailment_out = Total_Curtailment_out/unitconversion['divisor']
# Data table of values to return to main program
Data_Table_Out = Total_Curtailment_out
Data_Table_Out = Data_Table_Out.add_suffix(f" ({unitconversion['units']}h)")
fig3, ax = plt.subplots(figsize=(self.x,self.y))
Total_Curtailment_out.plot.bar(stacked=True,
color=[self.PLEXOS_color_dict.get(x, '#333333') for x in Total_Curtailment_out.columns],
edgecolor='black', linewidth='0.1', ax=ax)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel(f"Total Curtailment ({unitconversion['units']}h)", color='black', rotation='vertical')
ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda x, p: format(x, f',.{self.y_axes_decimalpt}f')))
# Set x-tick labels
if len(self.custom_xticklabels) > 1:
tick_labels = self.custom_xticklabels
else:
tick_labels = Total_Curtailment_out.index
PlotDataHelper.set_barplot_xticklabels(tick_labels, ax=ax)
ax.tick_params(axis='y', which='major', length=5, width=1)
ax.tick_params(axis='x', which='major', length=5, width=1)
ax.margins(x=0.01)
if mconfig.parser("plot_title_as_region"):
ax.set_title(zone_input)
handles, labels = ax.get_legend_handles_labels()
ax.legend(reversed(handles), reversed(labels), loc='lower left',bbox_to_anchor=(1,0),
facecolor='inherit', frameon=True)
curt_totals = Total_Curtailment_out.sum(axis=1)
#inserts total bar value above each bar
for k, patch in enumerate(ax.patches):
height = curt_totals[k]
width = patch.get_width()
x, y = patch.get_xy()
ax.text(x+width/2,
y+height + 0.05*max(ax.get_ylim()),
'{:.2%}\n|{:,.2f}|'.format(vre_pct_curt[k],curt_totals[k]),
horizontalalignment='center',
verticalalignment='center', fontsize=11, color='red')
if k>=len(vre_pct_curt)-1:
break
outputs[zone_input] = {'fig': fig3, 'data_table': Data_Table_Out}
return outputs
def curt_total_diff(self, start_date_range: str = None, end_date_range: str = None, **_):
"""Creates stacked barplots of total curtailment by technology relative to a base scenario.
Barplots show the change in total curtailment relative to a base scenario.
        The default is to compare against the first scenario provided in the inputs list.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
return UnderDevelopment()
outputs = {}
properties = [(True, f"generator_{self.curtailment_prop}", self.Scenarios),
(True, "generator_Available_Capacity", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
outputs = MissingInputData()
return outputs
for zone_input in self.Zones:
self.logger.info(self.AGG_BY + " = " + zone_input)
Total_Curtailment_out = pd.DataFrame()
Total_Available_gen = pd.DataFrame()
vre_curt_chunks = []
avail_gen_chunks = []
for scenario in self.Scenarios:
self.logger.info("Scenario = " + scenario)
# Adjust list of values to drop from vre_gen_cat depending on if it exists in processed techs
#self.vre_gen_cat = [name for name in self.vre_gen_cat if name in curtailment_collection.get(scenario).index.unique(level="tech")]
vre_collection = {}
avail_vre_collection = {}
vre_curt = self[f"generator_{self.curtailment_prop}"].get(scenario)
try:
vre_curt = vre_curt.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.info('No curtailment in ' + zone_input)
continue
vre_curt = self.df_process_gen_inputs(vre_curt)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
vre_curt = self.assign_curtailment_techs(vre_curt)
avail_gen = self["generator_Available_Capacity"].get(scenario)
try: #Check for regions missing all generation.
avail_gen = avail_gen.xs(zone_input,level = self.AGG_BY)
except KeyError:
self.logger.info('No available generation in ' + zone_input)
continue
avail_gen = self.df_process_gen_inputs(avail_gen)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
avail_gen = self.assign_curtailment_techs(avail_gen)
for vre_type in self.vre_gen_cat:
try:
vre_curt_type = vre_curt[vre_type]
except KeyError:
self.logger.info('No ' + vre_type + ' in ' + zone_input)
continue
vre_collection[vre_type] = float(vre_curt_type.sum())
avail_gen_type = avail_gen[vre_type]
avail_vre_collection[vre_type] = float(avail_gen_type.sum())
vre_table = | pd.DataFrame(vre_collection,index=[scenario]) | pandas.DataFrame |
"""
Multi criteria decision analysis
"""
from __future__ import division
from __future__ import print_function
import json
import os
import pandas as pd
import numpy as np
import cea.config
import cea.inputlocator
from cea.optimization.lca_calculations import lca_calculations
from cea.analysis.multicriteria.optimization_post_processing.electricity_imports_exports_script import electricity_import_and_exports
from cea.technologies.solar.photovoltaic import calc_Cinv_pv
from cea.optimization.constants import PUMP_ETA
from cea.constants import DENSITY_OF_WATER_AT_60_DEGREES_KGPERM3
from cea.optimization.constants import SIZING_MARGIN
from cea.analysis.multicriteria.optimization_post_processing.individual_configuration import calc_opex_PV
from cea.technologies.chiller_vapor_compression import calc_Cinv_VCC
from cea.technologies.chiller_absorption import calc_Cinv
from cea.technologies.cooling_tower import calc_Cinv_CT
import cea.optimization.distribution.network_opt_main as network_opt
from cea.analysis.multicriteria.optimization_post_processing.locating_individuals_in_generation_script import locating_individuals_in_generation_script
from cea.technologies.heat_exchangers import calc_Cinv_HEX
from math import ceil, log
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def multi_criteria_main(locator, config):
# local variables
generation = config.multi_criteria.generations
category = "optimization-detailed"
if not os.path.exists(locator.get_address_of_individuals_of_a_generation(generation)):
data_address = locating_individuals_in_generation_script(generation, locator)
else:
data_address = pd.read_csv(locator.get_address_of_individuals_of_a_generation(generation))
# initialize class
data_generation = preprocessing_generations_data(locator, generation)
objectives = data_generation['final_generation']['population']
individual_list = objectives.axes[0].values
data_processed = preprocessing_cost_data(locator, data_generation['final_generation'], individual_list[0], generation, data_address, config)
column_names = data_processed.columns.values
compiled_data = pd.DataFrame(np.zeros([len(individual_list), len(column_names)]), columns=column_names)
for i, individual in enumerate(individual_list):
data_processed = preprocessing_cost_data(locator, data_generation['final_generation'], individual, generation, data_address, config)
for name in column_names:
compiled_data.loc[i][name] = data_processed[name][0]
compiled_data = compiled_data.assign(individual=individual_list)
normalized_TAC = (compiled_data['TAC_Mio'] - min(compiled_data['TAC_Mio'])) / (
max(compiled_data['TAC_Mio']) - min(compiled_data['TAC_Mio']))
normalized_emissions = (compiled_data['total_emissions_kiloton'] - min(compiled_data['total_emissions_kiloton'])) / (
max(compiled_data['total_emissions_kiloton']) - min(compiled_data['total_emissions_kiloton']))
normalized_prim = (compiled_data['total_prim_energy_TJ'] - min(compiled_data['total_prim_energy_TJ'])) / (
max(compiled_data['total_prim_energy_TJ']) - min(compiled_data['total_prim_energy_TJ']))
normalized_Capex_total = (compiled_data['Capex_total_Mio'] - min(compiled_data['Capex_total_Mio'])) / (
max(compiled_data['Capex_total_Mio']) - min(compiled_data['Capex_total_Mio']))
normalized_Opex = (compiled_data['Opex_total_Mio'] - min(compiled_data['Opex_total_Mio'])) / (
max(compiled_data['Opex_total_Mio']) - min(compiled_data['Opex_total_Mio']))
normalized_renewable_share = (compiled_data['renewable_share_electricity'] - min(compiled_data['renewable_share_electricity'])) / (
max(compiled_data['renewable_share_electricity']) - min(compiled_data['renewable_share_electricity']))
compiled_data = compiled_data.assign(normalized_TAC=normalized_TAC)
compiled_data = compiled_data.assign(normalized_emissions=normalized_emissions)
compiled_data = compiled_data.assign(normalized_prim=normalized_prim)
compiled_data = compiled_data.assign(normalized_Capex_total=normalized_Capex_total)
compiled_data = compiled_data.assign(normalized_Opex=normalized_Opex)
compiled_data = compiled_data.assign(normalized_renewable_share=normalized_renewable_share)
compiled_data['TAC_rank'] = compiled_data['normalized_TAC'].rank(ascending=True)
compiled_data['emissions_rank'] = compiled_data['normalized_emissions'].rank(ascending=True)
compiled_data['prim_rank'] = compiled_data['normalized_prim'].rank(ascending=True)
# user defined mcda
compiled_data['user_MCDA'] = compiled_data['normalized_Capex_total'] * config.multi_criteria.capextotal * config.multi_criteria.economicsustainability + \
compiled_data['normalized_Opex'] * config.multi_criteria.opex * config.multi_criteria.economicsustainability + \
compiled_data['normalized_TAC'] * config.multi_criteria.annualizedcosts * config.multi_criteria.economicsustainability + \
compiled_data['normalized_emissions'] *config.multi_criteria.emissions * config.multi_criteria.environmentalsustainability + \
compiled_data['normalized_prim'] *config.multi_criteria.primaryenergy * config.multi_criteria.environmentalsustainability + \
compiled_data['normalized_renewable_share'] * config.multi_criteria.renewableshare * config.multi_criteria.socialsustainability
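    # The user-defined score is a weighted sum of the min-max normalised
    # criteria (score_i = sum_j w_j * x_norm_ij), where each criterion weight is
    # scaled by its sustainability-pillar weight (economic / environmental /
    # social) from the multi-criteria config section; individuals are then
    # ranked on this score.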
compiled_data['user_MCDA_rank'] = compiled_data['user_MCDA'].rank(ascending=True)
compiled_data.to_csv(locator.get_multi_criteria_analysis(generation))
return compiled_data
def preprocessing_generations_data(locator, generations):
data_processed = []
with open(locator.get_optimization_checkpoint(generations), "rb") as fp:
data = json.load(fp)
# get lists of data for performance values of the population
costs_Mio = [round(objectives[0] / 1000000, 2) for objectives in
data['population_fitness']] # convert to millions
emissions_kiloton = [round(objectives[1] / 1000000, 2) for objectives in
data['population_fitness']] # convert to tons x 10^3 (kiloton)
prim_energy_TJ = [round(objectives[2] / 1000000, 2) for objectives in
data['population_fitness']] # convert to gigajoules x 10^3 (Terajoules)
individual_names = ['ind' + str(i) for i in range(len(costs_Mio))]
df_population = pd.DataFrame({'Name': individual_names, 'costs_Mio': costs_Mio,
'emissions_kiloton': emissions_kiloton, 'prim_energy_TJ': prim_energy_TJ
}).set_index("Name")
individual_barcode = [[str(ind) if type(ind) == float else str(ind) for ind in
individual] for individual in data['population']]
def_individual_barcode = pd.DataFrame({'Name': individual_names,
'individual_barcode': individual_barcode}).set_index("Name")
# get lists of data for performance values of the population (hall_of_fame
costs_Mio_HOF = [round(objectives[0] / 1000000, 2) for objectives in
data['halloffame_fitness']] # convert to millions
emissions_kiloton_HOF = [round(objectives[1] / 1000000, 2) for objectives in
data['halloffame_fitness']] # convert to tons x 10^3
prim_energy_TJ_HOF = [round(objectives[2] / 1000000, 2) for objectives in
data['halloffame_fitness']] # convert to gigajoules x 10^3
individual_names_HOF = ['ind' + str(i) for i in range(len(costs_Mio_HOF))]
df_halloffame = pd.DataFrame({'Name': individual_names_HOF, 'costs_Mio': costs_Mio_HOF,
'emissions_kiloton': emissions_kiloton_HOF,
'prim_energy_TJ': prim_energy_TJ_HOF}).set_index("Name")
# get dataframe with capacity installed per individual
for i, individual in enumerate(individual_names):
dict_capacities = data['capacities'][i]
dict_network = data['disconnected_capacities'][i]["network"]
list_dict_disc_capacities = data['disconnected_capacities'][i]["disconnected_capacity"]
for building, dict_disconnected in enumerate(list_dict_disc_capacities):
if building == 0:
df_disc_capacities = pd.DataFrame(dict_disconnected, index=[dict_disconnected['building_name']])
else:
df_disc_capacities = df_disc_capacities.append(
pd.DataFrame(dict_disconnected, index=[dict_disconnected['building_name']]))
df_disc_capacities = df_disc_capacities.set_index('building_name')
dict_disc_capacities = df_disc_capacities.sum(axis=0).to_dict() # series with sum of capacities
if i == 0:
df_disc_capacities_final = pd.DataFrame(dict_disc_capacities, index=[individual])
df_capacities = pd.DataFrame(dict_capacities, index=[individual])
df_network = pd.DataFrame({"network": dict_network}, index=[individual])
else:
df_capacities = df_capacities.append( | pd.DataFrame(dict_capacities, index=[individual]) | pandas.DataFrame |
"""
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
from collections import defaultdict
from datetime import timedelta
from textwrap import dedent
from typing import List, Optional, Set
import warnings
import numpy as np
import pandas._libs.window as libwindow
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.core.dtypes.common import (
ensure_float64,
is_bool,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDateOffset,
ABCDatetimeIndex,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas._typing import Axis, FrameOrSeries
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.generic import _shared_docs
from pandas.core.groupby.base import GroupByMixin
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
class _Window(PandasObject, SelectionMixin):
_attributes = [
"window",
"min_periods",
"center",
"win_type",
"axis",
"on",
"closed",
] # type: List[str]
exclusions = set() # type: Set[str]
def __init__(
self,
obj,
window=None,
min_periods: Optional[int] = None,
center: Optional[bool] = False,
win_type: Optional[str] = None,
axis: Axis = 0,
on: Optional[str] = None,
closed: Optional[str] = None,
**kwargs
):
self.__dict__.update(kwargs)
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.center = center
self.win_type = win_type
self.win_freq = None
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self) -> Optional[bool]:
return None
@property
def _on(self):
return None
@property
def is_freq_type(self) -> bool:
return self.win_type == "freq"
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in [
"right",
"both",
"left",
"neither",
]:
raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'")
def _create_blocks(self):
"""
Split data into blocks & return conformed data.
"""
obj = self._selected_obj
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)
blocks = obj._to_dict_of_blocks(copy=False).values()
return blocks, obj
def _gotitem(self, key, ndim, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : str / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
"%r object has no attribute %r" % (type(self).__name__, attr)
)
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self) -> str:
return self.__class__.__name__
def __repr__(self) -> str:
"""
Provide a nice str repr of our rolling object.
"""
attrs = (
"{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None
)
return "{klass} [{attrs}]".format(
klass=self._window_type, attrs=",".join(attrs)
)
def __iter__(self):
url = "https://github.com/pandas-dev/pandas/issues/11704"
raise NotImplementedError("See issue #11704 {url}".format(url=url))
def _get_index(self) -> Optional[np.ndarray]:
"""
Return index as an ndarray.
Returns
-------
None or ndarray
"""
if self.is_freq_type:
return self._on.asi8
return None
def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
"""Convert input to numpy arrays for Cython routines"""
if values is None:
values = getattr(self._selected_obj, "values", self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError(
"ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(action=self._window_type, dtype=values.dtype)
)
else:
try:
values = ensure_float64(values)
except (ValueError, TypeError):
raise TypeError(
"cannot handle this type -> {0}" "".format(values.dtype)
)
# Always convert inf to nan
values[np.isinf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None) -> FrameOrSeries:
"""
Wrap a single result.
"""
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
from pandas import to_timedelta
result = to_timedelta(result.ravel(), unit="ns").values.reshape(
result.shape
)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries:
"""
Wrap the results.
Parameters
----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
exclude: list of columns to exclude, default to None
"""
from pandas import Series, concat
from pandas.core.index import ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
# exclude nuisance columns so that they are not reindexed
if exclude is not None and exclude:
columns = [c for c in columns if c not in exclude]
if not columns:
raise DataError("No numeric types to aggregate")
if not len(final):
return obj.astype("float64")
return concat(final, axis=1).reindex(columns=columns, copy=False)
def _center_window(self, result, window) -> np.ndarray:
"""
Center the result in the window.
"""
if self.axis > result.ndim - 1:
raise ValueError(
"Requested axis is larger then no. of argument " "dimensions"
)
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (ABCSeries, ABCDataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, func, *args, **kwargs):
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
return self.apply(func, raw=False, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs["sum"] = dedent(
"""
Calculate %(name)s sum of given DataFrame or Series.
Parameters
----------
*args, **kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed value.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
%(name)s sum.
See Also
--------
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.rolling(3).sum()
0 NaN
1 NaN
2 6.0
3 9.0
4 12.0
dtype: float64
>>> s.expanding(3).sum()
0 NaN
1 NaN
2 6.0
3 10.0
4 15.0
dtype: float64
>>> s.rolling(3, center=True).sum()
0 NaN
1 6.0
2 9.0
3 12.0
4 NaN
dtype: float64
For DataFrame, each %(name)s sum is computed column-wise.
>>> df = pd.DataFrame({"A": s, "B": s ** 2})
>>> df
A B
0 1 1
1 2 4
2 3 9
3 4 16
4 5 25
>>> df.rolling(3).sum()
A B
0 NaN NaN
1 NaN NaN
2 6.0 14.0
3 9.0 29.0
4 12.0 50.0
"""
)
_shared_docs["mean"] = dedent(
"""
Calculate the %(name)s mean of the values.
Parameters
----------
*args
Under Review.
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
The below examples will show rolling mean calculations with window sizes of
two and three, respectively.
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).mean()
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
>>> s.rolling(3).mean()
0 NaN
1 NaN
2 2.0
3 3.0
dtype: float64
"""
)
class Window(_Window):
"""
Provide rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
If it's an offset then this will be the time period of each window. Each
window will be variably sized based on the observations included in
the time-period. This is only valid for datetimelike indexes. This is
new in 0.19.0.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
`min_periods` will default to 1. Otherwise, `min_periods` will default
to the size of the window.
center : bool, default False
Set the labels at the center of the window.
win_type : str, default None
Provide a window type. If ``None``, all points are evenly weighted.
See the notes below for further information.
on : str, optional
For a DataFrame, a datetime-like column on which to calculate the rolling
window, rather than the DataFrame's index. Provided integer column is
ignored and excluded from result since an integer index is not used to
calculate the rolling window.
axis : int or str, default 0
closed : str, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
Returns
-------
a Window or Rolling sub-classed for the particular operation
See Also
--------
expanding : Provides expanding transformations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width)
* ``exponential`` (needs tau), center is set to None.
If ``win_type=None`` all points are evenly weighted. To learn more about
different window types see `scipy.signal window functions
<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 0.5
2 1.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
... index = [pd.Timestamp('20130101 09:00:00'),
... pd.Timestamp('20130101 09:00:02'),
... pd.Timestamp('20130101 09:00:03'),
... pd.Timestamp('20130101 09:00:05'),
... pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
"""
def validate(self):
super().validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window <= 0:
raise ValueError("window must be > 0 ")
import_optional_dependency(
"scipy", extra="Scipy is required to generate window weight."
)
import scipy.signal as sig
if not isinstance(self.win_type, str):
raise ValueError("Invalid win_type {0}".format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError("Invalid win_type {0}".format(self.win_type))
else:
raise ValueError("Invalid window {0}".format(window))
def _prep_window(self, **kwargs):
"""
Provide validation for our window type, return the window
we have already validated.
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com.asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {
"kaiser": ["beta"],
"gaussian": ["std"],
"general_gaussian": ["power", "width"],
"slepian": ["width"],
"exponential": ["tau"],
}
if win_type in arg_map:
win_args = _pop_args(win_type, arg_map[win_type], kwargs)
if win_type == "exponential":
# exponential window requires the first arg (center)
# to be set to None (necessary for symmetric window)
win_args.insert(0, None)
return tuple([win_type] + win_args)
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = "%s window requires %%s" % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
def _apply_window(self, mean=True, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
mean : bool, default True
If True computes weighted mean, else weighted sum
Returns
-------
y : same type as input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj = self._create_blocks()
block_list = list(blocks)
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return libwindow.roll_window(
np.concatenate((arg, additional_nans)) if center else arg,
window,
minp,
avg=mean,
)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/DataFrame",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name="window")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply_window(mean=False, **kwargs)
@Substitution(name="window")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply_window(mean=True, **kwargs)
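# Hedged usage sketch for the weighted-window behaviour documented in ``Window`` above.
# It is not executed at import time, and it assumes scipy is installed (required whenever
# a ``win_type`` is supplied).
def _example_weighted_rolling():
    """Illustrative only: compare an unweighted and a gaussian-weighted rolling mean."""
    import numpy as np
    import pandas as pd

    s = pd.Series(np.arange(10, dtype=float))
    plain = s.rolling(window=4).mean()
    # 'gaussian' is one of the recognized win_types and needs a ``std`` argument.
    weighted = s.rolling(window=4, win_type="gaussian").mean(std=1.0)
    return plain, weighted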
class _GroupByMixin(GroupByMixin):
"""
Provide the groupby facilities.
"""
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop("parent", None) # noqa
groupby = kwargs.pop("groupby", None)
if groupby is None:
groupby, obj = obj, obj.obj
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
super().__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch("count")
corr = GroupByMixin._dispatch("corr", other=None, pairwise=None)
cov = GroupByMixin._dispatch("cov", other=None, pairwise=None)
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Dispatch to apply; we are stripping all of the _apply kwargs and
performing the original function call on the grouped object.
"""
def f(x, name=name, *args):
x = self._shallow_copy(x)
if isinstance(name, str):
return getattr(x, name)(*args, **kwargs)
return x.apply(name, *args, **kwargs)
return self._groupby.apply(f)
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Rolling statistical measure using supplied function.
Designed to be used with passed-in Cython array-based functions.
Parameters
----------
func : str/callable to apply
name : str, optional
name of this function
window : int/array, default to _get_window()
center : bool, default to self.center
check_minp : function, default to _use_window
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj = self._create_blocks()
block_list = list(blocks)
index_as_array = self._get_index()
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, str):
cfunc = getattr(libwindow, func, None)
if cfunc is None:
raise ValueError(
"we do not support this function "
"in libwindow.{func}".format(func=func)
)
def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = ensure_float64(arg)
return cfunc(arg, window, minp, index_as_array, closed, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(
np.concatenate((x, additional_nans)),
window,
min_periods=self.min_periods,
closed=self.closed,
)
else:
def calc(x):
return func(
x, window, min_periods=self.min_periods, closed=self.closed
)
with np.errstate(all="ignore"):
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
class _Rolling_and_Expanding(_Rolling):
_shared_docs["count"] = dedent(
r"""
The %(name)s count of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = pd.Series([2, 3, np.nan, 10])
>>> s.rolling(2).count()
0 1.0
1 2.0
2 1.0
3 1.0
dtype: float64
>>> s.rolling(3).count()
0 1.0
1 2.0
2 2.0
3 2.0
dtype: float64
>>> s.rolling(4).count()
0 1.0
1 2.0
2 2.0
3 3.0
dtype: float64
"""
)
def count(self):
blocks, obj = self._create_blocks()
# Validate the index
self._get_index()
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
results = []
for b in blocks:
result = b.notna().astype(int)
result = self._constructor(
result,
window=window,
min_periods=0,
center=self.center,
axis=self.axis,
closed=self.closed,
).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
_shared_docs["apply"] = dedent(
r"""
The %(name)s function's apply function.
Parameters
----------
func : function
Must produce a single value from an ndarray input if ``raw=True``
or a single value from a Series if ``raw=False``.
raw : bool, default None
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` or ``None`` : the passed function will receive ndarray
objects instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
The `raw` parameter is required and will show a FutureWarning if
not passed. In the future `raw` will default to False.
.. versionadded:: 0.23.0
*args, **kwargs
Arguments and keyword arguments to be passed into func.
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
)
def apply(self, func, raw=None, args=(), kwargs={}):
from pandas import Series
kwargs.pop("_level", None)
window = self._get_window()
offset = _offset(window, self.center)
index_as_array = self._get_index()
# TODO: default is for backward compat
# change to False in the future
if raw is None:
warnings.warn(
"Currently, 'apply' passes the values as ndarrays to the "
"applied function. In the future, this will change to passing "
"it as Series objects. You need to specify 'raw=True' to keep "
"the current behaviour, and you can pass 'raw=False' to "
"silence this warning",
FutureWarning,
stacklevel=3,
)
raw = True
def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
if not raw:
arg = Series(arg, index=self.obj.index)
return libwindow.roll_generic(
arg,
window,
minp,
index_as_array,
closed,
offset,
func,
raw,
args,
kwargs,
)
return self._apply(f, func, args=args, kwargs=kwargs, center=False, raw=raw)
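# Hedged usage note for ``apply`` above (shown as a comment, not a doctest that is run):
# passing ``raw=True`` hands each window to the function as an ndarray, which is usually
# faster for plain NumPy reductions, e.g.
#   >>> s = pd.Series([1, 2, 3, 4, 5])
#   >>> s.rolling(3).apply(np.ptp, raw=True)
#   0    NaN
#   1    NaN
#   2    2.0
#   3    2.0
#   4    2.0
#   dtype: float64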
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply("roll_sum", "sum", **kwargs)
_shared_docs["max"] = dedent(
"""
Calculate the %(name)s maximum.
Parameters
----------
*args, **kwargs
Arguments and keyword arguments to be passed into func.
"""
)
def max(self, *args, **kwargs):
nv.validate_window_func("max", args, kwargs)
return self._apply("roll_max", "max", **kwargs)
_shared_docs["min"] = dedent(
"""
Calculate the %(name)s minimum.
Parameters
----------
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with a Series.
DataFrame.%(name)s : Calling object with a DataFrame.
Series.min : Similar method for Series.
DataFrame.min : Similar method for DataFrame.
Examples
--------
Performing a rolling minimum with a window size of 3.
>>> s = pd.Series([4, 3, 5, 2, 6])
>>> s.rolling(3).min()
0 NaN
1 NaN
2 3.0
3 2.0
4 2.0
dtype: float64
"""
)
def min(self, *args, **kwargs):
nv.validate_window_func("min", args, kwargs)
return self._apply("roll_min", "min", **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply("roll_mean", "mean", **kwargs)
_shared_docs["median"] = dedent(
"""
Calculate the %(name)s median.
Parameters
----------
**kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed median.
Returns
-------
Series or DataFrame
Returned type is the same as the original object.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.median : Equivalent method for Series.
DataFrame.median : Equivalent method for DataFrame.
Examples
--------
Compute the rolling median of a series with a window size of 3.
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.rolling(3).median()
0 NaN
1 NaN
2 1.0
3 2.0
4 3.0
dtype: float64
"""
)
def median(self, **kwargs):
return self._apply("roll_median_c", "median", **kwargs)
_shared_docs["std"] = dedent(
"""
Calculate %(name)s standard deviation.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.std : Equivalent method for Series.
DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in Series.std is different than the default
`ddof` of 0 in numpy.std.
A minimum of one period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).std()
0 NaN
1 NaN
2 0.577350
3 1.000000
4 1.000000
5 1.154701
6 0.000000
dtype: float64
>>> s.expanding(3).std()
0 NaN
1 NaN
2 0.577350
3 0.957427
4 0.894427
5 0.836660
6 0.786796
dtype: float64
"""
)
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func("std", args, kwargs)
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(
libwindow.roll_var(arg, window, minp, index_as_array, self.closed, ddof)
)
return self._apply(
f, "std", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs["var"] = dedent(
"""
Calculate unbiased %(name)s variance.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.var : Equivalent method for Series.
DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in :meth:`Series.var` is different than the
default `ddof` of 0 in :func:`numpy.var`.
A minimum of 1 period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).var()
0 NaN
1 NaN
2 0.333333
3 1.000000
4 1.000000
5 1.333333
6 0.000000
dtype: float64
>>> s.expanding(3).var()
0 NaN
1 NaN
2 0.333333
3 0.916667
4 0.800000
5 0.700000
6 0.619048
dtype: float64
"""
)
def var(self, ddof=1, *args, **kwargs):
| nv.validate_window_func("var", args, kwargs) | pandas.compat.numpy.function.validate_window_func |
import pandas as pd
from .datastore import merge_postcodes
from .types import ErrorDefinition
from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use!
def validate_165():
error = ErrorDefinition(
code = '165',
description = 'Data entry for mother status is invalid.',
affected_fields = ['MOTHER', 'SEX', 'ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
valid_values = ['0','1']
# prepare to merge
oc3.reset_index(inplace=True)
header.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['EPS'] = (episodes['DECOM']>=collection_start) & (episodes['DECOM']<=collection_end)
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']).merge(oc3, on='CHILD', how='left')
# Raise error if provided <MOTHER> is not a valid value.
value_validity = merged['MOTHER'].notna() & (~merged['MOTHER'].isin(valid_values))
# If not provided
female = (merged['SEX']=='1')
eps_in_year = (merged['EPS_COUNT']>0)
none_provided = (merged['ACTIV'].isna() & merged['ACCOM'].isna() & merged['IN_TOUCH'].isna())
# If provided <MOTHER> must be a valid value. If not provided <MOTHER> then either <GENDER> is male or no episode record for current year and any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided
mask = value_validity | (merged['MOTHER'].isna() & (female & (eps_in_year | none_provided)))
# That is, if value not provided and child is a female with eps in current year or no values of IN_TOUCH, ACTIV and ACCOM, then raise error.
error_locs_eps = merged.loc[mask, 'index_eps']
error_locs_header = merged.loc[mask, 'index_er']
error_locs_oc3 = merged.loc[mask, 'index']
return {'Episodes': error_locs_eps.tolist(),
'Header': error_locs_header.dropna().unique().tolist(),
'OC3': error_locs_oc3.dropna().unique().tolist()}
return error, _validate
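# Hedged usage sketch of the ErrorDefinition factory pattern used by every validate_* in
# this module: each call returns an (ErrorDefinition, checker) pair, and the checker takes
# a dict of DataFrames plus a 'metadata' entry and returns {table_name: [row indices]}.
# The ``dfs`` argument is assumed to have been loaded elsewhere; nothing here is new API.
def _example_run_validator(dfs):
    """Illustrative only: run one validator over an already-loaded ``dfs`` dict."""
    error, checker = validate_165()
    issues = checker(dfs)  # e.g. {'Episodes': [...], 'Header': [...], 'OC3': [...]} or {} if tables are missing
    return error, issues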
def validate_1014():
error = ErrorDefinition(
code='1014',
description='UASC information is not required for care leavers',
affected_fields=['ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'UASC' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
uasc = dfs['UASC']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
# prepare to merge
oc3.reset_index(inplace=True)
uasc.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
date_check = (
((episodes['DECOM'] >= collection_start) & (episodes['DECOM'] <= collection_end))
| ((episodes['DEC'] >= collection_start) & (episodes['DEC'] <= collection_end))
| ((episodes['DECOM'] <= collection_start) & episodes['DEC'].isna())
)
episodes['EPS'] = date_check
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
# inner merge to take only episodes of children which are also found on the uasc table
merged = episodes.merge(uasc, on='CHILD', how='inner', suffixes=['_eps', '_sc']).merge(oc3, on='CHILD',
how='left')
# adding suffixes with the secondary merge here does not go so well yet.
some_provided = (merged['ACTIV'].notna() | merged['ACCOM'].notna() | merged['IN_TOUCH'].notna())
mask = (merged['EPS_COUNT'] == 0) & some_provided
error_locs_uasc = merged.loc[mask, 'index_sc']
error_locs_oc3 = merged.loc[mask, 'index']
return {'UASC': error_locs_uasc.unique().tolist(), 'OC3': error_locs_oc3.unique().tolist()}
return error, _validate
# !# not sure what this rule is actually supposed to be getting at - description is confusing
def validate_197B():
error = ErrorDefinition(
code='197B',
description="SDQ score or reason for no SDQ should be reported for 4- or 17-year-olds.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
(
(oc2['DOB'] + pd.DateOffset(years=4) == start) # ???
| (oc2['DOB'] + pd.DateOffset(years=17) == start)
)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
& oc2['SDQ_REASON'].isna()
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_157():
error = ErrorDefinition(
code='157',
description="Child is aged 4 years or over at the beginning of the year or 16 years or under at the end of the "
"year and Strengths and Difficulties Questionnaire (SDQ) 1 has been recorded as the reason for no "
"Strengths and Difficulties Questionnaire (SDQ) score.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
oc2['CONTINUOUSLY_LOOKED_AFTER']
& (oc2['DOB'] + pd.DateOffset(years=4) <= start)
& (oc2['DOB'] + pd.DateOffset(years=16) >= endo)
& oc2['SDQ_SCORE'].isna()
& (oc2['SDQ_REASON'] == 'SDQ1')
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
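# Hedged sketch of the DateOffset-based age window used above: a child is in scope when
# their 4th birthday falls on or before the collection start and their 16th birthday falls
# on or after the collection end. The dates below are made up for illustration.
def _example_age_window_check():
    import pandas as pd

    dob = pd.to_datetime('15/06/2010', format='%d/%m/%Y')
    start = pd.to_datetime('01/04/2020', format='%d/%m/%Y')
    end = pd.to_datetime('31/03/2021', format='%d/%m/%Y')
    in_scope = (dob + pd.DateOffset(years=4) <= start) and (dob + pd.DateOffset(years=16) >= end)
    return in_scope  # True: aged 4 or over at the start and 16 or under at the end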
def validate_357():
error = ErrorDefinition(
code='357',
description='If this is the first episode ever for this child, reason for new episode must be S. '
'Check whether there is an episode immediately preceding this one, which has been left out. '
'If not the reason for new episode code must be amended to S.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
eps = dfs['Episodes']
eps['DECOM'] = pd.to_datetime(eps['DECOM'], format='%d/%m/%Y', errors='coerce')
eps = eps.loc[eps['DECOM'].notnull()]
first_eps = eps.loc[eps.groupby('CHILD')['DECOM'].idxmin()]
errs = first_eps[first_eps['RNE'] != 'S'].index.to_list()
return {'Episodes': errs}
return error, _validate
def validate_117():
error = ErrorDefinition(
code='117',
description='Date of decision that a child should/should no longer be placed for adoption is beyond the current collection year or after the child ceased to be looked after.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_PLACED', 'DEC', 'REC', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placed_adoption = dfs['PlacedAdoption']
collection_end = dfs['metadata']['collection_end']
# datetime
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# Drop nans and continuing episodes
episodes = episodes.dropna(subset=['DECOM'])
episodes = episodes[episodes['REC'] != 'X1']
episodes = episodes.loc[episodes.groupby('CHILD')['DECOM'].idxmax()]
# prepare to merge
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
p4a_cols = ['DATE_PLACED', 'DATE_PLACED_CEASED']
# latest episodes
merged = episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
mask = (
(merged['DATE_PLACED'] > collection_end)
| (merged['DATE_PLACED'] > merged['DEC'])
| (merged['DATE_PLACED_CEASED'] > collection_end)
| (merged['DATE_PLACED_CEASED'] > merged['DEC'])
)
# If provided <DATE_PLACED> and/or <DATE_PLACED_CEASED> must not be > <COLLECTION_END_DATE> or <DEC> of latest episode where <REC> not = 'X1'
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
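# Hedged sketch of the "latest episode per child" selection used above: drop rows without
# a start date, exclude continuing episodes (REC == 'X1'), then keep the row with the
# maximum DECOM per CHILD via groupby().idxmax(). The data below are illustrative only.
def _example_latest_episode_per_child():
    import pandas as pd

    eps = pd.DataFrame({
        'CHILD': ['1', '1', '2'],
        'DECOM': ['01/01/2020', '01/06/2020', '01/02/2020'],
        'REC': ['X1', 'E12', 'E11'],
    })
    eps['DECOM'] = pd.to_datetime(eps['DECOM'], format='%d/%m/%Y', errors='coerce')
    eps = eps.dropna(subset=['DECOM'])
    eps = eps[eps['REC'] != 'X1']
    latest = eps.loc[eps.groupby('CHILD')['DECOM'].idxmax()]
    return latest  # one row per child: the 01/06/2020 episode for '1', the only episode for '2'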
def validate_118():
error = ErrorDefinition(
code='118',
description='Date of decision that a child should no longer be placed for adoption is before the current collection year or before the date the child started to be looked after.',
affected_fields=['DATE_PLACED_CEASED', 'DECOM', 'LS']
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
code_list = ['V3', 'V4']
# datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
filter_by_ls = episodes[~(episodes['LS'].isin(code_list))]
earliest_episode_idxs = filter_by_ls.groupby('CHILD')['DECOM'].idxmin()
earliest_episodes = episodes[episodes.index.isin(earliest_episode_idxs)]
# prepare to merge
placed_adoption.reset_index(inplace=True)
earliest_episodes.reset_index(inplace=True)
# merge
merged = earliest_episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
# drop rows where DATE_PLACED_CEASED is not provided
merged = merged.dropna(subset=['DATE_PLACED_CEASED'])
# If provided <DATE_PLACED_CEASED> must not be prior to <COLLECTION_START_DATE> or <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
mask = (merged['DATE_PLACED_CEASED'] < merged['DECOM']) | (merged['DATE_PLACED_CEASED'] < collection_start)
# error locations
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_352():
error = ErrorDefinition(
code='352',
description='Child who started to be looked after was aged 18 or over.',
affected_fields=['DECOM', 'RNE'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
care_start = episodes_merged['RNE'].str.upper().astype(str).isin(['S'])
started_over_18 = episodes_merged['DOB18'] <= episodes_merged['DECOM']
error_mask = care_start & started_over_18
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_209():
error = ErrorDefinition(
code='209',
description='Child looked after is of school age and should not have an unknown Unique Pupil Number (UPN) code of UN1.',
affected_fields=['UPN', 'DOB']
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
collection_start = dfs['metadata']['collection_start']
# convert to datetime
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
yr = collection_start.year - 1
reference_date = pd.to_datetime('31/08/' + str(yr), format='%d/%m/%Y', errors='coerce')
# If <DOB> >= 4 years prior to 31/08/YYYY then <UPN> should not be 'UN1' Note: YYYY in this instance refers to the year prior to the collection start (for collection year 2019-2020, it would be looking at the 31/08/2018).
mask = (reference_date >= (header['DOB'] + pd.offsets.DateOffset(years=4))) & (header['UPN'] == 'UN1')
# error locations
error_locs_header = header.index[mask]
return {'Header': error_locs_header.tolist()}
return error, _validate
def validate_198():
error = ErrorDefinition(
code='198',
description="Child has not been looked after continuously for at least 12 months at 31 March but a reason "
"for no Strengths and Difficulties (SDQ) score has been completed. ",
affected_fields=['SDQ_REASON'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_REASON'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_185():
error = ErrorDefinition(
code='185',
description="Child has not been looked after continuously for at least 12 months at " +
"31 March but a Strengths and Difficulties (SDQ) score has been completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_186():
error = ErrorDefinition(
code='186',
description="Children aged 4 or over at the start of the year and children aged under 17 at the " +
"end of the year and who have been looked after for at least 12 months continuously " +
"should have a Strengths and Difficulties (SDQ) score completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_start_str = dfs['metadata']['collection_start']
collection_end_str = dfs['metadata']['collection_end']
collection_start = pd.to_datetime(collection_start_str, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2 = add_CLA_column(dfs, 'OC2')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
oc2['17th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=17)
error_mask = (
(oc2['4th_bday'] <= collection_start)
& (oc2['17th_bday'] > collection_end)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_187():
error = ErrorDefinition(
code='187',
description="Child cannot be looked after continuously for 12 months at " +
"31 March (OC2) and have any of adoption or care leavers returns completed.",
affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR',  # AD1
'IN_TOUCH', 'ACTIV', 'ACCOM'],  # OC3
)
def _validate(dfs):
if (
'OC3' not in dfs
or 'AD1' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
ad1, oc3 = add_CLA_column(dfs, ['AD1', 'OC3'])
# OC3
should_be_blank = ['IN_TOUCH', 'ACTIV', 'ACCOM']
oc3_mask = oc3['CONTINUOUSLY_LOOKED_AFTER'] & oc3[should_be_blank].notna().any(axis=1)
oc3_error_locs = oc3[oc3_mask].index.to_list()
# AD1
should_be_blank = ['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR']
ad1_mask = ad1['CONTINUOUSLY_LOOKED_AFTER'] & ad1[should_be_blank].notna().any(axis=1)
ad1_error_locs = ad1[ad1_mask].index.to_list()
return {'AD1': ad1_error_locs,
'OC3': oc3_error_locs}
return error, _validate
def validate_188():
error = ErrorDefinition(
code='188',
description="Child is aged under 4 years at the end of the year, "
"but a Strengths and Difficulties (SDQ) score or a reason "
"for no SDQ score has been completed. ",
affected_fields=['SDQ_SCORE', 'SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_end_str = dfs['metadata']['collection_end']
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
error_mask = (
(oc2['4th_bday'] > collection_end)
& oc2[['SDQ_SCORE', 'SDQ_REASON']].notna().any(axis=1)
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_190():
error = ErrorDefinition(
code='190',
description="Child has not been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been completed.",
affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],  # OC2
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_blank = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
mask = ~oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_blank].notna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_191():
error = ErrorDefinition(
code='191',
description="Child has been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been left blank.",
affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'], # OC2
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_present = ['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE']
mask = oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_present].isna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_607():
error = ErrorDefinition(
code='607',
description='Child ceased to be looked after in the year, but mother field has not been completed.',
affected_fields=['DEC', 'REC', 'MOTHER', 'LS', 'SEX']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
code_list = ['V3', 'V4']
# convert to datetiime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# CEASED_TO_BE_LOOKED_AFTER = DEC is not null and REC is filled but not equal to X1
CEASED_TO_BE_LOOKED_AFTER = merged['DEC'].notna() & ((merged['REC'] != 'X1') & merged['REC'].notna())
# and <LS> not = ‘V3’ or ‘V4’
check_LS = ~(merged['LS'].isin(code_list))
# and <DEC> is in <CURRENT_COLLECTION_YEAR
check_DEC = (collection_start <= merged['DEC']) & (merged['DEC'] <= collection_end)
# Where <CEASED_TO_BE_LOOKED_AFTER> = ‘Y’, and <LS> not = ‘V3’ or ‘V4’ and <DEC> is in <CURRENT_COLLECTION_YEAR> and <SEX> = ‘2’ then <MOTHER> should be provided.
mask = CEASED_TO_BE_LOOKED_AFTER & check_LS & check_DEC & (merged['SEX'] == '2') & (merged['MOTHER'].isna())
header_error_locs = merged.loc[mask, 'index_er']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_210():
error = ErrorDefinition(
code='210',
description='Children looked after for more than a week at 31 March should not have an unknown Unique Pupil Number (UPN) code of UN4.',
affected_fields=['UPN', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_end = dfs['metadata']['collection_end']
# convert to datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
yr = collection_end.year
reference_date = pd.to_datetime('24/03/' + str(yr), format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
# the logical way is to merge left on UPN but that will be a one to many merge and may not go as well as a many to one merge that we've been doing.
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# If <UPN> = 'UN4' then no episode <DECOM> must be >= 24/03/YYYY Note: YYYY refers to the current collection year.
mask = (merged['UPN'] == 'UN4') & (merged['DECOM'] >= reference_date)
# error locations
error_locs_header = merged.loc[mask, 'index_er']
error_locs_eps = merged.loc[mask, 'index_eps']
return {'Episodes': error_locs_eps.tolist(), 'Header': error_locs_header.unique().tolist()}
return error, _validate
def validate_1010():
error = ErrorDefinition(
code='1010',
description='This child has no episodes loaded for current year even though there was an open episode of '
+ 'care at the end of the previous year, and care leaver data has been entered.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
oc3 = dfs['OC3']
# convert DECOM to datetime, drop missing/invalid values, then sort by CHILD and DECOM
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last = episodes_last.dropna(subset=['DECOM']).sort_values(['CHILD', 'DECOM'], ascending=True)
# Keep only the final episode for each child (ie where the following row has a different CHILD value)
episodes_last = episodes_last[
episodes_last['CHILD'].shift(-1) != episodes_last['CHILD']
]
# Keep only the final episodes that were still open
episodes_last = episodes_last[episodes_last['DEC'].isna()]
# The remaining children ought to have episode data in the current year if they are in OC3
has_current_episodes = oc3['CHILD'].isin(episodes['CHILD'])
has_open_episode_last = oc3['CHILD'].isin(episodes_last['CHILD'])
error_mask = ~has_current_episodes & has_open_episode_last
validation_error_locations = oc3.index[error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
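# Hedged sketch of the "final episode per child" trick used above: after sorting by CHILD
# and DECOM, a row is the last one for its child exactly when the next row's CHILD differs.
# The data below are illustrative only.
def _example_last_open_episode_per_child():
    import pandas as pd

    eps = pd.DataFrame({
        'CHILD': ['1', '1', '2'],
        'DECOM': pd.to_datetime(['2020-01-01', '2020-06-01', '2020-02-01']),
        'DEC': [pd.NaT, pd.NaT, pd.Timestamp('2020-03-01')],
    }).sort_values(['CHILD', 'DECOM'])
    last_rows = eps[eps['CHILD'].shift(-1) != eps['CHILD']]
    still_open = last_rows[last_rows['DEC'].isna()]
    return still_open  # only child '1': their final episode has no end date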
def validate_525():
error = ErrorDefinition(
code='525',
description='A child for whom the decision to be placed for adoption has been reversed cannot be adopted during the year.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR',
'LS_ADOPTR']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs or 'AD1' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
ad1 = dfs['AD1']
# prepare to merge
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = placed_adoption.merge(ad1, on='CHILD', how='left', suffixes=['_placed', '_ad1'])
# If <DATE_PLACED_CEASED> not Null, then <DATE_INT>; <DATE_MATCH>; <FOSTER_CARE>; <NB_ADOPTR>; <SEX_ADOPTR>; and <LS_ADOPTR> should not be provided
mask = merged['DATE_PLACED_CEASED'].notna() & (
merged['DATE_INT'].notna() | merged['DATE_MATCH'].notna() | merged['FOSTER_CARE'].notna() |
merged['NB_ADOPTR'].notna() | merged['SEX_ADOPTR'].notna() | merged['LS_ADOPTR'].notna())
# error locations
pa_error_locs = merged.loc[mask, 'index_placed']
ad_error_locs = merged.loc[mask, 'index_ad1']
# return result
return {'PlacedAdoption': pa_error_locs.tolist(), 'AD1': ad_error_locs.tolist()}
return error, _validate
def validate_335():
error = ErrorDefinition(
code='335',
description='The current foster value (0) suggests that child is not adopted by current foster carer, but last placement is A2, A3, or A5. Or the current foster value (1) suggests that child is adopted by current foster carer, but last placement is A1, A4 or A6.',
affected_fields=['PLACE', 'FOSTER_CARE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'AD1' not in dfs:
return {}
else:
episodes = dfs['Episodes']
ad1 = dfs['AD1']
# prepare to merge
episodes.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = episodes.merge(ad1, on='CHILD', how='left', suffixes=['_eps', '_ad1'])
# Where <PL> = 'A2', 'A3' or 'A5' and <REC> = 'E1', 'E11', 'E12' <FOSTER_CARE> should not be '0'; Where <PL> = ‘A1’, ‘A4’ or ‘A6’ and <REC> = ‘E1’, ‘E11’, ‘E12’ <FOSTER_CARE> should not be ‘1’.
mask = (
merged['REC'].isin(['E1', 'E11', 'E12']) & (
(merged['PLACE'].isin(['A2', 'A3', 'A5']) & (merged['FOSTER_CARE'].astype(str) == '0'))
| (merged['PLACE'].isin(['A1', 'A4', 'A6']) & (merged['FOSTER_CARE'].astype(str) == '1'))
)
)
eps_error_locs = merged.loc[mask, 'index_eps']
ad1_error_locs = merged.loc[mask, 'index_ad1']
# use .unique since join is many to one
return {'Episodes': eps_error_locs.tolist(), 'AD1': ad1_error_locs.unique().tolist()}
return error, _validate
def validate_215():
error = ErrorDefinition(
code='215',
description='Child has care leaver information but one or more data items relating to children looked after for 12 months have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK',
'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
)
def _validate(dfs):
if 'OC3' not in dfs or 'OC2' not in dfs:
return {}
else:
oc3 = dfs['OC3']
oc2 = dfs['OC2']
# prepare to merge
oc3.reset_index(inplace=True)
oc2.reset_index(inplace=True)
merged = oc3.merge(oc2, on='CHILD', how='left', suffixes=['_3', '_2'])
# If any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided then <CONVICTED>; <HEALTH_CHECK>; <IMMUNISATIONS>; <TEETH_CHECK>; <HEALTH_ASSESSMENT>; <SUBSTANCE MISUSE>; <INTERVENTION_RECEIVED>; <INTERVENTION_OFFERED>; should not be provided
mask = (merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna()) & (
merged['CONVICTED'].notna() | merged['HEALTH_CHECK'].notna() | merged['IMMUNISATIONS'].notna() |
merged['TEETH_CHECK'].notna() | merged['HEALTH_ASSESSMENT'].notna() | merged[
'SUBSTANCE_MISUSE'].notna() | merged['INTERVENTION_RECEIVED'].notna() | merged[
'INTERVENTION_OFFERED'].notna())
# error locations
oc3_error_locs = merged.loc[mask, 'index_3']
oc2_error_locs = merged.loc[mask, 'index_2']
return {'OC3': oc3_error_locs.tolist(), 'OC2': oc2_error_locs.tolist()}
return error, _validate
def validate_399():
error = ErrorDefinition(
code='399',
description='Mother field, review field or participation field are completed but '
+ 'child is looked after under legal status V3 or V4.',
affected_fields=['MOTHER', 'LS', 'REVIEW', 'REVIEW_CODE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs or 'Reviews' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
reviews = dfs['Reviews']
code_list = ['V3', 'V4']
# prepare to merge
episodes['index_eps'] = episodes.index
header['index_hdr'] = header.index
reviews['index_revs'] = reviews.index
# merge
merged = (episodes.merge(header, on='CHILD', how='left')
.merge(reviews, on='CHILD', how='left'))
# If <LS> = 'V3' or 'V4' then <MOTHER>, <REVIEW> and <REVIEW_CODE> should not be provided
mask = merged['LS'].isin(code_list) & (
merged['MOTHER'].notna() | merged['REVIEW'].notna() | merged['REVIEW_CODE'].notna())
# Error locations
eps_errors = merged.loc[mask, 'index_eps']
header_errors = merged.loc[mask, 'index_hdr'].unique()
revs_errors = merged.loc[mask, 'index_revs'].unique()
return {'Episodes': eps_errors.tolist(),
'Header': header_errors.tolist(),
'Reviews': revs_errors.tolist()}
return error, _validate
def validate_189():
error = ErrorDefinition(
code='189',
description='Child is aged 17 years or over at the beginning of the year, but a Strengths and Difficulties '
+ '(SDQ) score or a reason for no Strengths and Difficulties (SDQ) score has been completed.',
affected_fields=['DOB', 'SDQ_SCORE', 'SDQ_REASON']
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
collection_start = dfs['metadata']['collection_start']
# datetime format allows appropriate comparison between dates
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# If <DOB> >17 years prior to <COLLECTION_START_DATE> then <SDQ_SCORE> and <SDQ_REASON> should not be provided
mask = ((oc2['DOB'] + pd.offsets.DateOffset(years=17)) <= collection_start) & (
oc2['SDQ_REASON'].notna() | oc2['SDQ_SCORE'].notna())
# That is, raise an error if collection_start >= DOB + 17 years
oc_error_locs = oc2.index[mask]
return {'OC2': oc_error_locs.tolist()}
return error, _validate
def validate_226():
error = ErrorDefinition(
code='226',
description='Reason for placement change is not required.',
affected_fields=['REASON_PLACE_CHANGE', 'PLACE']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
code_list = ['T0', 'T1', 'T2', 'T3', 'T4']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# create column to see previous REASON_PLACE_CHANGE
episodes = episodes.sort_values(['CHILD', 'DECOM'])
episodes['PREVIOUS_REASON'] = episodes.groupby('CHILD')['REASON_PLACE_CHANGE'].shift(1)
# If <PL> = 'T0'; 'T1'; 'T2'; 'T3' or 'T4' then <REASON_PLACE_CHANGE> should be null in current episode and current episode - 1
mask = episodes['PLACE'].isin(code_list) & (
episodes['REASON_PLACE_CHANGE'].notna() | episodes['PREVIOUS_REASON'].notna())
# error locations
error_locs = episodes.index[mask]
return {'Episodes': error_locs.tolist()}
return error, _validate
def validate_358():
error = ErrorDefinition(
code='358',
description='Child with this legal status should not be under 10.',
affected_fields=['DECOM', 'DOB', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['J1', 'J2', 'J3']
# convert dates to datetime format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# Where <LS> = ‘J1’ or ‘J2’ or ‘J3’ then <DOB> should <= to 10 years prior to <DECOM>
mask = merged['LS'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=10) < merged['DECOM'])
# That is, raise error if DECOM > DOB + 10years
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_407():
error = ErrorDefinition(
code='407',
description='Reason episode ceased is Special Guardianship Order, but child has reached age 18.',
affected_fields=['DEC', 'DOB', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['E45', 'E46', 'E47', 'E48']
# convert dates to datetime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# If <REC> = ‘E45’ or ‘E46’ or ‘E47’ or ‘E48’ then <DOB> must be < 18 years prior to <DEC>
mask = merged['REC'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=18) < merged['DEC'])
# That is, raise error if DEC > DOB + 18 years
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_1007():
error = ErrorDefinition(
code='1007',
description='Care leaver information is not required for 17- or 18-year olds who are still looked after.',
affected_fields=['DEC', 'REC', 'DOB', 'IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_end = dfs['metadata']['collection_end']
# convert dates to datetime format
oc3['DOB'] = pd.to_datetime(oc3['DOB'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
oc3.reset_index(inplace=True)
merged = episodes.merge(oc3, on='CHILD', how='left', suffixes=['_eps', '_oc3'])
# If <DOB> < 19 and >= to 17 years prior to <COLLECTION_END_DATE> and current episode <DEC> and or <REC> not provided then <IN_TOUCH>, <ACTIV> and <ACCOM> should not be provided
check_age = (merged['DOB'] + pd.offsets.DateOffset(years=17) <= collection_end) & (
merged['DOB'] + pd.offsets.DateOffset(years=19) > collection_end)
# That is, check that 17<=age<19
check_dec_rec = merged['REC'].isna() | merged['DEC'].isna()
# if either DEC or REC are absent
mask = check_age & check_dec_rec & (
merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna())
# Then raise an error if either IN_TOUCH, ACTIV, or ACCOM have been provided too
# error locations
oc3_error_locs = merged.loc[mask, 'index_oc3']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'OC3': oc3_error_locs.unique().tolist()}
return error, _validate
def validate_442():
error = ErrorDefinition(
code='442',
description='Unique Pupil Number (UPN) field is not completed.',
affected_fields=['UPN', 'LS']
)
def _validate(dfs):
if ('Episodes' not in dfs) or ('Header' not in dfs):
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
code_list = ['V3', 'V4']
# merge left on episodes to get all children for which episodes have been recorded even if they do not exist on the header.
merged = episodes.merge(header, on=['CHILD'], how='left', suffixes=['_eps', '_er'])
# Where any episode present, with an <LS> not = 'V3' or 'V4' then <UPN> must be provided
mask = (~merged['LS'].isin(code_list)) & merged['UPN'].isna()
episode_error_locs = merged.loc[mask, 'index_eps']
header_error_locs = merged.loc[mask, 'index_er']
return {'Episodes': episode_error_locs.tolist(),
# Select unique values since many episodes are joined to one header
# and multiple errors will be raised for the same index.
'Header': header_error_locs.dropna().unique().tolist()}
return error, _validate
def validate_344():
error = ErrorDefinition(
code='344',
description='The record shows the young person has died or returned home to live with parent(s) or someone with parental responsibility for a continuous period of 6 months or more, but activity and/or accommodation on leaving care have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'DIED' or 'RHOM' then <ACTIV> and <ACCOM> should not be provided
mask = ((oc3['IN_TOUCH'] == 'DIED') | (oc3['IN_TOUCH'] == 'RHOM')) & (
oc3['ACTIV'].notna() | oc3['ACCOM'].notna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_345():
error = ErrorDefinition(
code='345',
description='The data collection record shows the local authority is in touch with this young person, but activity and/or accommodation data items are zero.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'Yes' then <ACTIV> and <ACCOM> must be provided
mask = (oc3['IN_TOUCH'] == 'YES') & (oc3['ACTIV'].isna() | oc3['ACCOM'].isna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_384():
error = ErrorDefinition(
code='384',
description='A child receiving respite care cannot be in a long-term foster placement ',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# Where <LS> = 'V3' or 'V4' then <PL> must not be 'U1' or 'U4'
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
(episodes['PLACE'] == 'U1') | (episodes['PLACE'] == 'U4'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_390():
error = ErrorDefinition(
code='390',
description='Reason episode ceased is adopted but child has not been previously placed for adoption.',
affected_fields=['PLACE', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# If <REC> = 'E11' or 'E12' then <PL> must be one of 'A3', 'A4', 'A5' or 'A6'
mask = ((episodes['REC'] == 'E11') | (episodes['REC'] == 'E12')) & ~(
(episodes['PLACE'] == 'A3') | (episodes['PLACE'] == 'A4') | (episodes['PLACE'] == 'A5') | (
episodes['PLACE'] == 'A6'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_378():
error = ErrorDefinition(
code='378',
description='A child who is placed with parent(s) cannot be looked after under a single period of accommodation under Section 20 of the Children Act 1989.',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# & binds more tightly than ==, so the brackets are necessary here
mask = (episodes['PLACE'] == 'P1') & (episodes['LS'] == 'V2')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_398():
error = ErrorDefinition(
code='398',
description='Distance field completed but child looked after under legal status V3 or V4.',
affected_fields=['LS', 'HOME_POST', 'PL_POST']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
episodes['HOME_POST'].notna() | episodes['PL_POST'].notna())
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_451():
error = ErrorDefinition(
code='451',
description='Child is still freed for adoption, but freeing orders could not be applied for since 30 December 2005.',
affected_fields=['DEC', 'REC', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = episodes['DEC'].isna() & episodes['REC'].isna() & (episodes['LS'] == 'D1')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_519():
error = ErrorDefinition(
code='519',
description='Data entered on the legal status of adopters shows civil partnership couple, but data entered on genders of adopters does not show it as a couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = (ad1['LS_ADOPTR'] == 'L2') & (
(ad1['SEX_ADOPTR'] != 'MM') & (ad1['SEX_ADOPTR'] != 'FF') & (ad1['SEX_ADOPTR'] != 'MF'))
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_520():
error = ErrorDefinition(
code='520',
description='Data entry on the legal status of adopters shows different gender married couple but data entry on genders of adopters shows it as a same gender couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
# check condition
mask = (ad1['LS_ADOPTR'] == 'L11') & (ad1['SEX_ADOPTR'] != 'MF')
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_522():
error = ErrorDefinition(
code='522',
description='Date of decision that the child should be placed for adoption must be on or before the date that a child should no longer be placed for adoption.',
affected_fields=['DATE_PLACED', 'DATE_PLACED_CEASED']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
# Convert to datetimes
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
# Boolean mask
mask = placed_adoption['DATE_PLACED_CEASED'] > placed_adoption['DATE_PLACED']
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_563():
error = ErrorDefinition(
code='563',
description='The child should no longer be placed for adoption but the date of the decision that the child should be placed for adoption is blank',
affected_fields=['DATE_PLACED', 'REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
mask = placed_adoption['REASON_PLACED_CEASED'].notna() & placed_adoption['DATE_PLACED_CEASED'].notna() & \
placed_adoption['DATE_PLACED'].isna()
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_544():
error = ErrorDefinition(
code='544',
description="Any child who has conviction information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['CONVICTED', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
convict = oc2['CONVICTED'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = convict & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_634():
error = ErrorDefinition(
code='634',
description='There are entries for previous permanence options, but child has not started to be looked after from 1 April 2016 onwards.',
affected_fields=['LA_PERM', 'PREV_PERM', 'DATE_PERM', 'DECOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PrevPerm' not in dfs:
return {}
else:
episodes = dfs['Episodes']
prevperm = dfs['PrevPerm']
collection_start = dfs['metadata']['collection_start']
# convert date field to appropriate format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# use each child's latest DECOM, as it is the most likely to satisfy the condition
episodes['LAST_DECOM'] = episodes.groupby('CHILD')['DECOM'].transform('max')
# prepare to merge
episodes.reset_index(inplace=True)
prevperm.reset_index(inplace=True)
merged = prevperm.merge(episodes, on='CHILD', how='left', suffixes=['_prev', '_eps'])
# If <PREV_PERM> or <LA_PERM> or <DATE_PERM> provided, then at least 1 episode must have a <DECOM> later than 01/04/2016
mask = (merged['PREV_PERM'].notna() | merged['DATE_PERM'].notna() | merged['LA_PERM'].notna()) & (
merged['LAST_DECOM'] < collection_start)
eps_error_locs = merged.loc[mask, 'index_eps']
prevperm_error_locs = merged.loc[mask, 'index_prev']
return {'Episodes': eps_error_locs.unique().tolist(), 'PrevPerm': prevperm_error_locs.unique().tolist()}
return error, _validate
def validate_158():
error = ErrorDefinition(
code='158',
description='If a child has been recorded as receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be left blank.',
affected_fields=['INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
error_mask = oc2['INTERVENTION_RECEIVED'].astype(str).eq('1') & oc2['INTERVENTION_OFFERED'].notna()
error_locations = oc2.index[error_mask]
return {'OC2': error_locations.tolist()}
return error, _validate
def validate_133():
error = ErrorDefinition(
code='133',
description="Data entry for accommodation after leaving care is invalid. If reporting on a child's accommodation after leaving care the data entry must be valid.",
affected_fields=['ACCOM'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
valid_codes = ['B1', 'B2', 'C1', 'C2', 'D1', 'D2', 'E1', 'E2', 'G1', 'G2', 'H1', 'H2', 'K1', 'K2', 'R1',
'R2', 'S2', 'T1', 'T2', 'U1', 'U2', 'V1', 'V2', 'W1', 'W2', 'X2', 'Y1', 'Y2', 'Z1', 'Z2',
'0']
error_mask = ~oc3['ACCOM'].isna() & ~oc3['ACCOM'].isin(valid_codes)
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.tolist()}
return error, _validate
def validate_565():
error = ErrorDefinition(
code='565',
description='The date that the child started to be missing or away from placement without authorisation has been completed but whether the child was missing or away from placement without authorisation has not been completed.',
affected_fields=['MISSING', 'MIS_START']
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
mask = missing['MIS_START'].notna() & missing['MISSING'].isna()
error_locations = missing.index[mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_433():
error = ErrorDefinition(
code='433',
description='The reason for new episode suggests that this is a continuation episode, but the episode does not start on the same day as the last episode finished.',
affected_fields=['RNE', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['original_index'] = episodes.index
episodes.sort_values(['CHILD', 'DECOM', 'DEC'], inplace=True)
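# after sorting by child and start date, shift(1) pulls the previous row's DEC and CHILD onto each episode so it can be compared with the episode before it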
episodes[['PREVIOUS_DEC', 'PREVIOUS_CHILD']] = episodes[['DEC', 'CHILD']].shift(1)
rne_is_ongoing = episodes['RNE'].str.upper().astype(str).isin(['P', 'L', 'T', 'U', 'B'])
date_mismatch = episodes['PREVIOUS_DEC'] != episodes['DECOM']
missing_date = episodes['PREVIOUS_DEC'].isna() | episodes['DECOM'].isna()
same_child = episodes['PREVIOUS_CHILD'] == episodes['CHILD']
error_mask = rne_is_ongoing & (date_mismatch | missing_date) & same_child
error_locations = episodes['original_index'].loc[error_mask].sort_values()
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_437():
error = ErrorDefinition(
code='437',
description='Reason episode ceased is child has died or is aged 18 or over but there are further episodes.',
affected_fields=['REC'],
)
# !# potential false negatives, as this only operates on the current year's data
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes.sort_values(['CHILD', 'DECOM'], inplace=True)
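# shift(-1) pulls the next row's DECOM and CHILD onto each episode so that later episodes for the same child can be detected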
episodes[['NEXT_DECOM', 'NEXT_CHILD']] = episodes[['DECOM', 'CHILD']].shift(-1)
# drop rows with missing DECOM as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
ceased_e2_e15 = episodes['REC'].str.upper().astype(str).isin(['E2', 'E15'])
has_later_episode = episodes['CHILD'] == episodes['NEXT_CHILD']
error_mask = ceased_e2_e15 & has_later_episode
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_547():
error = ErrorDefinition(
code='547',
description="Any child who has health promotion information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
healthck = oc2['HEALTH_CHECK'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = healthck & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_635():
error = ErrorDefinition(
code='635',
description='There are entries for date of order and local authority code where previous permanence option was arranged but previous permanence code is Z1',
affected_fields=['LA_PERM', 'DATE_PERM', 'PREV_PERM']
)
def _validate(dfs):
if 'PrevPerm' not in dfs:
return {}
else:
prev_perm = dfs['PrevPerm']
# raise an error if either LA_PERM or DATE_PERM is present, yet PREV_PERM is absent.
mask = ((prev_perm['LA_PERM'].notna() | prev_perm['DATE_PERM'].notna()) & prev_perm['PREV_PERM'].isna())
error_locations = prev_perm.index[mask]
return {'PrevPerm': error_locations.to_list()}
return error, _validate
def validate_550():
error = ErrorDefinition(
code='550',
description='A placement provider code of PR0 can only be associated with placement P1.',
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = (episodes['PLACE'] != 'P1') & episodes['PLACE_PROVIDER'].eq('PR0')
validation_error_locations = episodes.index[mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_217():
error = ErrorDefinition(
code='217',
description='Children who are placed for adoption with current foster carers (placement types A3 or A5) must have a reason for new episode of S, T or U.',
affected_fields=['PLACE', 'DECOM', 'RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
max_decom_allowed = pd.to_datetime('01/04/2015', format='%d/%m/%Y', errors='coerce')
reason_new_ep = ['S', 'T', 'U']
place_codes = ['A3', 'A5']
mask = (episodes['PLACE'].isin(place_codes) & (episodes['DECOM'] >= max_decom_allowed)) & ~episodes[
'RNE'].isin(reason_new_ep)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_518():
error = ErrorDefinition(
code='518',
description='If reporting legal status of adopters is L4 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L4') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_517():
error = ErrorDefinition(
code='517',
description='If reporting legal status of adopters is L3 then the genders of adopters should be coded as MF. MF = the adopting couple are male and female.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L3') & ~AD1['SEX_ADOPTR'].isin(['MF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_558():
error = ErrorDefinition(
code='558',
description='If a child has been adopted, then the decision to place them for adoption has not been disrupted and the date of the decision that a child should no longer be placed for adoption should be left blank. If the REC code is either E11 or E12 then the DATE_PLACED_CEASED date should not be provided.',
affected_fields=['DATE_PLACED_CEASED', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
rec_codes = ['E11', 'E12']
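# episodes that ceased because the child was adopted (E11/E12); for these, DATE_PLACED_CEASED should not have been completed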
placeEpisodes = episodes[episodes['REC'].isin(rec_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED_CEASED'].notna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_453():
error = ErrorDefinition(
code='453',
description='Contradiction between placement distance in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_DISTANCE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['PL_DISTANCE'] = pd.to_numeric(episodes['PL_DISTANCE'], errors='coerce')
episodes_last['PL_DISTANCE'] = pd.to_numeric(episodes_last['PL_DISTANCE'], errors='coerce')
# drop rows with missing DECOM before finding idxmin/max, as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
episodes_last = episodes_last.dropna(subset=['DECOM'])
episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
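# flag children present in both years whose final episode last year was still open and continues (same RNE) but whose placement distance differs by 0.2 or more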
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
different_pl_dist = abs(episodes_merged['PL_DISTANCE'] - episodes_merged['PL_DISTANCE_last']) >= 0.2
error_mask = in_both_years & same_rne & last_year_open & different_pl_dist
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_516():
error = ErrorDefinition(
code='516',
description='The episode data submitted for this child does not show that he/she was with their former foster carer(s) during the year. If the code in the reason episode ceased is E45 or E46 the child must have a placement code of U1 to U6.',
affected_fields=['REC', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
place_codes = ['U1', 'U2', 'U3', 'U4', 'U5', 'U6']
rec_codes = ['E45', 'E46']
error_mask = episodes['REC'].isin(rec_codes) & ~episodes['PLACE'].isin(place_codes)
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_511():
error = ErrorDefinition(
code='511',
description='If reporting that the number of person(s) adopting the looked after child is two adopters then the code should only be MM, FF or MF. MM = the adopting couple are both males; FF = the adopting couple are both females; MF = The adopting couple are male and female.',
affected_fields=['NB_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
mask = AD1['NB_ADOPTR'].astype(str).eq('2') & AD1['SEX_ADOPTR'].isin(['M1', 'F1'])
validation_error_mask = mask
validation_error_locations = AD1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_524():
error = ErrorDefinition(
code='524',
description='If reporting legal status of adopters is L12 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L12') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_441():
error = ErrorDefinition(
code='441',
description='Participation method indicates child was 4 years old or over at the time of the review, but the date of birth and review date indicates the child was under 4 years old.',
affected_fields=['DOB', 'REVIEW', 'REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
reviews = dfs['Reviews']
reviews['DOB'] = pd.to_datetime(reviews['DOB'], format='%d/%m/%Y', errors='coerce')
reviews['REVIEW'] = pd.to_datetime(reviews['REVIEW'], format='%d/%m/%Y', errors='coerce')
reviews = reviews.dropna(subset=['REVIEW', 'DOB'])
mask = reviews['REVIEW_CODE'].isin(['PN1', 'PN2', 'PN3', 'PN4', 'PN5', 'PN6', 'PN7']) & (
reviews['REVIEW'] < reviews['DOB'] + pd.offsets.DateOffset(years=4))
validation_error_mask = mask
validation_error_locations = reviews.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_184():
error = ErrorDefinition(
code='184',
description='Date of decision that a child should be placed for adoption is before the child was born.',
affected_fields=['DATE_PLACED', # PlacedAdoption
'DOB'], # Header
)
def _validate(dfs):
if 'Header' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
child_record = dfs['Header']
placed_for_adoption = dfs['PlacedAdoption']
all_data = (placed_for_adoption
.reset_index()
.merge(child_record, how='left', on='CHILD', suffixes=[None, '_P4A']))
all_data['DATE_PLACED'] = pd.to_datetime(all_data['DATE_PLACED'], format='%d/%m/%Y', errors='coerce')
all_data['DOB'] = pd.to_datetime(all_data['DOB'], format='%d/%m/%Y', errors='coerce')
mask = (all_data['DATE_PLACED'] >= all_data['DOB']) | all_data['DATE_PLACED'].isna()
validation_error = ~mask
validation_error_locations = all_data[validation_error]['index'].unique()
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_612():
error = ErrorDefinition(
code='612',
description="Date of birth field has been completed but mother field indicates child is not a mother.",
affected_fields=['SEX', 'MOTHER', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
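# MC_DOB should not be completed for a female child (SEX = 2) who is not recorded as a mother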
error_mask = (
((header['MOTHER'].astype(str) == '0') | header['MOTHER'].isna())
& (header['SEX'].astype(str) == '2')
& header['MC_DOB'].notna()
)
validation_error_locations = header.index[error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_552():
"""
This error checks that the first adoption episode is after the last decision !
If there are multiple of either there may be unexpected results !
"""
error = ErrorDefinition(
code="552",
description="Date of Decision to place a child for adoption should be on or prior to the date that the child was placed for adoption.",
# Field that defines date of decision to place a child for adoption is DATE_PLACED and the start of adoption is defined by DECOM with 'A' placement types.
affected_fields=['DATE_PLACED', 'DECOM'],
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
# get the required datasets
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
# keep index values so that they stay the same when needed later on for error locations
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
adoption_eps = episodes[episodes['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])].copy()
# find most recent adoption decision
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
# remove rows where the date of the decision (DATE_PLACED) has not been filled.
placed_adoption = placed_adoption[placed_adoption['DATE_PLACED'].notna()]
placed_adoption_inds = placed_adoption.groupby('CHILD')['DATE_PLACED'].idxmax(skipna=True)
last_decision = placed_adoption.loc[placed_adoption_inds]
# first time child started adoption
adoption_eps["DECOM"] = pd.to_datetime(adoption_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
adoption_eps = adoption_eps[adoption_eps['DECOM'].notna()]
adoption_eps_inds = adoption_eps.groupby('CHILD')['DECOM'].idxmin(skipna=True)
# full information of first adoption
first_adoption = adoption_eps.loc[adoption_eps_inds]
# date of decision and date of start of adoption (DECOM) have to be put in one table
merged = first_adoption.merge(last_decision, on=['CHILD'], how='left', suffixes=['_EP', '_PA'])
# flag rows where the decision to place (DATE_PLACED) falls after the start of the adoptive placement (DECOM).
decided_after_placed = merged["DECOM"] < merged["DATE_PLACED"]
# find the corresponding location of error values per file.
episode_error_locs = merged.loc[decided_after_placed, 'index_EP']
placedadoption_error_locs = merged.loc[decided_after_placed, 'index_PA']
return {"PlacedAdoption": placedadoption_error_locs.to_list(), "Episodes": episode_error_locs.to_list()}
return error, _validate
def validate_551():
error = ErrorDefinition(
code='551',
description='Child has been placed for adoption but there is no date of the decision that the child should be placed for adoption.',
affected_fields=['DATE_PLACED', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
place_codes = ['A3', 'A4', 'A5', 'A6']
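# episodes where the child is placed for adoption (A3-A6); each must have a DATE_PLACED recorded in PlacedAdoption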
placeEpisodes = episodes[episodes['PLACE'].isin(place_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED'].isna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_557():
error = ErrorDefinition(
code='557',
description="Child for whom the decision was made that they should be placed for adoption has left care " +
"but was not adopted and information on the decision that they should no longer be placed for " +
"adoption items has not been completed.",
affected_fields=['DATE_PLACED_CEASED', 'REASON_PLACED_CEASED', # PlacedAdoption
'PLACE', 'LS', 'REC'], # Episodes
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'PlacedAdoption' not in dfs:
return {}
else:
eps = dfs['Episodes']
placed = dfs['PlacedAdoption']
eps = eps.reset_index()
placed = placed.reset_index()
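# children placed for adoption (A3-A6) or subject to a freeing/placement order (D1/E1) whose episode ceased for a reason other than adoption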
child_placed = eps['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])
order_granted = eps['LS'].isin(['D1', 'E1'])
not_adopted = ~eps['REC'].isin(['E11', 'E12']) & eps['REC'].notna()
placed['ceased_incomplete'] = (
placed['DATE_PLACED_CEASED'].isna() | placed['REASON_PLACED_CEASED'].isna()
)
eps = eps[(child_placed | order_granted) & not_adopted]
eps = eps.merge(placed, on='CHILD', how='left', suffixes=['_EP', '_PA'], indicator=True)
eps = eps[(eps['_merge'] == 'left_only') | eps['ceased_incomplete']]
EP_errors = eps['index_EP']
PA_errors = eps['index_PA'].dropna()
return {
'Episodes': EP_errors.to_list(),
'PlacedAdoption': PA_errors.to_list(),
}
return error, _validate
def validate_207():
error = ErrorDefinition(
code='207',
description='Mother status for the current year disagrees with the mother status already recorded for this child.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
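# a child recorded as a mother ('1') last year should not have a different mother status this year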
mother_is_different = header_merged['MOTHER'].astype(str) != header_merged['MOTHER_last'].astype(str)
mother_was_true = header_merged['MOTHER_last'].astype(str) == '1'
error_mask = in_both_years & mother_is_different & mother_was_true
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_523():
error = ErrorDefinition(
code='523',
description="Date of decision that the child should be placed for adoption should be the same date as the decision that adoption is in the best interest (date should be placed).",
affected_fields=['DATE_PLACED', 'DATE_INT'],
)
def _validate(dfs):
if ("AD1" not in dfs) or ("PlacedAdoption" not in dfs):
return {}
else:
placed_adoption = dfs["PlacedAdoption"]
ad1 = dfs["AD1"]
# keep initial index values to be reused for locating errors later on.
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
# convert to datetime to enable comparison
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format="%d/%m/%Y",
errors='coerce')
ad1["DATE_INT"] = pd.to_datetime(ad1['DATE_INT'], format='%d/%m/%Y', errors='coerce')
# drop rows where either of the required values have not been filled.
placed_adoption = placed_adoption[placed_adoption["DATE_PLACED"].notna()]
ad1 = ad1[ad1["DATE_INT"].notna()]
# bring corresponding values together from both dataframes
merged_df = placed_adoption.merge(ad1, on=['CHILD'], how='inner', suffixes=["_AD", "_PA"])
# find error values
different_dates = merged_df['DATE_INT'] != merged_df['DATE_PLACED']
# map error locations to corresponding indices
pa_error_locations = merged_df.loc[different_dates, 'index_PA']
ad1_error_locations = merged_df.loc[different_dates, 'index_AD']
return {"PlacedAdoption": pa_error_locations.to_list(), "AD1": ad1_error_locations.to_list()}
return error, _validate
def validate_3001():
error = ErrorDefinition(
code='3001',
description='Where care leavers information is being returned for a young person around their 17th birthday, the accommodation cannot be with their former foster carer(s).',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
oc3 = dfs['OC3']
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
header['DOB17'] = header['DOB'] + pd.DateOffset(years=17)
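# ACCOM codes Z1/Z2 (with former foster carers) are not allowed where the 17th birthday falls within the collection year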
oc3_merged = oc3.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
accom_foster = oc3_merged['ACCOM'].str.upper().astype(str).isin(['Z1', 'Z2'])
age_17_in_year = (oc3_merged['DOB17'] <= collection_end) & (oc3_merged['DOB17'] >= collection_start)
error_mask = accom_foster & age_17_in_year
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_389():
error = ErrorDefinition(
code='389',
description='Reason episode ceased is that child transferred to care of adult social care services, but child is aged under 16.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB16'] = header['DOB'] + pd.DateOffset(years=16)
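# transfer to adult social care services (REC = E7) is only valid where the child was 16 or over when the episode ceased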
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_asc = episodes_merged['REC'].str.upper().astype(str).isin(['E7'])
ceased_over_16 = episodes_merged['DOB16'] <= episodes_merged['DEC']
error_mask = ceased_asc & ~ceased_over_16
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_387():
error = ErrorDefinition(
code='387',
description='Reason episode ceased is child moved into independent living arrangement, but the child is aged under 14.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB14'] = header['DOB'] + pd.DateOffset(years=14)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_indep = episodes_merged['REC'].str.upper().astype(str).isin(['E5', 'E6'])
ceased_over_14 = episodes_merged['DOB14'] <= episodes_merged['DEC']
dec_present = episodes_merged['DEC'].notna()
error_mask = ceased_indep & ~ceased_over_14 & dec_present
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_452():
error = ErrorDefinition(
code='452',
description='Contradiction between local authority of placement code in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
different_pl_la = episodes_merged['PL_LA'].astype(str) != episodes_merged['PL_LA_last'].astype(str)
error_mask = in_both_years & same_rne & last_year_open & different_pl_la
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_386():
error = ErrorDefinition(
code='386',
description='Reason episode ceased is adopted but child has reached age 18.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
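# adoption cessation codes (E11/E12) are only valid where the episode ceased before the child's 18th birthday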
episodes_merged = (
episodes
.reset_index()
.merge(header, how='left', on=['CHILD'], suffixes=('', '_header'), indicator=True)
.set_index('index')
.dropna(subset=['DOB18', 'DEC'])
)
ceased_adopted = episodes_merged['REC'].str.upper().astype(str).isin(['E11', 'E12'])
ceased_under_18 = episodes_merged['DOB18'] > episodes_merged['DEC']
error_mask = ceased_adopted & ~ceased_under_18
error_locations = episodes_merged.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_363():
error = ErrorDefinition(
code='363',
description='Child assessment order (CAO) lasted longer than 7 days allowed in the Children Act 1989.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
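# child assessment orders are recorded under legal status code 'L3'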
L2_eps = episodes[episodes['LS'] == 'L3'].copy()
L2_eps['original_index'] = L2_eps.index
L2_eps = L2_eps[L2_eps['DECOM'].notna()]
L2_eps.loc[L2_eps['DEC'].isna(), 'DEC'] = collection_end_str
L2_eps['DECOM'] = pd.to_datetime(L2_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
L2_eps = L2_eps.dropna(subset=['DECOM'])
L2_eps['DEC'] = pd.to_datetime(L2_eps['DEC'], format='%d/%m/%Y', errors='coerce')
L2_eps = L2_eps.sort_values(['CHILD', 'DECOM'])
L2_eps['index'] = pd.RangeIndex(0, len(L2_eps))
L2_eps['index+1'] = L2_eps['index'] + 1
L2_eps = L2_eps.merge(L2_eps, left_on='index', right_on='index+1',
how='left', suffixes=[None, '_prev'])
L2_eps = L2_eps[['original_index', 'DECOM', 'DEC', 'DEC_prev', 'CHILD', 'CHILD_prev', 'LS']]
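# episodes for the same child that follow on without a gap are grouped into one continuous period and their durations are summed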
L2_eps['new_period'] = (
(L2_eps['DECOM'] > L2_eps['DEC_prev'])
| (L2_eps['CHILD'] != L2_eps['CHILD_prev'])
)
L2_eps['duration'] = (L2_eps['DEC'] - L2_eps['DECOM']).dt.days
L2_eps['period_id'] = L2_eps['new_period'].astype(int).cumsum()
L2_eps['period_duration'] = L2_eps.groupby('period_id')['duration'].transform(sum)
error_mask = L2_eps['period_duration'] > 7
return {'Episodes': L2_eps.loc[error_mask, 'original_index'].to_list()}
return error, _validate
def validate_364():
error = ErrorDefinition(
code='364',
description='Sections 41-46 of Police and Criminal Evidence (PACE; 1984) severely limits ' +
'the time a child can be detained in custody in Local Authority (LA) accommodation.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
J2_eps = episodes[episodes['LS'] == 'J2'].copy()
J2_eps['original_index'] = J2_eps.index
J2_eps['DECOM'] = pd.to_datetime(J2_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
J2_eps = J2_eps[J2_eps['DECOM'].notna()]
J2_eps.loc[J2_eps['DEC'].isna(), 'DEC'] = collection_end_str
J2_eps['DEC'] = pd.to_datetime(J2_eps['DEC'], format='%d/%m/%Y', errors='coerce')
J2_eps = J2_eps.sort_values(['CHILD', 'DECOM'])
J2_eps['index'] = pd.RangeIndex(0, len(J2_eps))
J2_eps['index_prev'] = J2_eps['index'] + 1
J2_eps = J2_eps.merge(J2_eps, left_on='index', right_on='index_prev',
how='left', suffixes=[None, '_prev'])
J2_eps = J2_eps[['original_index', 'DECOM', 'DEC', 'DEC_prev', 'CHILD', 'CHILD_prev', 'LS']]
J2_eps['new_period'] = (
(J2_eps['DECOM'] > J2_eps['DEC_prev'])
| (J2_eps['CHILD'] != J2_eps['CHILD_prev'])
)
J2_eps['duration'] = (J2_eps['DEC'] - J2_eps['DECOM']).dt.days
J2_eps['period_id'] = J2_eps['new_period'].astype(int).cumsum()
J2_eps['period_duration'] = J2_eps.groupby('period_id')['duration'].transform(sum)
error_mask = J2_eps['period_duration'] > 21
return {'Episodes': J2_eps.loc[error_mask, 'original_index'].to_list()}
return error, _validate
def validate_365():
error = ErrorDefinition(
code='365',
description='Any individual short-term respite placement must not exceed 17 days.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
episodes.loc[episodes['DEC'].isna(), 'DEC'] = collection_end_str
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
over_17_days = episodes['DEC'] > episodes['DECOM'] + pd.DateOffset(days=17)
error_mask = (episodes['LS'] == 'V3') & over_17_days
return {'Episodes': episodes.index[error_mask].to_list()}
return error, _validate
def validate_367():
error = ErrorDefinition(
code='367',
description='The maximum amount of respite care allowable is 75 days in any 12-month period.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
V3_eps = episodes[episodes['LS'] == 'V3']
V3_eps = V3_eps.dropna(subset=['DECOM']) # missing DECOM should get fixed before looking for this error
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
V3_eps['DECOM_dt'] = pd.to_datetime(V3_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
V3_eps['DEC_dt'] = pd.to_datetime(V3_eps['DEC'], format='%d/%m/%Y', errors='coerce')
# truncate episode start/end dates to collection start/end respectively
V3_eps.loc[V3_eps['DEC'].isna() | (V3_eps['DEC_dt'] > collection_end), 'DEC_dt'] = collection_end
V3_eps.loc[V3_eps['DECOM_dt'] < collection_start, 'DECOM_dt'] = collection_start
V3_eps['duration'] = (V3_eps['DEC_dt'] - V3_eps['DECOM_dt']).dt.days
V3_eps = V3_eps[V3_eps['duration'] > 0]
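# total each child's respite days across the collection year and flag totals over 75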
V3_eps['year_total_duration'] = V3_eps.groupby('CHILD')['duration'].transform(sum)
error_mask = V3_eps['year_total_duration'] > 75
return {'Episodes': V3_eps.index[error_mask].to_list()}
return error, _validate
def validate_440():
error = ErrorDefinition(
code='440',
description='Participation method indicates child was under 4 years old at the time of the review, but date of birth and review date indicates the child was 4 years old or over.',
affected_fields=['DOB', 'REVIEW', 'REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
reviews = dfs['Reviews']
reviews['DOB'] = pd.to_datetime(reviews['DOB'], format='%d/%m/%Y', errors='coerce')
reviews['REVIEW'] = pd.to_datetime(reviews['REVIEW'], format='%d/%m/%Y', errors='coerce')
mask = reviews['REVIEW_CODE'].eq('PN0') & (
reviews['REVIEW'] > reviews['DOB'] + pd.offsets.DateOffset(years=4))
validation_error_mask = mask
validation_error_locations = reviews.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_445():
error = ErrorDefinition(
code='445',
description='D1 is not a valid code for episodes starting after December 2005.',
affected_fields=['LS', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
max_decom_allowed = pd.to_datetime('31/12/2005', format='%d/%m/%Y', errors='coerce')
mask = episodes['LS'].eq('D1') & (episodes['DECOM'] > max_decom_allowed)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_446():
error = ErrorDefinition(
code='446',
description='E1 is not a valid code for episodes starting before December 2005.',
affected_fields=['LS', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
min_decom_allowed = pd.to_datetime('01/12/2005', format='%d/%m/%Y', errors='coerce')
mask = episodes['LS'].eq('E1') & (episodes['DECOM'] < min_decom_allowed)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_208():
error = ErrorDefinition(
code='208',
description='Unique Pupil Number (UPN) for the current year disagrees with the Unique Pupil Number (UPN) already recorded for this child.',
affected_fields=['UPN'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
upn_is_different = header_merged['UPN'].str.upper().astype(str) != header_merged[
'UPN_last'].str.upper().astype(str)
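# exception: moving from UN1 last year to one of the other 'UPN unknown' codes (UN2-UN6) is not an error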
upn_not_recorded = header_merged['UPN'].str.upper().astype(str).isin(['UN2', 'UN3', 'UN4', 'UN5', 'UN6']) & \
header_merged['UPN_last'].str.upper().astype(str).isin(['UN1'])
error_mask = in_both_years & upn_is_different & ~upn_not_recorded
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_204():
error = ErrorDefinition(
code='204',
description='Ethnic origin code disagrees with the ethnic origin already recorded for this child.',
affected_fields=['ETHNIC'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
ethnic_is_different = header_merged['ETHNIC'].astype(str).str.upper() != header_merged[
'ETHNIC_last'].astype(str).str.upper()
error_mask = in_both_years & ethnic_is_different
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_203():
error = ErrorDefinition(
code='203',
description='Date of birth disagrees with the date of birth already recorded for this child.',
affected_fields=['DOB'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
header_last['DOB'] = pd.to_datetime(header_last['DOB'], format='%d/%m/%Y', errors='coerce')
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
dob_is_different = header_merged['DOB'].astype(str) != header_merged['DOB_last'].astype(str)
error_mask = in_both_years & dob_is_different
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_530():
error = ErrorDefinition(
code='530',
description="A placement provider code of PR4 cannot be associated with placement P1.",
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = episodes['PLACE'].eq('P1') & episodes['PLACE_PROVIDER'].eq('PR4')
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_571():
error = ErrorDefinition(
code='571',
description='The date that the child ceased to be missing or away from placement without authorisation is before the start or after the end of the collection year.',
affected_fields=['MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
missing['fMIS_END'] = pd.to_datetime(missing['MIS_END'], format='%d/%m/%Y', errors='coerce')
end_date_before_year = missing['fMIS_END'] < collection_start
end_date_after_year = missing['fMIS_END'] > collection_end
error_mask = end_date_before_year | end_date_after_year
error_locations = missing.index[error_mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_1005():
error = ErrorDefinition(
code='1005',
description='The end date of the missing episode or episode that the child was away from placement without authorisation is not a valid date.',
affected_fields=['MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
missing['fMIS_END'] = pd.to_datetime(missing['MIS_END'], format='%d/%m/%Y', errors='coerce')
missing_end_date = missing['MIS_END'].isna()
invalid_end_date = missing['fMIS_END'].isna()
error_mask = ~missing_end_date & invalid_end_date
error_locations = missing.index[error_mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_1004():
error = ErrorDefinition(
code='1004',
description='The start date of the missing episode or episode that the child was away from placement without authorisation is not a valid date.',
affected_fields=['MIS_START'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
missing['fMIS_START'] = pd.to_datetime(missing['MIS_START'], format='%d/%m/%Y', errors='coerce')
missing_start_date = missing['MIS_START'].isna()
invalid_start_date = missing['fMIS_START'].isna()
error_mask = missing_start_date | invalid_start_date
error_locations = missing.index[error_mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_202():
error = ErrorDefinition(
code='202',
description='The gender code conflicts with the gender already recorded for this child.',
affected_fields=['SEX'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
sex_is_different = header_merged['SEX'].astype(str) != header_merged['SEX_last'].astype(str)
error_mask = in_both_years & sex_is_different
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_621():
error = ErrorDefinition(
code='621',
description="Mother’s field has been completed but date of birth shows that the mother is younger than her child.",
affected_fields=['DOB', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
header['MC_DOB'] = pd.to_datetime(header['MC_DOB'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
mask = (header['MC_DOB'] > header['DOB']) | header['MC_DOB'].isna()
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_556():
error = ErrorDefinition(
code='556',
description='Date of decision that the child should be placed for adoption should be on or prior to the date that the freeing order was granted.',
affected_fields=['DATE_PLACED', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
placedAdoptions['DATE_PLACED'] = pd.to_datetime(placedAdoptions['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
episodes = episodes.reset_index()
D1Episodes = episodes[episodes['LS'] == 'D1']
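            # Only episodes held under a freeing order (legal status D1) are checked against the placed-for-adoption decision date.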
merged = D1Episodes.reset_index().merge(placedAdoptions, how='left', on='CHILD', ).set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED'] > merged['DECOM']]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_393():
error = ErrorDefinition(
code='393',
description='Child is looked after but mother field is not completed.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header_female = header[header['SEX'].astype(str) == '2']
applicable_episodes = episodes[~episodes['LS'].str.upper().isin(['V3', 'V4'])]
error_mask = header_female['CHILD'].isin(applicable_episodes['CHILD']) & header_female['MOTHER'].isna()
error_locations = header_female.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_NoE():
error = ErrorDefinition(
code='NoE',
description='This child has no episodes loaded for previous year even though child started to be looked after before this current year.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last = dfs['Episodes_last']
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
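            # Any episode that began before the collection year means the child should also appear in last year's Episodes file.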
episodes_before_year = episodes[episodes['DECOM'] < collection_start]
episodes_merged = episodes_before_year.reset_index().merge(episodes_last, how='left', on=['CHILD'],
indicator=True).set_index('index')
episodes_not_matched = episodes_merged[episodes_merged['_merge'] == 'left_only']
error_mask = episodes.index.isin(episodes_not_matched.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_356():
error = ErrorDefinition(
code='356',
description='The date the episode ceased is before the date the same episode started.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
error_mask = episodes['DEC'].notna() & (episodes['DEC'] < episodes['DECOM'])
return {'Episodes': episodes.index[error_mask].to_list()}
return error, _validate
def validate_611():
error = ErrorDefinition(
code='611',
description="Date of birth field is blank, but child is a mother.",
affected_fields=['MOTHER', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
validation_error_mask = header['MOTHER'].astype(str).isin(['1']) & header['MC_DOB'].isna()
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_1009():
error = ErrorDefinition(
code='1009',
description='Reason for placement change is not a valid code.',
affected_fields=['REASON_PLACE_CHANGE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'CARPL',
'CLOSE',
'ALLEG',
'STAND',
'APPRR',
'CREQB',
'CREQO',
'CHILD',
'LAREQ',
'PLACE',
'CUSTOD',
'OTHER'
]
mask = episodes['REASON_PLACE_CHANGE'].isin(code_list) | episodes['REASON_PLACE_CHANGE'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_1006():
error = ErrorDefinition(
code='1006',
description='Missing type invalid.',
affected_fields=['MISSING'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
missing_from_care = dfs['Missing']
code_list = ['M', 'A']
mask = missing_from_care['MISSING'].isin(code_list) | missing_from_care['MISSING'].isna()
validation_error_mask = ~mask
validation_error_locations = missing_from_care.index[validation_error_mask]
return {'Missing': validation_error_locations.tolist()}
return error, _validate
def validate_631():
error = ErrorDefinition(
code='631',
description='Previous permanence option not a valid value.',
affected_fields=['PREV_PERM'],
)
def _validate(dfs):
if 'PrevPerm' not in dfs:
return {}
previous_permanence = dfs['PrevPerm']
code_list = ['P1', 'P2', 'P3', 'P4', 'Z1']
mask = previous_permanence['PREV_PERM'].isin(code_list) | previous_permanence['PREV_PERM'].isna()
validation_error_mask = ~mask
validation_error_locations = previous_permanence.index[validation_error_mask]
return {'PrevPerm': validation_error_locations.tolist()}
return error, _validate
def validate_196():
error = ErrorDefinition(
code='196',
description='Strengths and Difficulties (SDQ) reason is not a valid code.',
affected_fields=['SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
code_list = ['SDQ1', 'SDQ2', 'SDQ3', 'SDQ4', 'SDQ5']
mask = oc2['SDQ_REASON'].isin(code_list) | oc2['SDQ_REASON'].isna()
validation_error_mask = ~mask
validation_error_locations = oc2.index[validation_error_mask]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_177():
error = ErrorDefinition(
code='177',
description='The legal status of adopter(s) code is not a valid code.',
affected_fields=['LS_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['L0', 'L11', 'L12', 'L2', 'L3', 'L4']
mask = adoptions['LS_ADOPTR'].isin(code_list) | adoptions['LS_ADOPTR'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_176():
error = ErrorDefinition(
code='176',
description='The gender of adopter(s) at the date of adoption code is not a valid code.',
affected_fields=['SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['M1', 'F1', 'MM', 'FF', 'MF']
mask = adoptions['SEX_ADOPTR'].isin(code_list) | adoptions['SEX_ADOPTR'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_175():
error = ErrorDefinition(
code='175',
description='The number of adopter(s) code is not a valid code.',
affected_fields=['NB_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['1', '2']
mask = adoptions['NB_ADOPTR'].astype(str).isin(code_list) | adoptions['NB_ADOPTR'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_132():
error = ErrorDefinition(
code='132',
description='Data entry for activity after leaving care is invalid.',
affected_fields=['ACTIV'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
care_leavers = dfs['OC3']
code_list = [
'F1',
'P1',
'F2',
'P2',
'F4',
'P4',
'F5',
'P5',
'G4',
'G5',
'G6',
'0'
]
mask = care_leavers['ACTIV'].astype(str).isin(code_list) | care_leavers['ACTIV'].isna()
validation_error_mask = ~mask
validation_error_locations = care_leavers.index[validation_error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_131():
error = ErrorDefinition(
code='131',
description='Data entry for being in touch after leaving care is invalid.',
affected_fields=['IN_TOUCH'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
care_leavers = dfs['OC3']
code_list = [
'YES',
'NO',
'DIED',
'REFU',
'NREQ',
'RHOM'
]
mask = care_leavers['IN_TOUCH'].isin(code_list) | care_leavers['IN_TOUCH'].isna()
validation_error_mask = ~mask
validation_error_locations = care_leavers.index[validation_error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_120():
error = ErrorDefinition(
code='120',
description='The reason for the reversal of the decision that the child should be placed for adoption code is not valid.',
affected_fields=['REASON_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
placed_adoptions = dfs['PlacedAdoption']
code_list = ['RD1', 'RD2', 'RD3', 'RD4']
mask = placed_adoptions['REASON_PLACED_CEASED'].isin(code_list) | placed_adoptions[
'REASON_PLACED_CEASED'].isna()
validation_error_mask = ~mask
validation_error_locations = placed_adoptions.index[validation_error_mask]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_114():
error = ErrorDefinition(
code='114',
description='Data entry to record the status of former carer(s) of an adopted child is invalid.',
affected_fields=['FOSTER_CARE'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
adoptions = dfs['AD1']
code_list = ['0', '1']
mask = adoptions['FOSTER_CARE'].astype(str).isin(code_list) | adoptions['FOSTER_CARE'].isna()
validation_error_mask = ~mask
validation_error_locations = adoptions.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_178():
error = ErrorDefinition(
code='178',
description='Placement provider code is not a valid code.',
affected_fields=['PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list_placement_provider = ['PR0', 'PR1', 'PR2', 'PR3', 'PR4', 'PR5']
code_list_placement_with_no_provider = ['T0', 'T1', 'T2', 'T3', 'T4', 'Z1']
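        # A row passes if it has a recognised provider code for a placement type that requires one, or if the provider field is blank.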
place_provider_needed_and_correct = episodes['PLACE_PROVIDER'].isin(code_list_placement_provider) & ~episodes[
'PLACE'].isin(code_list_placement_with_no_provider)
place_provider_not_provided = episodes['PLACE_PROVIDER'].isna()
place_provider_not_needed = episodes['PLACE_PROVIDER'].isna() & episodes['PLACE'].isin(
code_list_placement_with_no_provider)
mask = place_provider_needed_and_correct | place_provider_not_provided | place_provider_not_needed
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_103():
error = ErrorDefinition(
code='103',
description='The ethnicity code is either not valid or has not been entered.',
affected_fields=['ETHNIC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
header = dfs['Header']
code_list = [
'WBRI',
'WIRI',
'WOTH',
'WIRT',
'WROM',
'MWBC',
'MWBA',
'MWAS',
'MOTH',
'AIND',
'APKN',
'ABAN',
'AOTH',
'BCRB',
'BAFR',
'BOTH',
'CHNE',
'OOTH',
'REFU',
'NOBT'
]
mask = header['ETHNIC'].isin(code_list)
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_143():
error = ErrorDefinition(
code='143',
description='The reason for new episode code is not a valid code.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = ['S', 'P', 'L', 'T', 'U', 'B']
mask = episodes['RNE'].isin(code_list) | episodes['RNE'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_144():
error = ErrorDefinition(
code='144',
description='The legal status code is not a valid code.',
affected_fields=['LS'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'C1',
'C2',
'D1',
'E1',
'V2',
'V3',
'V4',
'J1',
'J2',
'J3',
'L1',
'L2',
'L3'
]
mask = episodes['LS'].isin(code_list) | episodes['LS'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_145():
error = ErrorDefinition(
code='145',
description='Category of need code is not a valid code.',
affected_fields=['CIN'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'N1',
'N2',
'N3',
'N4',
'N5',
'N6',
'N7',
'N8',
]
mask = episodes['CIN'].isin(code_list) | episodes['CIN'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_146():
error = ErrorDefinition(
code='146',
description='Placement type code is not a valid code.',
affected_fields=['PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'A3',
'A4',
'A5',
'A6',
'H5',
'K1',
'K2',
'P1',
'P2',
'P3',
'R1',
'R2',
'R3',
'R5',
'S1',
'T0',
'T1',
'T2',
'T3',
'T4',
'U1',
'U2',
'U3',
'U4',
'U5',
'U6',
'Z1'
]
mask = episodes['PLACE'].isin(code_list) | episodes['PLACE'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_149():
error = ErrorDefinition(
code='149',
        description='Reason episode ceased code is not valid.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
code_list = [
'E11',
'E12',
'E2',
'E3',
'E4A',
'E4B',
'E13',
'E41',
'E45',
'E46',
'E47',
'E48',
'E5',
'E6',
'E7',
'E8',
'E9',
'E14',
'E15',
'E16',
'E17',
'X1'
]
mask = episodes['REC'].isin(code_list) | episodes['REC'].isna()
validation_error_mask = ~mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_167():
error = ErrorDefinition(
code='167',
description='Data entry for participation is invalid or blank.',
affected_fields=['REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
review = dfs['Reviews']
code_list = ['PN0', 'PN1', 'PN2', 'PN3', 'PN4', 'PN5', 'PN6', 'PN7']
        mask = (review['REVIEW'].notna() & review['REVIEW_CODE'].isin(code_list)) | (
            review['REVIEW'].isna() & review['REVIEW_CODE'].isna())
validation_error_mask = ~mask
validation_error_locations = review.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_101():
error = ErrorDefinition(
code='101',
description='Gender code is not valid.',
affected_fields=['SEX'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
header = dfs['Header']
code_list = ['1', '2']
mask = header['SEX'].astype(str).isin(code_list)
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_141():
error = ErrorDefinition(
code='141',
description='Date episode began is not a valid date.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce').notna()
na_location = episodes['DECOM'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_147():
error = ErrorDefinition(
code='147',
description='Date episode ceased is not a valid date.',
affected_fields=['DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce').notna()
na_location = episodes['DEC'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_171():
error = ErrorDefinition(
code='171',
description="Date of birth of mother's child is not a valid date.",
affected_fields=['MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
mask = pd.to_datetime(header['MC_DOB'], format='%d/%m/%Y', errors='coerce').notna()
na_location = header['MC_DOB'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_102():
error = ErrorDefinition(
code='102',
description='Date of birth is not a valid date.',
affected_fields=['DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
mask = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce').notna()
validation_error_mask = ~mask
validation_error_locations = header.index[validation_error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_112():
error = ErrorDefinition(
code='112',
description='Date should be placed for adoption is not a valid date.',
affected_fields=['DATE_INT'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = pd.to_datetime(ad1['DATE_INT'], format='%d/%m/%Y', errors='coerce').notna()
na_location = ad1['DATE_INT'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = ad1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_115():
error = ErrorDefinition(
code='115',
description="Date of Local Authority's (LA) decision that a child should be placed for adoption is not a valid date.",
affected_fields=['DATE_PLACED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
adopt = dfs['PlacedAdoption']
mask = pd.to_datetime(adopt['DATE_PLACED'], format='%d/%m/%Y', errors='coerce').notna()
na_location = adopt['DATE_PLACED'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = adopt.index[validation_error_mask]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_116():
error = ErrorDefinition(
code='116',
description="Date of Local Authority's (LA) decision that a child should no longer be placed for adoption is not a valid date.",
affected_fields=['DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
adopt = dfs['PlacedAdoption']
mask = pd.to_datetime(adopt['DATE_PLACED_CEASED'], format='%d/%m/%Y', errors='coerce').notna()
na_location = adopt['DATE_PLACED_CEASED'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = adopt.index[validation_error_mask]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_392c():
error = ErrorDefinition(
code='392c',
description='Postcode(s) provided are invalid.',
affected_fields=['HOME_POST', 'PL_POST'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
home_provided = episodes['HOME_POST'].notna()
home_details = merge_postcodes(episodes, "HOME_POST")
home_valid = home_details['pcd'].notna()
pl_provided = episodes['PL_POST'].notna()
pl_details = merge_postcodes(episodes, "PL_POST")
pl_valid = pl_details['pcd'].notna()
error_mask = (home_provided & ~home_valid) | (pl_provided & ~pl_valid)
return {'Episodes': episodes.index[error_mask].tolist()}
return error, _validate
def validate_213():
error = ErrorDefinition(
code='213',
description='Placement provider information not required.',
affected_fields=['PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
mask = df['PLACE'].isin(['T0', 'T1', 'T2', 'T3', 'T4', 'Z1']) & df['PLACE_PROVIDER'].notna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_168():
error = ErrorDefinition(
code='168',
description='Unique Pupil Number (UPN) is not valid. If unknown, default codes should be UN1, UN2, UN3, UN4 or UN5.',
affected_fields=['UPN'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
df = dfs['Header']
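            # Valid UPNs are one letter (not I, O or S) followed by 12 digits, or by 11 digits and a final letter; otherwise one of the codes UN1-UN5.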
mask = df['UPN'].str.match(r'(^((?![IOS])[A-Z]){1}(\d{12}|\d{11}[A-Z]{1})$)|^(UN[1-5])$', na=False)
mask = ~mask
return {'Header': df.index[mask].tolist()}
return error, _validate
def validate_388():
error = ErrorDefinition(
code='388',
description='Reason episode ceased is coded new episode begins, but there is no continuation episode.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
df['DECOM'] = pd.to_datetime(df['DECOM'], format='%d/%m/%Y', errors='coerce')
df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce')
            df['DECOM'] = df['DECOM'].fillna(pd.to_datetime('01/01/1901', format='%d/%m/%Y'))  # sentinel for missing start dates; keeps the column as datetime
df = df.sort_values(['CHILD', 'DECOM'])
df['DECOM_NEXT_EPISODE'] = df.groupby(['CHILD'])['DECOM'].shift(-1)
            # An episode with no following DECOM is the child's latest episode in the file
            # (equivalent to taking the row with the maximum DECOM per child).
no_next = df.DECOM_NEXT_EPISODE.isna() & df.CHILD.notna()
# Dataframe with the maximum DECOM removed
max_decom_removed = df[~no_next]
# Dataframe with the maximum DECOM only
max_decom_only = df[no_next]
# Case 1: If reason episode ceased is coded X1 there must be a subsequent episode
# starting on the same day.
case1 = max_decom_removed[(max_decom_removed['REC'] == 'X1') &
(max_decom_removed['DEC'].notna()) &
(max_decom_removed['DECOM_NEXT_EPISODE'].notna()) &
(max_decom_removed['DEC'] != max_decom_removed['DECOM_NEXT_EPISODE'])]
# Case 2: If an episode ends but the child continues to be looked after, a new
# episode should start on the same day.The reason episode ceased code of
# the episode which ends must be X1.
case2 = max_decom_removed[(max_decom_removed['REC'] != 'X1') &
(max_decom_removed['REC'].notna()) &
(max_decom_removed['DEC'].notna()) &
(max_decom_removed['DECOM_NEXT_EPISODE'].notna()) &
(max_decom_removed['DEC'] == max_decom_removed['DECOM_NEXT_EPISODE'])]
# Case 3: If a child ceases to be looked after reason episode ceased code X1 must
# not be used.
case3 = max_decom_only[(max_decom_only['DEC'].notna()) &
(max_decom_only['REC'] == 'X1')]
mask_case1 = case1.index.tolist()
mask_case2 = case2.index.tolist()
mask_case3 = case3.index.tolist()
mask = mask_case1 + mask_case2 + mask_case3
mask.sort()
return {'Episodes': mask}
return error, _validate
def validate_113():
error = ErrorDefinition(
code='113',
description='Date matching child and adopter(s) is not a valid date.',
affected_fields=['DATE_MATCH'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = pd.to_datetime(ad1['DATE_MATCH'], format='%d/%m/%Y', errors='coerce').notna()
na_location = ad1['DATE_MATCH'].isna()
validation_error_mask = ~mask & ~na_location
validation_error_locations = ad1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_134():
error = ErrorDefinition(
code='134',
description='Data on adoption should not be entered for the OC3 cohort.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR',
'SEX_ADOPTR', 'LS_ADOPTR'],
)
def _validate(dfs):
if 'OC3' not in dfs or 'AD1' not in dfs:
return {}
else:
oc3 = dfs['OC3']
ad1 = dfs['AD1']
ad1['ad1_index'] = ad1.index
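            # Keep the original AD1 row positions so that errors can be reported against AD1 after the merge.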
all_data = ad1.merge(oc3, how='left', on='CHILD')
na_oc3_data = (
all_data['IN_TOUCH'].isna() &
all_data['ACTIV'].isna() &
all_data['ACCOM'].isna()
)
na_ad1_data = (
all_data['DATE_INT'].isna() &
all_data['DATE_MATCH'].isna() &
all_data['FOSTER_CARE'].isna() &
all_data['NB_ADOPTR'].isna() &
all_data['SEX_ADOPTR'].isna() &
all_data['LS_ADOPTR'].isna()
)
validation_error = ~na_oc3_data & ~na_ad1_data
validation_error_locations = all_data.loc[validation_error, 'ad1_index'].unique()
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_119():
error = ErrorDefinition(
code='119',
description='If the decision is made that a child should no longer be placed for adoption, then the date of this decision and the reason why this decision was made must be completed.',
affected_fields=['REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
adopt = dfs['PlacedAdoption']
na_placed_ceased = adopt['DATE_PLACED_CEASED'].isna()
na_reason_ceased = adopt['REASON_PLACED_CEASED'].isna()
validation_error = (na_placed_ceased & ~na_reason_ceased) | (~na_placed_ceased & na_reason_ceased)
validation_error_locations = adopt.index[validation_error]
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_159():
error = ErrorDefinition(
code='159',
description='If a child has been recorded as not receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be completed as well.',
affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
mask1 = oc2['SUBSTANCE_MISUSE'].astype(str) == '1'
mask2 = oc2['INTERVENTION_RECEIVED'].astype(str) == '0'
mask3 = oc2['INTERVENTION_OFFERED'].isna()
validation_error = mask1 & mask2 & mask3
validation_error_locations = oc2.index[validation_error]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_142():
error = ErrorDefinition(
code='142',
description='A new episode has started, but the previous episode has not ended.',
affected_fields=['DEC', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
df['DECOM'] = pd.to_datetime(df['DECOM'], format='%d/%m/%Y', errors='coerce')
df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce')
            # Missing DECOM values are left as NaT and are excluded from the check below.
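            # A child's most recent episode (latest DECOM) may legitimately still be open, so it is excluded; every earlier episode must have both DEC and REC.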
last_episodes = df.sort_values('DECOM').reset_index().groupby(['CHILD'])['index'].last()
ended_episodes_df = df.loc[~df.index.isin(last_episodes)]
ended_episodes_df = ended_episodes_df[(ended_episodes_df['DEC'].isna() | ended_episodes_df['REC'].isna()) &
ended_episodes_df['CHILD'].notna() & ended_episodes_df[
'DECOM'].notna()]
mask = ended_episodes_df.index.tolist()
return {'Episodes': mask}
return error, _validate
def validate_148():
error = ErrorDefinition(
code='148',
description='Date episode ceased and reason episode ceased must both be coded, or both left blank.',
affected_fields=['DEC', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce')
mask = ((df['DEC'].isna()) & (df['REC'].notna())) | ((df['DEC'].notna()) & (df['REC'].isna()))
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_151():
error = ErrorDefinition(
code='151',
        description="All data items relating to a child's adoption must be coded or left blank.",
        affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
na_date_int = ad1['DATE_INT'].isna()
na_date_match = ad1['DATE_MATCH'].isna()
na_foster_care = ad1['FOSTER_CARE'].isna()
na_nb_adoptr = ad1['NB_ADOPTR'].isna()
na_sex_adoptr = ad1['SEX_ADOPTR'].isna()
na_lsadoptr = ad1['LS_ADOPTR'].isna()
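            # The adoption fields must be either all completed or all blank; a partially completed record is an error.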
ad1_not_null = (
~na_date_int & ~na_date_match & ~na_foster_care & ~na_nb_adoptr & ~na_sex_adoptr & ~na_lsadoptr)
validation_error = (
~na_date_int | ~na_date_match | ~na_foster_care | ~na_nb_adoptr | ~na_sex_adoptr | ~na_lsadoptr) & ~ad1_not_null
validation_error_locations = ad1.index[validation_error]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_182():
error = ErrorDefinition(
code='182',
description='Data entries on immunisations, teeth checks, health assessments and substance misuse problem identified should be completed or all OC2 fields should be left blank.',
affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'CONVICTED',
'HEALTH_CHECK', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
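            # If any of the four health items is blank, the remaining OC2 items must all be blank as well.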
mask1 = (
oc2['IMMUNISATIONS'].isna() |
oc2['TEETH_CHECK'].isna() |
oc2['HEALTH_ASSESSMENT'].isna() |
oc2['SUBSTANCE_MISUSE'].isna()
)
mask2 = (
oc2['CONVICTED'].isna() &
oc2['HEALTH_CHECK'].isna() &
oc2['INTERVENTION_RECEIVED'].isna() &
oc2['INTERVENTION_OFFERED'].isna()
)
validation_error = mask1 & ~mask2
validation_error_locations = oc2.index[validation_error]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_214():
error = ErrorDefinition(
code='214',
description='Placement location information not required.',
affected_fields=['PL_POST', 'URN'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
mask = df['LS'].isin(['V3', 'V4']) & ((df['PL_POST'].notna()) | (df['URN'].notna()))
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_222():
error = ErrorDefinition(
code='222',
description='Ofsted Unique reference number (URN) should not be recorded for this placement type.',
affected_fields=['URN'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
place_code_list = ['H5', 'P1', 'P2', 'P3', 'R1', 'R2', 'R5', 'T0', 'T1', 'T2', 'T3', 'T4', 'Z1']
mask = (df['PLACE'].isin(place_code_list)) & (df['URN'].notna()) & (df['URN'] != 'XXXXXX')
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_366():
error = ErrorDefinition(
code='366',
description='A child cannot change placement during the course of an individual short-term respite break.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
mask = (df['LS'] == 'V3') & (df['RNE'] != 'S')
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_628():
error = ErrorDefinition(
code='628',
description='Motherhood details are not required for care leavers who have not been looked after during the year.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs or 'OC3' not in dfs:
return {}
else:
hea = dfs['Header']
epi = dfs['Episodes']
oc3 = dfs['OC3']
hea = hea.reset_index()
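            # Identify children with care-leaver (OC3) data but no episodes this year; motherhood details should not be completed for them.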
oc3_no_nulls = oc3[oc3[['IN_TOUCH', 'ACTIV', 'ACCOM']].notna().any(axis=1)]
hea_merge_epi = hea.merge(epi, how='left', on='CHILD', indicator=True)
hea_not_in_epi = hea_merge_epi[hea_merge_epi['_merge'] == 'left_only']
cohort_to_check = hea_not_in_epi.merge(oc3_no_nulls, how='inner', on='CHILD')
error_cohort = cohort_to_check[cohort_to_check['MOTHER'].notna()]
error_list = list(set(error_cohort['index'].to_list()))
error_list.sort()
return {'Header': error_list}
return error, _validate
def validate_164():
error = ErrorDefinition(
code='164',
description='Distance is not valid. Please check a valid postcode has been entered.',
affected_fields=['PL_DISTANCE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
distance = pd.to_numeric(df['PL_DISTANCE'], errors='coerce')
# Use a bit of tolerance in these bounds
distance_valid = distance.gt(-0.2) & distance.lt(1001.0)
mask = ~is_short_term & ~distance_valid
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_169():
error = ErrorDefinition(
code='169',
description='Local Authority (LA) of placement is not valid or is missing. Please check a valid postcode has been entered.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
# Because PL_LA is derived, it will always be valid if present
mask = ~is_short_term & df['PL_LA'].isna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_179():
error = ErrorDefinition(
code='179',
description='Placement location code is not a valid code.',
affected_fields=['PL_LOCATION'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
# Because PL_LOCATION is derived, it will always be valid if present
mask = ~is_short_term & df['PL_LOCATION'].isna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_1015():
error = ErrorDefinition(
code='1015',
description='Placement provider is own provision but child not placed in own LA.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
local_authority = dfs['metadata']['localAuthority']
placement_fostering_or_adoption = df['PLACE'].isin([
'A3', 'A4', 'A5', 'A6', 'U1', 'U2', 'U3', 'U4', 'U5', 'U6',
])
own_provision = df['PLACE_PROVIDER'].eq('PR1')
is_short_term = df['LS'].isin(['V3', 'V4'])
is_pl_la = df['PL_LA'].eq(local_authority)
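            # Own-provision (PR1) placements, other than fostering/adoption and short breaks, are expected to be located in the responsible local authority.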
checked_episodes = ~placement_fostering_or_adoption & ~is_short_term & own_provision
checked_episodes = checked_episodes & df['LS'].notna() & df['PLACE'].notna()
mask = checked_episodes & ~is_pl_la
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_411():
error = ErrorDefinition(
code='411',
description='Placement location code disagrees with LA of placement.',
affected_fields=['PL_LOCATION'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
local_authority = dfs['metadata']['localAuthority']
mask = df['PL_LOCATION'].eq('IN') & df['PL_LA'].ne(local_authority)
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_420():
error = ErrorDefinition(
code='420',
description='LA of placement completed but child is looked after under legal status V3 or V4.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
is_short_term = df['LS'].isin(['V3', 'V4'])
mask = is_short_term & df['PL_LA'].notna()
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_355():
error = ErrorDefinition(
code='355',
description='Episode appears to have lasted for less than 24 hours',
affected_fields=['DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
df = dfs['Episodes']
            mask = df['DEC'].notna() & (df['DECOM'].astype(str) == df['DEC'].astype(str))  # ignore open episodes with a blank DEC
return {'Episodes': df.index[mask].tolist()}
return error, _validate
def validate_586():
error = ErrorDefinition(
code='586',
description='Dates of missing periods are before child’s date of birth.',
affected_fields=['MIS_START'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
df = dfs['Missing']
df['DOB'] = pd.to_datetime(df['DOB'], format='%d/%m/%Y', errors='coerce')
df['MIS_START'] = pd.to_datetime(df['MIS_START'], format='%d/%m/%Y', errors='coerce')
error_mask = df['MIS_START'].notna() & (df['MIS_START'] <= df['DOB'])
return {'Missing': df.index[error_mask].to_list()}
return error, _validate
def validate_630():
error = ErrorDefinition(
code='630',
description='Information on previous permanence option should be returned.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'PrevPerm' not in dfs or 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
pre = dfs['PrevPerm']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
epi = epi.reset_index()
# Form the episode dataframe which has an 'RNE' of 'S' in this financial year
epi_has_rne_of_S_in_year = epi[(epi['RNE'] == 'S') & (epi['DECOM'] >= collection_start)]
# Merge to see
# 1) which CHILD ids are missing from the PrevPerm file
# 2) which CHILD are in the prevPerm file, but don't have the LA_PERM/DATE_PERM field completed where they should be
# 3) which CHILD are in the PrevPerm file, but don't have the PREV_PERM field completed.
merged_epi_preperm = epi_has_rne_of_S_in_year.merge(pre, on='CHILD', how='left', indicator=True)
error_not_in_preperm = merged_epi_preperm['_merge'] == 'left_only'
error_wrong_values_in_preperm = (merged_epi_preperm['PREV_PERM'] != 'Z1') & (
merged_epi_preperm[['LA_PERM', 'DATE_PERM']].isna().any(axis=1))
error_null_prev_perm = (merged_epi_preperm['_merge'] == 'both') & (merged_epi_preperm['PREV_PERM'].isna())
error_mask = error_not_in_preperm | error_wrong_values_in_preperm | error_null_prev_perm
error_list = merged_epi_preperm[error_mask]['index'].to_list()
error_list = list(set(error_list))
error_list.sort()
return {'Episodes': error_list}
return error, _validate
def validate_501():
error = ErrorDefinition(
code='501',
description='A new episode has started before the end date of the previous episode.',
affected_fields=['DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi = epi.reset_index()
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi['DEC'] = pd.to_datetime(epi['DEC'], format='%d/%m/%Y', errors='coerce')
epi = epi.sort_values(['CHILD', 'DECOM'])
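            # Pair each episode with the preceding episode for the same child (in DECOM order) so overlapping dates can be detected.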
epi_lead = epi.shift(1)
epi_lead = epi_lead.reset_index()
m_epi = epi.merge(epi_lead, left_on='index', right_on='level_0', suffixes=('', '_prev'))
error_cohort = m_epi[(m_epi['CHILD'] == m_epi['CHILD_prev']) & (m_epi['DECOM'] < m_epi['DEC_prev'])]
error_list = error_cohort['index'].to_list()
error_list.sort()
return {'Episodes': error_list}
return error, _validate
def validate_502():
error = ErrorDefinition(
code='502',
description='Last year’s record ended with an open episode. The date on which that episode started does not match the start date of the first episode on this year’s record.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi_last = dfs['Episodes_last']
epi = epi.reset_index()
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi_last['DECOM'] = pd.to_datetime(epi_last['DECOM'], format='%d/%m/%Y', errors='coerce')
epi_last_no_dec = epi_last[epi_last['DEC'].isna()]
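            # For a child whose last-year record ended with an open episode (no DEC), the earliest DECOM this year must equal that open episode's DECOM.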
epi_min_decoms_index = epi[['CHILD', 'DECOM']].groupby(['CHILD'])['DECOM'].idxmin()
epi_min_decom_df = epi.loc[epi_min_decoms_index, :]
merged_episodes = epi_min_decom_df.merge(epi_last_no_dec, on='CHILD', how='inner')
error_cohort = merged_episodes[merged_episodes['DECOM_x'] != merged_episodes['DECOM_y']]
error_list = error_cohort['index'].to_list()
error_list = list(set(error_list))
error_list.sort()
return {'Episodes': error_list}
return error, _validate
def validate_153():
error = ErrorDefinition(
code='153',
description="All data items relating to a child's activity or accommodation after leaving care must be coded or left blank.",
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
oc3 = dfs['OC3']
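        # IN_TOUCH, ACTIV and ACCOM must be either all completed or all blank.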
oc3_not_na = (
oc3['IN_TOUCH'].notna() &
oc3['ACTIV'].notna() &
oc3['ACCOM'].notna()
)
oc3_all_na = (
oc3['IN_TOUCH'].isna() &
oc3['ACTIV'].isna() &
oc3['ACCOM'].isna()
)
validation_error = ~oc3_not_na & ~oc3_all_na
validation_error_locations = oc3.index[validation_error]
return {'OC3': validation_error_locations.to_list()}
return error, _validate
def validate_166():
error = ErrorDefinition(
code='166',
description="Date of review is invalid or blank.",
affected_fields=['REVIEW'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
review = dfs['Reviews']
error_mask = pd.to_datetime(review['REVIEW'], format='%d/%m/%Y', errors='coerce').isna()
validation_error_locations = review.index[error_mask]
return {'Reviews': validation_error_locations.to_list()}
return error, _validate
def validate_174():
error = ErrorDefinition(
code='174',
description="Mother's child date of birth is recorded but gender shows that the child is a male.",
affected_fields=['SEX', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
child_is_male = header['SEX'].astype(str) == '1'
mc_dob_recorded = header['MC_DOB'].notna()
error_mask = child_is_male & mc_dob_recorded
validation_error_locations = header.index[error_mask]
return {'Header': validation_error_locations.to_list()}
return error, _validate
def validate_180():
error = ErrorDefinition(
code='180',
description="Data entry for the strengths and difficulties questionnaire (SDQ) score is invalid.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
oc2['SDQ_SCORE'] = pd.to_numeric(oc2['SDQ_SCORE'], errors='coerce')
error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['SDQ_SCORE'].isin(range(41))
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_181():
error = ErrorDefinition(
code='181',
description="Data items relating to children looked after continuously for 12 months should be completed with a 0 or 1.",
affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
code_list = ['0', '1']
fields_of_interest = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
error_mask = (
oc2[fields_of_interest].notna()
& ~oc2[fields_of_interest].astype(str).isin(['0', '1'])
).any(axis=1)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_192():
error = ErrorDefinition(
code='192',
description="Child has been identified as having a substance misuse problem but the additional item on whether an intervention was received has been left blank.",
affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
misuse = oc2['SUBSTANCE_MISUSE'].astype(str) == '1'
intervention_blank = oc2['INTERVENTION_RECEIVED'].isna()
error_mask = misuse & intervention_blank
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_193():
error = ErrorDefinition(
code='193',
description="Child not identified as having a substance misuse problem but at least one of the two additional items on whether an intervention were offered and received have been completed.",
affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
no_substance_misuse = oc2['SUBSTANCE_MISUSE'].isna() | (oc2['SUBSTANCE_MISUSE'].astype(str) == '0')
intervention_not_blank = oc2['INTERVENTION_RECEIVED'].notna() | oc2['INTERVENTION_OFFERED'].notna()
error_mask = no_substance_misuse & intervention_not_blank
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_197a():
error = ErrorDefinition(
code='197a',
description="Reason for no Strengths and Difficulties (SDQ) score is not required if Strengths and Difficulties Questionnaire score is filled in.",
affected_fields=['SDQ_SCORE', 'SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
sdq_filled_in = oc2['SDQ_SCORE'].notna()
reason_filled_in = oc2['SDQ_REASON'].notna()
error_mask = sdq_filled_in & reason_filled_in
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.tolist()}
return error, _validate
def validate_567():
error = ErrorDefinition(
code='567',
description='The date that the missing episode or episode that the child was away from placement without authorisation ended is before the date that it started.',
affected_fields=['MIS_START', 'MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
mis = dfs['Missing']
mis['MIS_START'] = pd.to_datetime(mis['MIS_START'], format='%d/%m/%Y', errors='coerce')
mis['MIS_END'] = pd.to_datetime(mis['MIS_END'], format='%d/%m/%Y', errors='coerce')
mis_error = mis[mis['MIS_START'] > mis['MIS_END']]
return {'Missing': mis_error.index.to_list()}
return error, _validate
def validate_304():
error = ErrorDefinition(
code='304',
description='Date unaccompanied asylum-seeking child (UASC) status ceased must be on or before the 18th birthday of a child.',
affected_fields=['DUC'],
)
def _validate(dfs):
if 'UASC' not in dfs:
return {}
else:
uasc = dfs['UASC']
uasc['DOB'] = pd.to_datetime(uasc['DOB'], format='%d/%m/%Y', errors='coerce')
uasc['DUC'] = pd.to_datetime(uasc['DUC'], format='%d/%m/%Y', errors='coerce')
mask = uasc['DUC'].notna() & (uasc['DUC'] > uasc['DOB'] + pd.offsets.DateOffset(years=18))
return {'UASC': uasc.index[mask].to_list()}
return error, _validate
def validate_333():
error = ErrorDefinition(
code='333',
description='Date should be placed for adoption must be on or prior to the date of matching child with adopter(s).',
affected_fields=['DATE_INT'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
adt = dfs['AD1']
adt['DATE_MATCH'] = pd.to_datetime(adt['DATE_MATCH'], format='%d/%m/%Y', errors='coerce')
adt['DATE_INT'] = pd.to_datetime(adt['DATE_INT'], format='%d/%m/%Y', errors='coerce')
# If <DATE_MATCH> provided, then <DATE_INT> must also be provided and be <= <DATE_MATCH>
mask1 = adt['DATE_MATCH'].notna() & adt['DATE_INT'].isna()
mask2 = adt['DATE_MATCH'].notna() & adt['DATE_INT'].notna() & (adt['DATE_INT'] > adt['DATE_MATCH'])
mask = mask1 | mask2
return {'AD1': adt.index[mask].to_list()}
return error, _validate
def validate_1011():
error = ErrorDefinition(
code='1011',
description='This child is recorded as having his/her care transferred to another local authority for the final episode and therefore should not have the care leaver information completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'OC3' not in dfs or 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
oc3 = dfs['OC3']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
# If final <REC> = 'E3' then <IN_TOUCH>; <ACTIV> and <ACCOM> should not be provided
epi.sort_values(['CHILD', 'DECOM'], inplace=True)
grouped_decom_by_child = epi.groupby(['CHILD'])['DECOM'].idxmax(skipna=True)
max_decom_only = epi.loc[epi.index.isin(grouped_decom_by_child), :]
E3_is_last = max_decom_only[max_decom_only['REC'] == 'E3']
oc3.reset_index(inplace=True)
cohort_to_check = oc3.merge(E3_is_last, on='CHILD', how='inner')
error_mask = cohort_to_check[['IN_TOUCH', 'ACTIV', 'ACCOM']].notna().any(axis=1)
error_list = cohort_to_check['index'][error_mask].to_list()
error_list = list(set(error_list))
error_list.sort()
return {'OC3': error_list}
return error, _validate
def validate_574():
error = ErrorDefinition(
code='574',
description='A new missing/away from placement without authorisation period cannot start when the previous missing/away from placement without authorisation period is still open. Missing/away from placement without authorisation periods should also not overlap.',
affected_fields=['MIS_START', 'MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
mis = dfs['Missing']
mis['MIS_START'] = pd.to_datetime(mis['MIS_START'], format='%d/%m/%Y', errors='coerce')
mis['MIS_END'] = pd.to_datetime(mis['MIS_END'], format='%d/%m/%Y', errors='coerce')
mis.sort_values(['CHILD', 'MIS_START'], inplace=True)
mis.reset_index(inplace=True)
mis.reset_index(inplace=True) # Twice on purpose
mis['LAG_INDEX'] = mis['level_0'].shift(-1)
lag_mis = mis.merge(mis, how='inner', left_on='level_0', right_on='LAG_INDEX', suffixes=['', '_PREV'])
# We're only interested in cases where there is more than one row for a child.
lag_mis = lag_mis[lag_mis['CHILD'] == lag_mis['CHILD_PREV']]
# A previous MIS_END date is null
mask1 = lag_mis['MIS_END_PREV'].isna()
# MIS_START is before previous MIS_END (overlapping dates)
mask2 = lag_mis['MIS_START'] < lag_mis['MIS_END_PREV']
mask = mask1 | mask2
error_list = lag_mis['index'][mask].to_list()
error_list.sort()
return {'Missing': error_list}
return error, _validate
def validate_564():
error = ErrorDefinition(
code='564',
description='Child was missing or away from placement without authorisation and the date started is blank.',
affected_fields=['MISSING', 'MIS_START'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
mis = dfs['Missing']
error_mask = mis['MISSING'].isin(['M', 'A', 'm', 'a']) & mis['MIS_START'].isna()
return {'Missing': mis.index[error_mask].to_list()}
return error, _validate
def validate_566():
error = ErrorDefinition(
code='566',
        description="The date that the child's episode of being missing or away from placement without authorisation ended has been completed but whether the child was missing or away without authorisation has not been completed.",
affected_fields=['MISSING', 'MIS_END'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
mis = dfs['Missing']
error_mask = mis['MISSING'].isna() & mis['MIS_END'].notna()
return {'Missing': mis.index[error_mask].to_list()}
return error, _validate
def validate_436():
error = ErrorDefinition(
code='436',
description='Reason for new episode is that both child’s placement and legal status have changed, but this is not reflected in the episode data.',
affected_fields=['RNE', 'LS', 'PLACE', 'PL_POST', 'URN', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi.sort_values(['CHILD', 'DECOM'], inplace=True)
epi.reset_index(inplace=True)
epi.reset_index(inplace=True)
epi['LAG_INDEX'] = epi['level_0'].shift(-1)
epi.fillna(value={"LS": '*', "PLACE": '*', "PL_POST": '*', "URN": '*', "PLACE_PROVIDER": '*'}, inplace=True)
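            # Fill blanks with a placeholder so that two blank values compare as equal below.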
epi_merge = epi.merge(epi, how='inner', left_on='level_0', right_on='LAG_INDEX', suffixes=['', '_PRE'])
epi_multi_row = epi_merge[epi_merge['CHILD'] == epi_merge['CHILD_PRE']]
epi_has_B_U = epi_multi_row[epi_multi_row['RNE'].isin(['U', 'B'])]
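            # RNE of B or U means both legal status and placement changed; flag rows where the legal status is unchanged or all placement details are unchanged.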
mask_ls = epi_has_B_U['LS'] == epi_has_B_U['LS_PRE']
mask1 = epi_has_B_U['PLACE'] == epi_has_B_U['PLACE_PRE']
mask2 = epi_has_B_U['PL_POST'] == epi_has_B_U['PL_POST_PRE']
mask3 = epi_has_B_U['URN'] == epi_has_B_U['URN_PRE']
mask4 = epi_has_B_U['PLACE_PROVIDER'] == epi_has_B_U['PLACE_PROVIDER_PRE']
error_mask = mask_ls | (mask1 & mask2 & mask3 & mask4)
error_list = epi_has_B_U[error_mask]['index'].to_list()
error_list.sort()
return {'Episodes': error_list}
return error, _validate
def validate_570():
error = ErrorDefinition(
code='570',
description='The date that the child started to be missing or away from placement without authorisation is after the end of the collection year.',
affected_fields=['MIS_START'],
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
mis = dfs['Missing']
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
mis['MIS_START'] = pd.to_datetime(mis['MIS_START'], format='%d/%m/%Y', errors='coerce')
error_mask = mis['MIS_START'] > collection_end
return {'Missing': mis.index[error_mask].to_list()}
return error, _validate
def validate_531():
error = ErrorDefinition(
code='531',
description='A placement provider code of PR5 cannot be associated with placements P1.',
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
error_mask = (epi['PLACE'] == 'P1') & (epi['PLACE_PROVIDER'] == 'PR5')
return {'Episodes': epi.index[error_mask].to_list()}
return error, _validate
def validate_542():
error = ErrorDefinition(
code='542',
description='A child aged under 10 at 31 March should not have conviction information completed.',
affected_fields=['CONVICTED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
error_mask = (oc2['DOB'] + pd.offsets.DateOffset(years=10) > collection_end) & oc2['CONVICTED'].notna()
return {'OC2': oc2.index[error_mask].to_list()}
return error, _validate
def validate_620():
error = ErrorDefinition(
code='620',
description='Child has been recorded as a mother, but date of birth shows that the mother is under 11 years of age.',
affected_fields=['DOB', 'MOTHER'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
hea = dfs['Header']
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
hea['DOB'] = pd.to_datetime(hea['DOB'], format='%d/%m/%Y', errors='coerce')
hea_mother = hea[hea['MOTHER'].astype(str) == '1']
error_cohort = (hea_mother['DOB'] + pd.offsets.DateOffset(years=11)) > collection_start
return {'Header': hea_mother.index[error_cohort].to_list()}
return error, _validate
def validate_225():
error = ErrorDefinition(
code='225',
description='Reason for placement change must be recorded.',
affected_fields=['REASON_PLACE_CHANGE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi.sort_values(['CHILD', 'DECOM'], inplace=True)
epi.reset_index(inplace=True)
epi.reset_index(inplace=True)
epi['LAG_INDEX'] = epi['level_0'].shift(1)
m_epi = epi.merge(epi, how='inner', left_on='level_0', right_on='LAG_INDEX', suffixes=['', '_NEXT'])
m_epi = m_epi[m_epi['CHILD'] == m_epi['CHILD_NEXT']]
mask_is_X1 = m_epi['REC'] == 'X1'
mask_null_place_chg = m_epi['REASON_PLACE_CHANGE'].isna()
mask_place_not_T = ~m_epi['PLACE'].isin(['T0', 'T1', 'T2', 'T3', 'T4'])
mask_next_is_PBTU = m_epi['RNE_NEXT'].isin(['P', 'B', 'T', 'U'])
mask_next_place_not_T = ~m_epi['PLACE_NEXT'].isin(['T0', 'T1', 'T2', 'T3', 'T4'])
error_mask = mask_is_X1 & mask_null_place_chg & mask_place_not_T & mask_next_is_PBTU & mask_next_place_not_T
error_list = m_epi['index'][error_mask].to_list()
return {'Episodes': error_list}
return error, _validate
def validate_353():
error = ErrorDefinition(
code='353',
description='No episode submitted can start before 14 October 1991.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
min_decom_allowed = pd.to_datetime('14/10/1991', format='%d/%m/%Y', errors='coerce')
error_mask = epi['DECOM'] < min_decom_allowed
return {'Episodes': epi.index[error_mask].to_list()}
return error, _validate
def validate_528():
error = ErrorDefinition(
code='528',
description='A placement provider code of PR2 cannot be associated with placements P1, R2 or R5.',
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
error_mask = (epi['PLACE'].isin(['P1', 'R2', 'R5'])) & (epi['PLACE_PROVIDER'] == 'PR2')
return {'Episodes': epi.index[error_mask].to_list()}
return error, _validate
def validate_527():
error = ErrorDefinition(
code='527',
description='A placement provider code of PR1 cannot be associated with placements P1, R2 or R5.',
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
error_mask = (epi['PLACE'].isin(['P1', 'R2', 'R5'])) & (epi['PLACE_PROVIDER'] == 'PR1')
return {'Episodes': epi.index[error_mask].to_list()}
return error, _validate
def validate_359():
error = ErrorDefinition(
code='359',
description='Child being looked after following 18th birthday must be accommodated under section 20(5) of the Children Act 1989 in a community home.',
affected_fields=['DEC', 'LS', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
epi = dfs['Episodes']
hea = dfs['Header']
hea['DOB'] = pd.to_datetime(hea['DOB'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
epi.reset_index(inplace=True)
epi = epi.merge(hea, on='CHILD', how='left', suffixes=['', '_HEA'])
mask_older_18 = (epi['DOB'] + pd.offsets.DateOffset(years=18)) < collection_end
mask_null_dec = epi['DEC'].isna()
mask_is_V2_K2 = (epi['LS'] == 'V2') & (epi['PLACE'] == 'K2')
error_mask = mask_older_18 & mask_null_dec & ~mask_is_V2_K2
error_list = epi['index'][error_mask].to_list()
error_list = list(set(error_list))
return {'Episodes': error_list}
return error, _validate
def validate_562():
error = ErrorDefinition(
code='562',
description='Episode commenced before the start of the current collection year but there is a missing continuous episode in the previous year.',
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi_last = dfs['Episodes_last']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi_last['DECOM'] = pd.to_datetime(epi_last['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
epi.reset_index(inplace=True)
epi = epi[epi['DECOM'] < collection_start]
grp_decom_by_child = epi.groupby(['CHILD'])['DECOM'].idxmin(skipna=True)
min_decom = epi.loc[epi.index.isin(grp_decom_by_child), :]
grp_last_decom_by_child = epi_last.groupby(['CHILD'])['DECOM'].idxmax(skipna=True)
max_last_decom = epi_last.loc[epi_last.index.isin(grp_last_decom_by_child), :]
merged_co = min_decom.merge(max_last_decom, how='left', on=['CHILD', 'DECOM'], suffixes=['', '_PRE'],
indicator=True)
error_cohort = merged_co[merged_co['_merge'] == 'left_only']
error_list = error_cohort['index'].to_list()
error_list = list(set(error_list))
error_list.sort()
return {'Episodes': error_list}
return error, _validate
def validate_354():
error = ErrorDefinition(
code='354',
description="Date episode ceased must be on or before the end of the current collection year.",
affected_fields=['DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
error_mask = epi['DECOM'] > collection_end
error_list = epi.index[error_mask].to_list()
return {'Episodes': error_list}
return error, _validate
def validate_385():
error = ErrorDefinition(
code='385',
description="Date episode ceased must be on or before the end of the current collection year.",
affected_fields=['DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
epi['DEC'] = pd.to_datetime(epi['DEC'], format='%d/%m/%Y', errors='coerce')
error_mask = epi['DEC'] > collection_end
error_list = epi.index[error_mask].to_list()
return {'Episodes': error_list}
return error, _validate
def validate_408():
error = ErrorDefinition(
code='408',
description='Child is placed for adoption with a placement order, but no placement order has been recorded.',
affected_fields=['PLACE', 'LS'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
error_mask = epi['PLACE'].isin(['A5', 'A6']) & (epi['LS'] != 'E1')
return {'Episodes': epi.index[error_mask].to_list()}
return error, _validate
def validate_380():
error = ErrorDefinition(
code='380',
description='A period of care cannot start with a temporary placement.',
affected_fields=['PLACE', 'RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
error_mask = (epi['PLACE'].isin(['T0', 'T1', 'T2', 'T3', 'T4'])) & (~epi['RNE'].isin(['P', 'B']))
return {'Episodes': epi.index[error_mask].to_list()}
return error, _validate
def validate_381():
error = ErrorDefinition(
code='381',
description='A period of care cannot end with a temporary placement.',
affected_fields=['PLACE', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
error_mask = (epi['PLACE'].isin(['T0', 'T1', 'T2', 'T3', 'T4'])) & (epi['REC'] != 'X1') & (
epi['REC'].notna())
return {'Episodes': epi.index[error_mask].to_list()}
return error, _validate
def validate_504():
error = ErrorDefinition(
code='504',
description='The category of need code differs from that reported at start of current period of being looked after',
affected_fields=['CIN'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi['DEC'] = pd.to_datetime(epi['DEC'], format='%d/%m/%Y', errors='coerce')
epi.sort_values(['CHILD', 'DECOM'], inplace=True)
epi.reset_index(inplace=True)
epi.reset_index(inplace=True)
epi['LAG_INDEX'] = epi['level_0'].shift(1)
merge_epi = epi.merge(epi, how='inner', left_on='LAG_INDEX', right_on='level_0', suffixes=['', '_PRE'])
merge_epi = merge_epi[merge_epi['CHILD'] == merge_epi['CHILD_PRE']]
merge_epi = merge_epi[(merge_epi['REC_PRE'] == 'X1') & (merge_epi['DEC_PRE'] == merge_epi['DECOM'])]
error_cohort = merge_epi[merge_epi['CIN'] != merge_epi['CIN_PRE']]
error_list = error_cohort['index'].unique().tolist()
error_list.sort()
return {'Episodes': error_list}
return error, _validate
def validate_431():
error = ErrorDefinition(
code='431',
description='The reason for new episode is started to be looked after, but the previous episode ended on the same day.',
affected_fields=['RNE', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi['DEC'] = pd.to_datetime(epi['DEC'], format='%d/%m/%Y', errors='coerce')
epi.sort_values(['CHILD', 'DECOM'], inplace=True)
epi.reset_index(inplace=True)
epi.reset_index(inplace=True)
epi['LAG_INDEX'] = epi['level_0'].shift(-1)
m_epi = epi.merge(epi, how='inner', left_on='level_0', right_on='LAG_INDEX', suffixes=['', '_PREV'])
m_epi = m_epi[(m_epi['CHILD'] == m_epi['CHILD_PREV']) & (m_epi['RNE'] == 'S')]
error_mask = m_epi['DECOM'] <= m_epi['DEC_PREV']
error_list = m_epi['index'][error_mask].to_list()
error_list.sort()
return {'Episodes': error_list}
return error, _validate
def validate_503_Generic(subval):
Gen_503_dict = {
"A": {
"Desc": "The reason for new episode in the first episode does not match open episode at end of last year.",
"Fields": 'RNE'},
"B": {"Desc": "The legal status in the first episode does not match open episode at end of last year.",
"Fields": 'LS'},
"C": {"Desc": "The category of need in the first episode does not match open episode at end of last year.",
"Fields": 'CIN'},
"D": {"Desc": "The placement type in the first episode does not match open episode at end of last year",
"Fields": 'PLACE'},
"E": {"Desc": "The placement provider in the first episode does not match open episode at end of last year.",
"Fields": 'PLACE_PROVIDER'},
"F": {"Desc": "The Ofsted URN in the first episode does not match open episode at end of last year.",
"Fields": 'URN'},
"G": {"Desc": "The distance in first episode does not match open episode at end of last year.",
"Fields": 'PL_DISTANCE'},
"H": {"Desc": "The placement LA in first episode does not match open episode at end of last year.",
"Fields": 'PL_LA'},
"J": {"Desc": "The placement location in first episode does not match open episode at end of last year.",
"Fields": 'PL_LOCATION'},
}
error = ErrorDefinition(
code='503' + subval,
description=Gen_503_dict[subval]['Desc'],
affected_fields=[Gen_503_dict[subval]['Fields']],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi_last = dfs['Episodes_last']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi_last['DECOM'] = pd.to_datetime(epi_last['DECOM'], format='%d/%m/%Y', errors='coerce')
epi.reset_index(inplace=True)
grp_decom_by_child = epi.groupby(['CHILD'])['DECOM'].idxmin(skipna=True)
min_decom = epi.loc[epi.index.isin(grp_decom_by_child), :]
grp_last_decom_by_child = epi_last.groupby(['CHILD'])['DECOM'].idxmax(skipna=True)
max_last_decom = epi_last.loc[epi_last.index.isin(grp_last_decom_by_child), :]
merged_co = min_decom.merge(max_last_decom, how='inner', on=['CHILD', 'DECOM'], suffixes=['', '_PRE'])
this_one = Gen_503_dict[subval]['Fields']
pre_one = this_one + '_PRE'
if subval == 'G':
err_mask = abs(merged_co[this_one].astype(float) - merged_co[pre_one].astype(float)) >= 0.2
else:
err_mask = merged_co[this_one].astype(str) != merged_co[pre_one].astype(str)
err_list = merged_co['index'][err_mask].unique().tolist()
err_list.sort()
return {'Episodes': err_list}
return error, _validate
def validate_503A():
return validate_503_Generic('A')
def validate_503B():
return validate_503_Generic('B')
def validate_503C():
return validate_503_Generic('C')
def validate_503D():
return validate_503_Generic('D')
def validate_503E():
return validate_503_Generic('E')
def validate_503F():
return validate_503_Generic('F')
def validate_503G():
return validate_503_Generic('G')
def validate_503H():
return validate_503_Generic('H')
def validate_503J():
return validate_503_Generic('J')
def validate_526():
error = ErrorDefinition(
code='526',
description='Child is missing a placement provider code for at least one episode.',
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
error_mask = ~epi['PLACE'].isin(['T0', 'T1', 'T2', 'T3', 'T4', 'Z1']) & epi['PLACE_PROVIDER'].isna()
return {'Episodes': epi.index[error_mask].to_list()}
return error, _validate
def validate_370to376and379(subval):
Gen_370_dict = {
"370": {"Desc": "Child in independent living should be at least 15.",
"P_Code": 'P2', "Y_gap": 15},
"371": {"Desc": "Child in semi-independent living accommodation not subject to children’s homes regulations " +
"should be at least 14.",
"P_Code": 'H5', "Y_gap": 14},
"372": {"Desc": "Child in youth custody or prison should be at least 10.",
"P_Code": 'R5', "Y_gap": 10},
"373": {"Desc": "Child placed in a school should be at least 4 years old.",
"P_Code": 'S1', "Y_gap": 4},
"374": {"Desc": "Child in residential employment should be at least 14 years old.",
"P_Code": 'P3', "Y_gap": 14},
"375": {"Desc": "Hospitalisation coded as a temporary placement exceeds six weeks.",
"P_Code": 'T1', "Y_gap": 42},
"376": {"Desc": "Temporary placements coded as being due to holiday of usual foster carer(s) cannot exceed " +
"three weeks.",
"P_Code": 'T3', "Y_gap": 21},
"379": {"Desc": "Temporary placements for unspecified reason (placement code T4) cannot exceed seven days.",
"P_Code": 'T4', "Y_gap": 7},
}
error = ErrorDefinition(
code=str(subval),
description=Gen_370_dict[subval]['Desc'],
affected_fields=['DECOM', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
epi = dfs['Episodes']
hea = dfs['Header']
hea['DOB'] = pd.to_datetime(hea['DOB'], format='%d/%m/%Y', errors='coerce')
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi['DEC'] = pd.to_datetime(epi['DEC'], format='%d/%m/%Y', errors='coerce')
epi.reset_index(inplace=True)
epi_p2 = epi[epi['PLACE'] == Gen_370_dict[subval]['P_Code']]
merged_e = epi_p2.merge(hea, how='inner', on='CHILD')
merged_e = merged_e.dropna(subset=['DECOM', 'DEC', 'DOB'])
if subval in ['370', '371', '372', '373', '374']:
error_mask = merged_e['DECOM'] < (merged_e['DOB'] +
pd.offsets.DateOffset(years=Gen_370_dict[subval]['Y_gap']))
else:
error_mask = merged_e['DEC'] > (merged_e['DECOM'] +
pd.offsets.DateOffset(days=Gen_370_dict[subval]['Y_gap']))
return {'Episodes': merged_e['index'][error_mask].unique().tolist()}
return error, _validate
def validate_370():
return validate_370to376and379('370')
def validate_371():
return validate_370to376and379('371')
def validate_372():
return validate_370to376and379('372')
def validate_373():
return validate_370to376and379('373')
def validate_374():
return validate_370to376and379('374')
def validate_375():
return validate_370to376and379('375')
def validate_376():
return validate_370to376and379('376')
def validate_379():
return validate_370to376and379('379')
def validate_529():
error = ErrorDefinition(
code='529',
description='Placement provider code of PR3 cannot be associated with placements P1, A3 to A6, K1, K2 and U1 to U6 as these placements cannot be provided by other public organisations.',
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
code_list_placement_type = ['A3', 'A4', 'A5', 'A6', 'K1', 'K2', 'P1', 'U1', 'U2', 'U3', 'U4', 'U5', 'U6']
error_mask = epi['PLACE'].isin(code_list_placement_type) & (epi['PLACE_PROVIDER'] == 'PR3')
return {'Episodes': epi.index[error_mask].to_list()}
return error, _validate
def validate_383():
error = ErrorDefinition(
code='383',
description='A child in a temporary placement must subsequently return to his/her normal placement.',
affected_fields=['PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi.sort_values(['CHILD', 'DECOM'], inplace=True)
epi.reset_index(inplace=True)
epi.reset_index(inplace=True)
epi['LAG_INDEX'] = epi['level_0'].shift(-1)
epi['LEAD_INDEX'] = epi['level_0'].shift(1)
m_epi = epi.merge(epi, how='inner', left_on='level_0', right_on='LAG_INDEX', suffixes=['', '_TOP'])
m_epi = m_epi.merge(epi, how='inner', left_on='level_0', right_on='LEAD_INDEX', suffixes=['', '_BOTM'])
m_epi = m_epi[m_epi['CHILD'] == m_epi['CHILD_TOP']]
m_epi = m_epi[m_epi['CHILD'] == m_epi['CHILD_BOTM']]
m_epi = m_epi[m_epi['PLACE'].isin(['T0', 'T1', 'T2', 'T3', 'T4'])]
mask1 = m_epi['RNE_BOTM'] != 'P'
mask2 = m_epi['PLACE_BOTM'] != m_epi['PLACE_TOP']
err_mask = mask1 | mask2
err_list = m_epi['index'][err_mask].unique().tolist()
err_list.sort()
return {'Episodes': err_list}
return error, _validate
def validate_377():
error = ErrorDefinition(
code='377',
description='Only two temporary placements coded as being due to holiday of usual foster carer(s) are ' +
                    'allowed in any 12-month period.',
affected_fields=['PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
epi = dfs['Episodes']
epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
epi.reset_index(inplace=True)
potent_cohort = epi[epi['PLACE'] == 'T3']
# Here I'm after the CHILD ids where there are more than 2 T3 placements.
count_them = potent_cohort.groupby('CHILD')['CHILD'].count().to_frame(name='cc')
count_them.reset_index(inplace=True)
count_them = count_them[count_them['cc'] > 2]
err_coh = epi[epi['CHILD'].isin(count_them['CHILD'])]
err_coh = err_coh[err_coh['PLACE'] == 'T3']
err_list = err_coh['index'].unique().tolist()
err_list.sort()
return {'Episodes': err_list}
return error, _validate
# !# Potential false negatives - if child has no missing periods in current year's Missing table nothing is flagged!
def validate_576():
error = ErrorDefinition(
code='576',
description='There is an open missing/away from placement without authorisation period in ' +
'last year’s return and there is no corresponding period recorded at the start of ' +
'this year.',
affected_fields=['CHILD'],
)
def _validate(dfs):
if 'Missing' not in dfs or 'Missing_last' not in dfs:
return {}
else:
mis = dfs['Missing']
mis_l = dfs['Missing_last']
            mis['MIS_START'] = pd.to_datetime(mis['MIS_START'], format='%d/%m/%Y', errors='coerce')
import numpy as np
import cv2
import os
import pandas as pd
import progressbar
from utilities.generators import VideoSequenceGenerator
from pathlib import Path
from itertools import islice
from utilities.preprocessing import VideoVGG16FeatureExtractor
from utilities.preprocessing import VideoScorerPreprocessor
# import psutil
class SimpleDatasetLoader:
""" class able to load specific image or video data and apply to them dynamic generators and preprocessors.
While a PREPROCESSOR in a one input-one output function able to transform in some way the input image/video, a GENERATOR take one
file to generate many outputs for it (i.e. the simple splitting of a video in several subsequences"""
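    # A minimal sketch of the distinction (hypothetical call signatures, for illustration only):
    #   frame = SomePreprocessor().preprocess(frame)            # preprocessor: 1 input -> 1 output
    #   clips = VideoSequenceGenerator().generate(video_path)   # generator:    1 input -> many outputs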
frame_count = 0
def __init__(self, preprocessors=None, generators=None):
# store the image preprocessor
self.preprocessors = preprocessors
self.generators = generators
# if the preprocessors or generators are None, initialize them as an
# empty list
if self.preprocessors is None:
self.preprocessors = []
# note: if no generator has been provided, the null generator should be added by default
if self.generators is None:
self.generators = []
def load(self, imagePaths, verbose=-1):
# initialize the list of features and labels
data = []
labels = []
# loop over the input images
for (i, imagePath) in enumerate(imagePaths):
# load the image and extract the class label assuming
# that our path has the following format:
# /path/to/dataset/{class}/{image}.jpg
image = cv2.imread(imagePath)
label = imagePath.split(os.path.sep)[-2]
# check to see if our preprocessors are not None
if self.preprocessors is not None:
# loop over the preprocessors and apply each to
# the image
for p in self.preprocessors:
image = p.preprocess(image)
# treat our processed image as a "feature vector"
# by updating the data list followed by the labels
data.append(image)
labels.append(label)
# show an update every 'verbose' images
if verbose > 0 and i > 0 and (i + 1) % verbose == 0:
print("[INFO] processed {}/{}".format(i + 1, len(imagePaths)))
# return a tuple of the data and labels
return np.array(data), np.array(labels)
# def video_transform(self, video_list):
# """ makes an input-output transformation of data taken from a list """
# for p in self.preprocessors:
# [video, label, path] = p.preprocess(str(path), isHighlight, isCelebration, int(initSeqFrame), int(maxFrame),
# float(highlight_length), int(pad_frames), int(start_celebration))
def load_video(self, video_csv_path, verbose=-1, bs=None, num_images=0, mode="train"):
sequences2load = 0
f = open(video_csv_path, "r")
# read header
line = f.readline()
while True:
# gives an object with many fields
# print(psutil.virtual_memory())
# initialize the list of features and labels
data = []
labels = []
while len(data) < bs:
video = []
line = f.readline()
if line == "":
print("ciao")
# reset the file pointer to the beginning of the file
# and re-read the line
f.seek(0)
line = f.readline()
line = f.readline()
# if we are evaluating we should now break from our
# loop to ensure we don't continue to fill up the
# batch from samples at the beginning of the file
if mode == "eval":
break
# extract the label and construct the image
line = line.strip().split(",")
path = line[0]
isHighlight = line[1]
isCelebration = line[2]
initSeqFrame = line[3]
maxFrame = line[4]
highlight_length = line[5]
pad_frames = float(line[6])
start_celebration = float(line[7])
if self.preprocessors is not None:
for p in self.preprocessors:
# if isinstance(p, VideoVGG16FeatureExtractor):
# [video, label, path] = p.preprocess_mod(str(path), video, isHighlight, isCelebration, int(initSeqFrame), int(maxFrame), float(highlight_length), int(pad_frames), int(start_celebration))
# elif isinstance(p, VideoScorerPreprocessor):
# [video, label, path] = p.preprocess(str(path), video, isHighlight, isCelebration, int(initSeqFrame), int(maxFrame), float(highlight_length), int(pad_frames), int(start_celebration))
# else:
# [video, label, path] = p.preprocess_and_save(str(path), video, isHighlight, isCelebration,
# int(initSeqFrame), int(maxFrame), float(highlight_length),
# int(pad_frames), int(start_celebration))
[video, label, path] = p.preprocess_and_save(str(path), video, isHighlight, isCelebration,
int(initSeqFrame), int(maxFrame), float(highlight_length),
int(pad_frames), int(start_celebration))
#[video, label, path] = p.preprocess(str(path), video, isHighlight, isCelebration, int(initSeqFrame), int(maxFrame), float(highlight_length), int(pad_frames), int(start_celebration))
# by updating the data list followed by the labels
if labels is not None:
data.append(video)
label = np.array(label)
# label = np.transpose(label[np.newaxis])
labels.append(label)
# show an update every 'verbose' images
if verbose > 0 and len(data) > 0 and (len(data) + 1) % verbose == 0:
print("\n[INFO] loaded video {}/{}".format(len(data) + 1, sequences2load))
#labels = np.array(labels)
#print("labels.shape = ", labels.shape)
#yield np.array(data).astype("float") / 255.0, labels <--- vecchia maniera
#yield np.array(data), labels
yield np.squeeze(np.array(data)), np.squeeze(labels)
# return a tuple of the data and labels
# return np.array(data), np.array(labels)
def load_dataset(self, video_csv_path, generate=False, try_one_video=False):
for x in self.generators:
if isinstance(x, VideoSequenceGenerator):
print("[INFO] creating sequences...")
                df = pd.read_csv(video_csv_path)
# -*- coding: utf-8 -*-
"""Constants and functions in common across modules."""
# standard library imports
import contextlib
import mmap
import os
import sys
import tempfile
from pathlib import Path
# third-party imports
import numpy as np
import pandas as pd
import xxhash
from loguru import logger as loguru_logger
from memory_tempfile import MemoryTempfile
# global constants
NAME = "azulejo"
DEFAULT_PARQUET_COMPRESSION = "ZSTD"
# Sizes and minimum read times with various compressions
# for a file with one proteome on a system with M.2 SSD disk
# under pyarrow 1.0.0 into pandas 1.1.0:
# "NONE": 43MB, 1.8s
# "ZSTD": 13M, 1.8s
# "SNAPPY": 29 MB, 1.8s
# "BROTLI": 13 MB, 1.9s
# "LZ4": 23MB, (disabled under pyarrow 1.0.0, was about like brotli under 0.17)
# "GZIP": 14 MB, 2.1 s
# "LZO": not supported
# "BZ2": not supported
# In addition, the ingest process took 28.8s with None, and
# 28.4 s with ZSTD, probably due to writing less data.
# With its 70% compression factor, ZSTD can be expected to
# perform even better relative to uncompressed and snappy
# on production systems with slower disks for which
# cache is not warmed up (as mine was in this test).
# So ZSTD seems a clear choice for now.
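# Illustrative write call for the compression chosen above (a sketch, not taken from this
# module; `df` stands in for any DataFrame to be persisted). pandas forwards the string to
# pyarrow:
#   df.to_parquet("proteomes.hom.parq", compression=DEFAULT_PARQUET_COMPRESSION)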
PARQUET_EXTENSIONS = ["parquet", "pq", "parq"]
TSV_EXTENSIONS = ["tsv"]
SAVED_INPUT_FILE = "input.toml"
# Changing the extension of these files will change the type of file written.
# TSV files, though readable/editable, do not give the written values back.
# Parquet is also ~100X faster.
CLUSTER_FILETYPE = "parq"
CLUSTERS_FILE = "homology_clusters.parq"
CLUSTERSYN_FILE = "homology_clusters.syn.parq"
CLUSTER_HIST_FILE = "homology_cluster_hist.tsv"
FRAGMENTS_FILE = "fragments.tsv"
ANCHOR_HIST_FILE = "anchor_hist.tsv"
HOMOLOGY_FILE = "proteins.hom.parq"
PROTEOMES_FILE = "proteomes.tsv"
PROTEOMOLOGY_FILE = "proteomes.hom.parq"
PROTEOSYN_FILE = "proteomes.hom.syn.parq"
PROTEINS_FILE = "proteins.parq"
SYNTENY_FILE = "proteins.hom.syn.parq"
ANCHORS_FILE = "synteny_anchors.tsv"
SYNTENY_FILETYPE = "tsv"
COLLECTION_FILE = "collection.json"
COLLECTION_HOM_FILE = "collection.hom.json"
COLLECTION_SYN_FILE = "collection.hom.syn.json"
EXTERNAL_CLUSTERS_FILE = "homology_clusters-external.tsv"
# fragment-name defs
PLASTID_STARTS = ["chromop", "chl", "mt", "mi", "rh", "mu", "le", "pl"]
CHROMOSOME_SYNONYMS = ["chromosome", "chrom", "chro", "gs", "gm"]
ALTERNATE_ABBREV = "alt"
CHROMOSOME_ABBREV = "chr"
SCAFFOLD_SYNONYMS = ["scaffold", "scaf", "sca"]
SCAFFOLD_ABBREV = "sc"
# synteny codes
UNAMBIGUOUS_CODE = "U"
DISAMBIGUATED_CODE = "D"
INDIRECT_CODE = "I"
LOCALLY_UNAMBIGUOUS_CODE = "L"
NON_AMBIGUOUS_CODE = "N"
AMBIGUOUS_CODE = "A"
CODE_DICT = {
UNAMBIGUOUS_CODE: "unambiguous",
DISAMBIGUATED_CODE: "disambiguated",
INDIRECT_CODE: "indirectly unambiguous",
LOCALLY_UNAMBIGUOUS_CODE: "locally unambiguous",
NON_AMBIGUOUS_CODE: "non-ambiguous",
AMBIGUOUS_CODE: "ambiguous",
}
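# Example lookup: CODE_DICT[UNAMBIGUOUS_CODE] == "unambiguous"; the single-letter codes are
# what get stored, and this dict supplies the human-readable labels.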
DIRECTIONAL_CATEGORY = pd.CategoricalDtype(categories=["-", "+"])
YES_NO = pd.CategoricalDtype(categories=["y", "n"])
SYNTENY_CATEGORY = pd.CategoricalDtype(categories=CODE_DICT.keys())
DEFAULT_DTYPE = pd.UInt32Dtype()
NONDEFAULT_DTYPES = {
"anchor.subframe.ok": pd.BooleanDtype(),
"code": SYNTENY_CATEGORY,
"fasta_url": pd.StringDtype(),
"gff_url": pd.StringDtype(),
"frag.direction": DIRECTIONAL_CATEGORY,
"frag.id": pd.CategoricalDtype(),
"frag.is_chr": YES_NO,
"frag.is_plas": YES_NO,
"frag.is_scaf": YES_NO,
"frag.len": pd.UInt64Dtype(),
"frag.orig_id": pd.StringDtype(),
"frag.start": pd.UInt64Dtype(),
"gff.feature": pd.CategoricalDtype(),
"gff.id": | pd.CategoricalDtype() | pandas.CategoricalDtype |
"""
EC Models
=============================
**Author:** `ichbinkk`
"""
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
from torch.autograd import Variable
from torch.utils.data import Dataset
from PIL import Image
import timm.models as tm
import pandas as pd
def set_parameter_requires_grad(model, feature_extracting):
if feature_extracting:
for param in model.parameters():
param.requires_grad = False
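# When feature_extracting is True, only the freshly replaced head keeps requires_grad=True,
# so a typical optimiser setup (a sketch, assuming the conventions used below) would be:
#   params_to_update = [p for p in model_ft.parameters() if p.requires_grad]
#   optimizer = optim.SGD(params_to_update, lr=0.001, momentum=0.9)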
def initialize_model(model_name, num_classes=1, feature_extract=False, use_pretrained=False):
# Initialize these variables which will be set in this if statement. Each of these
# variables is model specific.
model_ft = None
input_size = 0
if model_name == "resnet18":
""" Resnet
"""
model_ft = models.resnet18(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "resnet34":
""" Resnet
"""
model_ft = models.resnet34(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "resnet50":
""" Resnet
"""
model_ft = models.resnet50(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "resnet101":
""" Resnet
"""
model_ft = models.resnet101(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "resnet152":
""" Resnet
"""
model_ft = models.resnet152(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
input_size = 224
# if model_name == "resnet":
# """ Resnet34
# """
# model_ft = tm.resnet34(pretrained=use_pretrained)
# set_parameter_requires_grad(model_ft, feature_extract)
# num_ftrs = model_ft.fc.in_features
# model_ft.fc = nn.Linear(num_ftrs, num_classes)
# input_size = 224
elif model_name == "regnety_040":
""" regnet
regnety_040, regnety_080, regnety_160
"""
model_ft = tm.regnety_040(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.get_classifier().in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "regnety_080":
""" regnet
regnety_040, regnety_080, regnety_160
"""
        model_ft = tm.regnety_080(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.get_classifier().in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "regnety_160":
""" regnet
regnety_040, regnety_080, regnety_160
"""
        model_ft = tm.regnety_160(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.get_classifier().in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "efficientnet_b2":
"""
efficientnet_b2 256, efficientnet_b3 288, efficientnet_b4 320
"""
model_ft = tm.efficientnet_b2(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.get_classifier().in_features
model_ft.classifier = nn.Linear(num_ftrs, num_classes)
input_size = 256
elif model_name == "efficientnet_b3":
"""
efficientnet_b2 256, efficientnet_b3 288, efficientnet_b4 320
"""
model_ft = tm.efficientnet_b3(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.get_classifier().in_features
model_ft.classifier = nn.Linear(num_ftrs, num_classes)
input_size = 288
elif model_name == "efficientnet_b4":
"""
efficientnet_b2 256, efficientnet_b3 288, efficientnet_b4 320
"""
model_ft = tm.efficientnet_b4(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.get_classifier().in_features
model_ft.classifier = nn.Linear(num_ftrs, num_classes)
input_size = 320
elif model_name == "vit_t":
model_ft = tm.vit_tiny_patch16_224(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.head.in_features
model_ft.head = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "vit_s":
model_ft = tm.vit_small_patch32_224(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.head.in_features
model_ft.head = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "pit_xs":
""" pit
pit_xs_224
"""
model_ft = tm.pit_xs_224(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.get_classifier().in_features
model_ft.head = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "pit_s":
""" pit
pit_s_224
"""
model_ft = tm.pit_s_224(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.get_classifier().in_features
model_ft.head = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "deit_s":
""" deit
deit_small_patch16_224
"""
model_ft = tm.deit_small_patch16_224(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.head.in_features
model_ft.head = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "deit_b":
""" deit
deit_base_patch16_224
"""
        model_ft = tm.deit_base_patch16_224(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.head.in_features
model_ft.head = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "mixer":
""" mixer
"""
model_ft = tm.mixer_b16_224(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.head.in_features
model_ft.head = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "swin_vit_t":
""" swin-vit
tm.swin_tiny_patch4_window7_224
"""
model_ft = tm.swin_tiny_patch4_window7_224(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.head.in_features
model_ft.head = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "swin_vit_s":
""" swin-vit
tm.swin_small_patch4_window7_224
"""
model_ft = tm.swin_small_patch4_window7_224(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.head.in_features
model_ft.head = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "alexnet":
""" Alexnet
"""
model_ft = models.alexnet(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
input_size = 224
elif model_name == "vgg11":
""" VGG11
"""
model_ft = models.vgg11(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
input_size = 224
elif model_name == "vgg19":
""" VGG19
"""
model_ft = models.vgg19(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
input_size = 224
elif model_name == "squeezenet":
""" Squeezenet
"""
model_ft = models.squeezenet1_0(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
model_ft.num_classes = num_classes
input_size = 224
elif model_name == "densenet121":
""" Densenet121
"""
model_ft = models.densenet121(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier.in_features
model_ft.classifier = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "inception":
""" Inception v3
Be careful, expects (299,299) sized images and has auxiliary output
"""
model_ft = models.inception_v3(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
# Handle the auxilary net
num_ftrs = model_ft.AuxLogits.fc.in_features
model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
# Handle the primary net
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs,num_classes)
input_size = 299
else:
print("Invalid model name, exiting...")
exit()
return model_ft, input_size
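# Minimal usage sketch (assumed, not part of the original script): build a single-output
# regression model with the factory above and move it to the available device.
#   model_ft, input_size = initialize_model("resnet18", num_classes=1,
#                                           feature_extract=False, use_pretrained=True)
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   model_ft = model_ft.to(device)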
'''write to excel'''
def save_excel(data, file):
    writer = pd.ExcelWriter(file)  # create an Excel writer for the target file
    data = pd.DataFrame(data)
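    # Presumed remainder of this helper (not in the original excerpt): write the frame and
    # flush the writer so the workbook is actually saved to disk.
    data.to_excel(writer, index=False)
    writer.save()  # writer.close() on newer pandas versions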
"""Module grouping tests for the pydov.util.query module."""
import pandas as pd
import numpy as np
import pytest
from pydov.util.dovutil import build_dov_url
from pydov.util.query import (
PropertyInList,
Join,
)
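# Context sketch (not part of this test module): in normal use these expressions are passed
# as WFS query filters, e.g. (assuming the usual pydov search classes and illustrative
# attribute values)
#   from pydov.search.boring import BoringSearch
#   df = BoringSearch().search(query=PropertyInList('methode', ['spade', 'spoelboring']))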
class TestPropertyInList(object):
"""Test the PropertyInList query expression."""
def test(self):
"""Test the PropertyInList expression with a standard list.
Test whether the generated query is correct.
"""
l = ['a', 'b', 'c']
query = PropertyInList('methode', l)
xml = query.toXML()
assert xml.tag == '{http://www.opengis.net/ogc}Or'
assert len(list(xml)) == 3
for f in xml:
assert f.tag == '{http://www.opengis.net/ogc}PropertyIsEqualTo'
propertyname = f.find('./{http://www.opengis.net/ogc}PropertyName')
assert propertyname.text == 'methode'
literal = f.find('./{http://www.opengis.net/ogc}Literal')
assert literal.text in l
l.remove(literal.text)
assert len(l) == 0
def test_duplicate(self):
"""Test the PropertyInList expression with a list containing
duplicates.
Test whether the generated query is correct and does not contain the
duplicate entry twice.
"""
l = ['a', 'a', 'b', 'c']
l_output = ['a', 'b', 'c']
query = PropertyInList('methode', l)
xml = query.toXML()
assert xml.tag == '{http://www.opengis.net/ogc}Or'
assert len(list(xml)) == 3
for f in xml:
assert f.tag == '{http://www.opengis.net/ogc}PropertyIsEqualTo'
propertyname = f.find('./{http://www.opengis.net/ogc}PropertyName')
assert propertyname.text == 'methode'
literal = f.find('./{http://www.opengis.net/ogc}Literal')
assert literal.text in l
l_output.remove(literal.text)
assert len(l_output) == 0
def test_list_single(self):
"""Test the PropertyInList expression with a list containing
a single item.
Test whether the generated query is correct and does contain only a
single PropertyIsEqualTo.
"""
l = ['a']
query = PropertyInList('methode', l)
xml = query.toXML()
assert xml.tag == '{http://www.opengis.net/ogc}PropertyIsEqualTo'
propertyname = xml.find('./{http://www.opengis.net/ogc}PropertyName')
assert propertyname.text == 'methode'
literal = xml.find('./{http://www.opengis.net/ogc}Literal')
assert literal.text in l
l.remove(literal.text)
assert len(l) == 0
def test_list_single_duplicate(self):
"""Test the PropertyInList expression with a list containing
a single duplicated item.
Test whether the generated query is correct and does contain only a
single PropertyIsEqualTo.
"""
l = ['a', 'a']
l_output = ['a']
query = PropertyInList('methode', l)
xml = query.toXML()
assert xml.tag == '{http://www.opengis.net/ogc}PropertyIsEqualTo'
propertyname = xml.find('./{http://www.opengis.net/ogc}PropertyName')
assert propertyname.text == 'methode'
literal = xml.find('./{http://www.opengis.net/ogc}Literal')
assert literal.text in l_output
l_output.remove(literal.text)
assert len(l_output) == 0
def test_emptylist(self):
"""Test the PropertyInList expression with an empty list.
Test whether a ValueError is raised.
"""
with pytest.raises(ValueError):
l = []
PropertyInList('methode', l)
def test_nolist(self):
"""Test the PropertyInList expression with a string instead of a list.
Test whether a ValueError is raised.
"""
with pytest.raises(ValueError):
l = 'goed'
PropertyInList('betrouwbaarheid', l)
class TestJoin(object):
"""Test the Join query expression."""
def test(self):
"""Test the Join expression with a standard dataframe.
Test whether the generated query is correct.
"""
l = [build_dov_url('data/boring/1986-068853'),
build_dov_url('data/boring/1986-068843'),
build_dov_url('data/boring/1980-068861')]
df = pd.DataFrame({
'pkey_boring': pd.Series(l),
'diepte_tot_m': pd.Series([10, 20, 30])
})
query = Join(df, 'pkey_boring')
xml = query.toXML()
assert xml.tag == '{http://www.opengis.net/ogc}Or'
assert len(list(xml)) == 3
for f in xml:
assert f.tag == '{http://www.opengis.net/ogc}PropertyIsEqualTo'
propertyname = f.find('./{http://www.opengis.net/ogc}PropertyName')
assert propertyname.text == 'pkey_boring'
literal = f.find('./{http://www.opengis.net/ogc}Literal')
assert literal.text in l
l.remove(literal.text)
assert len(l) == 0
def test_duplicate(self):
"""Test the Join expression with a column containing
duplicates.
Test whether the generated query is correct and does not contain the
duplicate entry twice.
"""
l = [build_dov_url('data/boring/1986-068853'),
build_dov_url('data/boring/1986-068853'),
build_dov_url('data/boring/1980-068861')]
l_output = [build_dov_url('data/boring/1986-068853'),
build_dov_url('data/boring/1980-068861')]
df = pd.DataFrame({
'pkey_boring': pd.Series(l),
'diepte_tot_m': pd.Series([10, 20, 30])
})
query = Join(df, 'pkey_boring')
xml = query.toXML()
assert xml.tag == '{http://www.opengis.net/ogc}Or'
assert len(list(xml)) == 2
for f in xml:
assert f.tag == '{http://www.opengis.net/ogc}PropertyIsEqualTo'
propertyname = f.find('./{http://www.opengis.net/ogc}PropertyName')
assert propertyname.text == 'pkey_boring'
literal = f.find('./{http://www.opengis.net/ogc}Literal')
assert literal.text in l
l_output.remove(literal.text)
assert len(l_output) == 0
def test_wrongcolumn(self):
"""Test the Join expression with a join_column not available in the
dataframe.
Test whether a ValueError is raised.
"""
with pytest.raises(ValueError):
l = [build_dov_url('data/boring/1986-068853'),
build_dov_url('data/boring/1986-068843'),
build_dov_url('data/boring/1980-068861')]
df = pd.DataFrame({
'pkey_boring': pd.Series(l),
'diepte_tot_m': pd.Series([10, 20, 30])
})
Join(df, 'pkey_sondering')
def test_single(self):
"""Test the Join expression with a dataframe containing a single row.
Test whether the generated query is correct and does contain only a
single PropertyIsEqualTo.
"""
l = [build_dov_url('data/boring/1986-068853')]
df = pd.DataFrame({
            'pkey_boring': pd.Series(l),
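            # Presumed continuation, mirroring test_list_single above (the excerpt stops here).
            'diepte_tot_m': pd.Series([10])
        })

        query = Join(df, 'pkey_boring')
        xml = query.toXML()

        assert xml.tag == '{http://www.opengis.net/ogc}PropertyIsEqualTo'

        propertyname = xml.find('./{http://www.opengis.net/ogc}PropertyName')
        assert propertyname.text == 'pkey_boring'

        literal = xml.find('./{http://www.opengis.net/ogc}Literal')
        assert literal.text in l

        l.remove(literal.text)
        assert len(l) == 0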
import pytest
import numpy as np
from datetime import date, timedelta, time, datetime
import dateutil
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import lrange
from pandas.compat.numpy import np_datetime64_compat
from pandas import (DatetimeIndex, Index, date_range, DataFrame,
Timestamp, offsets)
from pandas.util.testing import assert_almost_equal
randn = np.random.randn
class TestDatetimeIndexLikeTimestamp(object):
# Tests for DatetimeIndex behaving like a vectorized Timestamp
def test_dti_date_out_of_range(self):
# see gh-1475
pytest.raises(ValueError, DatetimeIndex, ['1400-01-01'])
pytest.raises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter',
'days_in_month', 'is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
assert result == expected
assert idx.freq == Timestamp(idx[-1], idx.freq).freq
assert idx.freqstr == Timestamp(idx[-1], idx.freq).freqstr
class TestDatetimeIndex(object):
def test_get_loc(self):
idx = pd.date_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].to_pydatetime(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
if method is not None:
assert idx.get_loc(idx[1], method,
tolerance=pd.Timedelta('0 days')) == 1
assert idx.get_loc('2000-01-01', method='nearest') == 0
assert idx.get_loc('2000-01-01T12', method='nearest') == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance='1 day') == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=pd.Timedelta('1D')) == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=np.timedelta64(1, 'D')) == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=timedelta(1)) == 1
with tm.assert_raises_regex(ValueError,
'unit abbreviation w/o a number'):
idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo')
with pytest.raises(KeyError):
idx.get_loc('2000-01-01T03', method='nearest', tolerance='2 hours')
with pytest.raises(
ValueError,
match='tolerance size must match target index size'):
idx.get_loc('2000-01-01', method='nearest',
tolerance=[pd.Timedelta('1day').to_timedelta64(),
pd.Timedelta('1day').to_timedelta64()])
assert idx.get_loc('2000', method='nearest') == slice(0, 3)
assert idx.get_loc('2000-01', method='nearest') == slice(0, 3)
assert idx.get_loc('1999', method='nearest') == 0
assert idx.get_loc('2001', method='nearest') == 2
with pytest.raises(KeyError):
idx.get_loc('1999', method='pad')
with pytest.raises(KeyError):
idx.get_loc('2001', method='backfill')
with pytest.raises(KeyError):
idx.get_loc('foobar')
with pytest.raises(TypeError):
idx.get_loc(slice(2))
idx = pd.to_datetime(['2000-01-01', '2000-01-04'])
assert idx.get_loc('2000-01-02', method='nearest') == 0
assert idx.get_loc('2000-01-03', method='nearest') == 1
assert idx.get_loc('2000-01', method='nearest') == slice(0, 2)
# time indexing
idx = pd.date_range('2000-01-01', periods=24, freq='H')
tm.assert_numpy_array_equal(idx.get_loc(time(12)),
np.array([12]), check_dtype=False)
tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)),
np.array([]), check_dtype=False)
with pytest.raises(NotImplementedError):
idx.get_loc(time(12, 30), method='pad')
def test_get_indexer(self):
idx = pd.date_range('2000-01-01', periods=3)
exp = np.array([0, 1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(idx.get_indexer(idx), exp)
target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours',
'1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour')),
np.array([0, -1, 1], dtype=np.intp))
tol_raw = [pd.Timedelta('1 hour'),
pd.Timedelta('1 hour'),
pd.Timedelta('1 hour').to_timedelta64(), ]
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=[np.timedelta64(x) for x in tol_raw]),
np.array([0, -1, 1], dtype=np.intp))
tol_bad = [pd.Timedelta('2 hour').to_timedelta64(),
pd.Timedelta('1 hour').to_timedelta64(),
'foo', ]
with pytest.raises(
ValueError, match='abbreviation w/o a number'):
idx.get_indexer(target, 'nearest', tolerance=tol_bad)
with pytest.raises(ValueError):
idx.get_indexer(idx[[0]], method='nearest', tolerance='foo')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
assert '2000' in str(e)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
unpickled = tm.round_trip_pickle(index)
tm.assert_index_equal(index, unpickled)
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
assert str(index.reindex([])[0].tz) == 'US/Eastern'
assert str(index.reindex(np.array([]))[0].tz) == 'US/Eastern'
def test_time_loc(self): # GH8667
from datetime import time
from pandas._libs.index import _SIZE_CUTOFF
ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
key = time(15, 11, 30)
start = key.hour * 3600 + key.minute * 60 + key.second
step = 24 * 3600
for n in ns:
idx = pd.date_range('2014-11-26', periods=n, freq='S')
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
tm.assert_numpy_array_equal(ts.index.get_loc(key), i,
check_dtype=False)
tm.assert_series_equal(ts[key], ts.iloc[i])
left, right = ts.copy(), ts.copy()
left[key] *= -10
right.iloc[i] *= -10
tm.assert_series_equal(left, right)
def test_time_overflow_for_32bit_machines(self):
# GH8943. On some machines NumPy defaults to np.int32 (for example,
# 32-bit Linux machines). In the function _generate_regular_range
# found in tseries/index.py, `periods` gets multiplied by `strides`
# (which has value 1e9) and since the max value for np.int32 is ~2e9,
# and since those machines won't promote np.int32 to np.int64, we get
# overflow.
periods = np.int_(1000)
idx1 = pd.date_range(start='2000', periods=periods, freq='S')
assert len(idx1) == periods
idx2 = pd.date_range(end='2000', periods=periods, freq='S')
assert len(idx2) == periods
def test_nat(self):
assert DatetimeIndex([np.nan])[0] is pd.NaT
def test_week_of_month_frequency(self):
# GH 5348: "ValueError: Could not evaluate WOM-1SUN" shouldn't raise
d1 = date(2002, 9, 1)
d2 = date(2013, 10, 27)
d3 = date(2012, 9, 30)
idx1 = DatetimeIndex([d1, d2])
idx2 = DatetimeIndex([d3])
result_append = idx1.append(idx2)
expected = DatetimeIndex([d1, d2, d3])
tm.assert_index_equal(result_append, expected)
result_union = idx1.union(idx2)
expected = DatetimeIndex([d1, d3, d2])
tm.assert_index_equal(result_union, expected)
# GH 5115
result = date_range("2013-1-1", periods=4, freq='WOM-1SAT')
dates = ['2013-01-05', '2013-02-02', '2013-03-02', '2013-04-06']
expected = DatetimeIndex(dates, freq='WOM-1SAT')
tm.assert_index_equal(result, expected)
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
# GH2658
import datetime
start = datetime.datetime.now()
idx = DatetimeIndex(start=start, freq="1d", periods=10)
df = DataFrame(lrange(10), index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
assert isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
pytest.raises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
def test_comparisons_nat(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
'2014-05-01', '2014-07-01'])
didx2 = pd.DatetimeIndex(['2014-02-01', '2014-03-01', pd.NaT, pd.NaT,
'2014-06-01', '2014-07-01'])
darr = np.array([np_datetime64_compat('2014-02-01 00:00Z'),
np_datetime64_compat('2014-03-01 00:00Z'),
np_datetime64_compat('nat'), np.datetime64('nat'),
np_datetime64_compat('2014-06-01 00:00Z'),
np_datetime64_compat('2014-07-01 00:00Z')])
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handles as the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check pd.NaT is handles as the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = Index([f(x) for x in rng], dtype='<U8')
tm.assert_index_equal(result, exp)
def test_iteration_preserves_tz(self):
# see gh-8890
index = date_range("2012-01-01", periods=3, freq='H', tz='US/Eastern')
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result == expected
index = date_range("2012-01-01", periods=3, freq='H',
tz=dateutil.tz.tzoffset(None, -28800))
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
# 9100
index = pd.DatetimeIndex(['2014-12-01 03:32:39.987000-08:00',
'2014-12-01 04:12:34.987000-08:00'])
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
assert isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
assert not idx.equals(list(idx))
non_datetime = Index(list('abc'))
assert not idx.equals(list(non_datetime))
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.loc['1/3/2000']
assert result.name == df.index[2]
result = df.T['1/3/2000']
assert result.name == df.index[2]
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
tm.assert_index_equal(result, ex)
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
assert idx.argmin() == 1
assert idx.argmax() == 0
def test_sort_values(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.sort_values()
assert ordered.is_monotonic
ordered = idx.sort_values(ascending=False)
assert ordered[::-1].is_monotonic
ordered, dexer = idx.sort_values(return_indexer=True)
assert ordered.is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0], dtype=np.intp))
ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)
assert ordered[::-1].is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1], dtype=np.intp))
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = Index([f(index[0])])
tm.assert_index_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
assert isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
assert (result['B'] == dr).all()
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
assert result.all()
result = index.isin(list(index))
assert result.all()
assert_almost_equal(index.isin([index[2], 5]),
np.array([False, False, True, False]))
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
assert (result == expected).all()
def test_date(self):
rng = | pd.date_range('1/1/2000', freq='12H', periods=10) | pandas.date_range |
"""
Go from the RVs <NAME> sent (with delta Pav as the
template) to RVs that can be input to radvel.
"""
import os
import pandas as pd, numpy as np
from astrobase.lcmath import find_lc_timegroups
from numpy import array as nparr
from timmy.paths import DATADIR
rvdir = os.path.join(DATADIR, 'spectra', 'Veloce', 'RVs')
rvpath = os.path.join(rvdir, 'TOI837_rvs_v1.txt')
df = | pd.read_csv(rvpath, names=['time','rv','rv_err'], sep=' ') | pandas.read_csv |
# coding: utf-8
# ***Visualization (Exploratory data analysis) - Phase 1***
# * ***Major questions to answer (A/B testing):***
# 1. Does the installment amount affect loan status?
# 2. Does the installment grade affect loan status?
# 3. Which grade has the highest default rate?
# 4. Does annual income/home-ownership affect the default rate?
# 5. Which state has the highest default rate?
# * ***Text Analysis - Phase 2***
# 6. Are people with certain employee titles taking up more loans than others?
# 7. Does a specific purpose affect loan status?
# * ***Model Building - Phase 3***
# 8. Trying various models and comparing them
# ***Visualization (Exploratory data analysis) - Phase 1***
# In[50]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
# Importing the libraries
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import os
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
# Reading the dataset
data = pd.read_csv("../input/loan.csv")
data_1 = pd.DataFrame(data) # Creating a copy
# Checking the dataset
data.head()
data.tail()
data.describe()
data = data.iloc[:,2:-30].values
# In[51]:
# Setting the target vector
status = data[:,14]
unique_labels = np.unique(status, return_counts = True)
# print(unique_labels)
plt.figure()
plt.bar(unique_labels[0],unique_labels[1])
plt.xlabel('Type of label')
plt.ylabel('Frequency')
plt.title('Status categories')
plt.show()
category = unique_labels[0]
frequency = unique_labels[1]
category_count = np.vstack((category,frequency))
category_list = np.array(category_count.T).tolist()
category_list_1 = pd.DataFrame(category_list)
print(category_list_1)
# Let us consider only 2 major categories, "Charged Off" and "Fully Paid". A few reasons to do this:
# 1. To convert it into a binary classification problem, and to analyze in detail the effect of important variables on the loan status.
# 2. A lot of observations show status "Current", so we do not know whether they will end up "Charged Off", "Fully Paid" or "Default".
# 3. The observations for "Default" are too few compared to "Fully Paid" or "Charged Off" to thoroughly investigate observations with loan status "Default".
# 4. The remaining categories of "loan status" are not of prime importance for this analysis.
#
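# (Sketch) An equivalent, label-preserving way to keep just these two classes
# with pandas would be:
#   binary_df = data_1[data_1.loan_status.isin(["Fully Paid", "Charged Off"])]
# The cell below instead stacks the two subsets with numpy, which drops the
# column labels, so the DataFrame has to be rebuilt afterwards.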
# In[52]:
category_one_data = data_1[data_1.loan_status == "Fully Paid"]
category_two_data = data_1[data_1.loan_status == "Charged Off"]
new_data = np.vstack((category_one_data,category_two_data))
# new_data_copy = pd.DataFrame(new_data)
new_data = new_data[:,2:-30]
new_data_df = | pd.DataFrame(new_data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
import pandas as pd
from src.utils.config import Config
from src.features import build_features
from dotenv import find_dotenv, load_dotenv
from sklearn.manifold import TSNE
import umap
from sklearn.decomposition import PCA
import numpy as np
from sklearn.preprocessing import RobustScaler as rs
from sklearn.preprocessing import MinMaxScaler as mms
from sklearn.preprocessing import StandardScaler as sd
project_dir=Config.project_dir
def process_data():
labels= pd.read_csv(project_dir / "data/raw/labels.csv")
expression_data = pd.read_csv(project_dir / "data/raw/data.csv")
#rename and Merge labels and features
expression_data.rename({"Unnamed: 0":"sample"}, axis='columns', inplace =True)
labels.rename({"Unnamed: 0":"sample"}, axis='columns', inplace =True)
labled_expression_merged = pd.merge(labels,expression_data,on="sample")
# save
expression_data=expression_data.drop("sample",axis=1)
expression_data.to_csv(project_dir/ "data/processed/expression_data_original.csv")
labels=labels.drop("sample",axis=1)
labels.to_csv(project_dir/ "data/processed/labels.csv")
labled_expression_merged.to_csv(project_dir/ "data/processed/merged_expression_dataset.csv", index=True)
"""[Robust scaling ]
Robust rescaling the expression levels of each gene,
applying the formula :
rescaled = (gene_expression - median(gene_expression)) / IQR(gene_expression) where IQR stands for Inter Quartile Range.
"""
expression_data_centered = rs().fit_transform(expression_data)
df_expression_data_centered = pd.DataFrame(expression_data_centered,columns=expression_data.columns)
df_expression_data_centered.to_csv(project_dir/ "data/processed/expression_data_centerted.csv")
"""[standard scaling ]
"""
expression_data_standardized = sd().fit_transform(expression_data)
df_expression_data_standardized = pd.DataFrame(expression_data_standardized,columns=expression_data.columns)
df_expression_data_standardized.to_csv(project_dir/ "data/processed/expression_data_standardized.csv")
y = labels['Class'].values
true_labels = np.array([Config.labels_map[element] for element in y])
df_true_labels = pd.DataFrame(true_labels,columns=["Class"])
df_true_labels.to_csv(project_dir/ "data/processed/true_labels.csv")
expression_level_5000_HGV , features_5000_HGV= build_features.top_k_variance(
expression_data.values,
k=1000,
names= expression_data.columns
)
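# Keep the top-k most variable genes as features (note: k=1000 here, although
# the variable names say 5000_HGV).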
#--------------------- data reduction -----------------------#
pca_reducer = PCA(n_components=2)
pca_reducer.fit(expression_data )
pc = pca_reducer.transform(expression_data )
X_tsne = TSNE(n_components=2).fit_transform(expression_data)
UMAP_COMPONENTS_REDUCTION = 2
UMAP_COMPONENTS_FEATURES = 20
UMAP_EPOCHS = 2000
manifold_reducer = umap.UMAP(
n_components=UMAP_COMPONENTS_REDUCTION,
n_neighbors=200,
n_epochs=UMAP_EPOCHS,
metric='cosine',
min_dist=0.9)
manifold = manifold_reducer.fit_transform(expression_data)
# saving tranformed data
components= ["c1","c2"]
df_PCA =pd.DataFrame(pc,columns=components)
df_PCA.to_csv(Config.project_dir/ "data/transformed/PCA_reduction.csv")
df_PCA = | pd.DataFrame(X_tsne,columns=components) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# The desired behavior is to iterate until everything would be NaN on the
# next iteration, so make sure the last element of the iterator was 'l'
# in this case, since 'wikitravel' is the longest string.
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over, so nothing is defined; values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = | Series(["FOO", "BAR", NA, "Blah", "blurg"]) | pandas.Series |
import numpy as np
import cvxpy as cp
from tqdm import tqdm
import random
import time
import torch
import torch.nn as nn
import torch.autograd as autograd
import torch.optim as optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
from numpy import linalg
from itertools import accumulate
import pandas as pd
from utils import solve, model
import argparse
def train(layer, true, iters=1000, choice=1, random_seed=1, show=False):
torch.manual_seed(random_seed)
np.random.seed(random_seed)
pn_t = torch.tensor([0.05]).double().requires_grad_(True)
a1_t = torch.tensor([0.5]).double().requires_grad_(True)
a3_t = torch.tensor([0.5]).double().requires_grad_(True)
max_theta_t = torch.tensor([18.5]).double().requires_grad_(True)
min_theta_t = torch.tensor([18]).double().requires_grad_(True)
max_power_t = torch.tensor([1.0]).double().requires_grad_(True)
variables = [pn_t,a1_t,a3_t,max_theta_t,min_theta_t,max_power_t]
results = []
record_variables = []
optimizer = torch.optim.Adam(variables, lr=0.15)
for i in range(iters):
pred = layer(*variables)
if choice==1:
loss = nn.MSELoss()(true[0], pred[0]) + nn.MSELoss()(true[1], pred[1])
else:
loss = nn.MSELoss()(true[0], pred[0])
optimizer.zero_grad()
loss.backward()
optimizer.step()
with torch.no_grad():
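# Projection step of a simple projected-gradient scheme: after each Adam
# update, clamp the learned rate/power parameters back into their
# physically plausible boxes.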
pn_t.data = torch.clamp(pn_t.data, min=0.01, max=0.1)
a1_t.data = torch.clamp(a1_t.data, min=0.01, max=1)
a3_t.data = torch.clamp(a3_t.data, min=0.01, max=1)
max_power_t.data = torch.clamp(max_power_t.data, min=0.1, max=10)
results.append(loss.item())
if i % 100==0: print("(iter %d) loss: %g " % (i, results[-1]))
if i == 50:
optimizer.param_groups[0]["lr"] = 0.1
if i == 200:
optimizer.param_groups[0]["lr"] = 0.05
if i == 800:
optimizer.param_groups[0]["lr"] = 0.01
if show:
im = plt.plot(results,color='gray')
anno = plt.annotate(f'step:{i}\n loss={loss}', xy=(0.85, 0.9), xycoords='axes fraction',color='black')
plt.axis("equal")
plt.pause(0.001)
anno.remove()
record_variables.append([v.detach().numpy().copy() for v in variables])
return [v.detach().numpy().copy() for v in variables], record_variables
def experiment(layer,seed1,theta_0, price, amb, choice, seed2, show, T=24*5):
np.random.seed(seed1)
price = price_data[:T]
amb = amb_data[:T]
C_th = 10 * np.random.uniform(0.9,1.1)
R_th = 2 * np.random.uniform(0.9,1.1)
P_n = 5 * np.random.uniform(0.9,1.1)
eta = 2.5 * np.random.uniform(0.9,1.1)
theta_r = 20 * np.random.uniform(0.9,1.1)
Delta = np.random.uniform(0.9,1.1)
pn_value = 0.02 # you can change it as you like
a1_value = round(1 - 1/(R_th*C_th),4)
a2_value = eta*R_th
a3_value = round((1-a1_value)*a2_value,6)
max_theta = round(theta_r + Delta,3)
min_theta = round(theta_r - Delta,3)
max_power = round(P_n,3)
params = {'pn':pn_value, 'a1':a1_value, 'a2':a2_value, 'a3': a3_value,
'max_theta':max_theta, 'min_theta':min_theta, 'max_power':max_power}
print(params)
true = solve(price, amb, T, pn_value, a1_value, a3_value, max_theta, min_theta, max_power, theta_0, tensor=True)
variables, record = train(layer, true, 600, choice, seed2, show)
pn_ = ((variables[0][0] - pn_value)**2)**0.5
a1_ = ((variables[1][0] - a1_value)**2)**0.5
a3_ = ((variables[2][0] - a3_value)**2)**0.5
max_theta_ = ((variables[3][0] - max_theta)**2)**0.5
min_theta_ = ((variables[4][0] - min_theta)**2)**0.5
max_power_ = ((variables[5][0] - max_power)**2)**0.5
print(pn_,a1_,a3_,max_theta_,min_theta_,max_power_)
return [v[0] for v in variables], [pn_value,a1_value,a3_value,max_theta,min_theta,max_power], [pn_,a1_,a3_,max_theta_,min_theta_,max_power_]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--num", type=int, default=10, help="the number of experiments")
parser.add_argument("--save", type=bool, default=False, help="whether to save the result")
parser.add_argument("--show", type=bool, default=False, help="whether to show the real-time training loss")
parser.add_argument("--T", type=int, default=120, help="the length of the training data")
parser.add_argument("--seed", type=int, default=1, help="the training random seed")
parser.add_argument("--choice", type=int, default=1, help="1 for OptNet1 and 2 or OptNet2, indicated in the paper")
opts = parser.parse_args()
amb_data = np.array( | pd.read_excel('dataset/input_data_pool.xlsx',sheet_name='theta_amb') | pandas.read_excel |
def process_result_file_into_dataframe(p_file):
import numpy as np
from pandas import DataFrame
l_go_entries = []
with open(p_file, 'r') as f:
flag_start_p, flag_start = False, False
for line in f.readlines():
if line.startswith('Finding terms for P'):
flag_start_p = True
elif flag_start_p and line.startswith('-- '):
flag_start = True
go_id, term, corrected_pvalue, uncorrected_pvalue, fdr_rate, num_annotation, genes, nbr_gene_inter, nbr_gene_net, nbr_gene_go = [np.nan for _ in range(10)]
elif flag_start and line.startswith('\n'):
flag_start = False
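# Fold enrichment = (nbr_gene_inter / nbr_gene_net) / (nbr_gene_go / 7200),
# i.e. the hit rate in the network relative to the genome-wide rate;
# 7200 appears to be the assumed total number of genes in the genome.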
fold_enrichment = nbr_gene_inter*7200/(nbr_gene_net*nbr_gene_go)
l_go_entries.append([go_id, term, corrected_pvalue, uncorrected_pvalue, fdr_rate, nbr_gene_inter, nbr_gene_net, nbr_gene_go, fold_enrichment, num_annotation, genes])
elif line.startswith('Finding terms for C'):
flag_start_p = False
break
elif flag_start:
if line.startswith('GOID'):
go_id = line.split('\t')[1]
go_id = go_id[0:go_id.find('\n')]
elif line.startswith('TERM'):
term = line.split('\t')[1]
term = term[0:term.find('\n')]
elif line.startswith('CORRECTED P-VALUE'):
corrected_pvalue = line.split('\t')[1]
corrected_pvalue = corrected_pvalue[0:corrected_pvalue.find('\n')]
elif line.startswith('UNCORRECTED P-VALUE'):
uncorrected_pvalue = line.split('\t')[1]
uncorrected_pvalue = uncorrected_pvalue[0:uncorrected_pvalue.find('\n')]
elif line.startswith('FDR_RATE'):
fdr_rate = line.split('\t')[1]
fdr_rate = float(fdr_rate[0:fdr_rate.find('%')])
elif line.startswith('NUM_ANNOTATIONS'):
num_annotation = line.split('\t')[1]
num_annotation = num_annotation[0:num_annotation.find('\n')]
nbr_gene_go = int(num_annotation.split(' ')[7])
nbr_gene_net = int(num_annotation.split(' ')[2])
nbr_gene_inter = int(num_annotation.split(' ')[0])
elif line.startswith('The genes annotated'):
continue
else:
genes = line
genes = genes[0:genes.find('\n')]
df_go = | DataFrame(l_go_entries, columns=['GOID', 'TERM', 'CORRECTED P-VALUE', 'UNCORRECTED P-VALUE', 'FDR RATE', 'NBR GENE INTER', 'NBR GENE NET', 'NBR GENE GO', 'FOLD ENRICHMENT', 'NUM ANNOTATION', 'GENES']) | pandas.DataFrame |
"""
Tests the coalescence tree object.
"""
import os
import random
import shutil
import sqlite3
import sys
import unittest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from setup_tests import setUpAll, tearDownAll, skipLongTest
from pycoalescence import Simulation
from pycoalescence.coalescence_tree import CoalescenceTree, get_parameter_description
from pycoalescence.sqlite_connection import check_sql_table_exist
def setUpModule():
"""
Creates the output directory and moves logging files
"""
setUpAll()
t = CoalescenceTree("sample/sample.db")
t.clear_calculations()
def tearDownModule():
"""
Removes the output directory
"""
tearDownAll()
class TestNullSimulationErrors(unittest.TestCase):
"""
Tests that simulations that are not linked raise the correct error.
"""
def testRaisesError(self):
"""
Tests that a null simulation will raise an error when any operation is performed.
"""
t = CoalescenceTree()
with self.assertRaises(RuntimeError):
t.get_species_richness()
with self.assertRaises(RuntimeError):
t.calculate_fragment_richness()
with self.assertRaises(RuntimeError):
t.calculate_alpha_diversity()
with self.assertRaises(RuntimeError):
t.calculate_beta_diversity()
with self.assertRaises(RuntimeError):
t.calculate_fragment_abundances()
with self.assertRaises(RuntimeError):
t.calculate_fragment_octaves()
with self.assertRaises(RuntimeError):
t.calculate_octaves()
with self.assertRaises(RuntimeError):
t.get_fragment_list()
with self.assertRaises(RuntimeError):
t.get_alpha_diversity()
with self.assertRaises(RuntimeError):
t.get_beta_diversity()
with self.assertRaises(RuntimeError):
t.get_community_references()
with self.assertRaises(RuntimeError):
t.get_metacommunity_references()
with self.assertRaises(RuntimeError):
t.get_species_locations()
with self.assertRaises(RuntimeError):
t.get_species_abundances()
with self.assertRaises(RuntimeError):
t.get_species_list()
with self.assertRaises(RuntimeError):
_ = t.get_simulation_parameters()
with self.assertRaises(RuntimeError):
t.get_fragment_abundances("null", 1)
with self.assertRaises(RuntimeError):
t.get_species_richness()
with self.assertRaises(RuntimeError):
t.get_octaves(1)
class TestParameterDescriptions(unittest.TestCase):
"""
Tests that the program correctly reads from the parameter_descriptions.json dictionary.
"""
def testReadsCorrectly(self):
"""
Tests that the dictionary is read correctly.
"""
tmp_dict = {
"habitat_change_rate": "the rate of change from present density maps to historic density maps",
"sample_file": "the sample area map for spatially selective sampling. Can be null to sample all " "cells",
"sample_x": "the sample map x dimension",
"sample_y": "the sample map y dimension",
"sample_x_offset": "the sample x map offset from the grid",
"sample_y_offset": "the sample y map offset from the grid",
"output_dir": "the output directory for the simulation database",
"seed": "the random seed to start the simulation, for repeatability",
"coarse_map_x": "the coarse density map x dimension",
"fine_map_file": "the density map file location at the finer resolution, covering a smaller area",
"tau": "the tau dispersal value for fat-tailed dispersal",
"grid_y": "the simulated grid y dimension",
"dispersal_relative_cost": "the relative rate of moving through non-habitat compared to habitat",
"fine_map_y_offset": "the number of cells the fine map is offset from the sample map in the y "
"dimension, at the fine resolution",
"gen_since_historical": "the number of generations that occur before the historical, or historic,"
" state is reached",
"dispersal_method": "the dispersal method used. Can be one of 'normal', 'norm-uniform' or " "'fat-tail'.",
"historical_fine_map": "the historical, or historic, coarse density map file location",
"coarse_map_scale": "the scale of the coarse density map compared to the fine density map. 1 "
"means equal density",
"grid_x": "the simulated grid x dimension",
"coarse_map_file": "the density map file location at the coarser resolution, covering a larger " "area",
"min_num_species": "the minimum number of species known to exist (currently has no effect)",
"historical_coarse_map": "the historical, or historic, coarse density map file location",
"m_probability": "the probability of choosing from the uniform dispersal kernel in normal-uniform"
" dispersal",
"sigma": "the sigma dispersal value for normal, fat-tailed and normal-uniform dispersals",
"deme": "the number of individuals inhabiting a cell at a map density of 1",
"time_config_file": "will be 'set' if temporal sampling is used, 'null' otherwise",
"coarse_map_y": "the coarse density map y dimension",
"fine_map_x": "the fine density map x dimension",
"coarse_map_y_offset": "the number of cells the coarse map is offset from the fine map in the y "
"dimension, at the fine resolution",
"cutoff": "the maximal dispersal distance possible, for normal-uniform dispersal",
"fine_map_y": "the fine density map y dimension",
"sample_size": "the proportion of individuals to sample from each cell (0-1)",
"fine_map_x_offset": "the number of cells the fine map is offset from the sample map in the x "
"dimension, at the fine resolution",
"speciation_rate": "the minimum speciation rate the simulation was run with",
"task": "the job or task reference number given to this simulation",
"coarse_map_x_offset": "the number of cells the coarse map is offset from the fine map in the x "
"dimension, at the fine resolution",
"landscape_type": "if false, landscapes have hard boundaries. Otherwise, can be infinite, "
"with 1s everywhere, or tiled_coarse or tiled_fine for repeated units of tiled "
"maps",
"max_time": "the maximum simulation time to run for (in seconds)",
"sim_complete": "set to true upon simulation completion, false for incomplete simulations",
"protracted": "if true, the simulation was run with protracted speciation.",
"min_speciation_gen": "the minimum number of generations required before speciation can occur",
"max_speciation_gen": "the maximum number of generations a lineage can exist before it is " "speciated",
"dispersal_map": "a tif file where rows represent cumulative dispersal probability to every other "
"cell, using the row number = x + (y * x_max)",
}
t = CoalescenceTree("sample/sample.db")
sim_output = t.get_simulation_parameters()
for key in sim_output.keys():
self.assertIn(key, get_parameter_description().keys())
self.assertEqual(get_parameter_description(key), t.get_parameter_description(key))
for key in get_parameter_description().keys():
self.assertIn(key, sim_output.keys())
for key in tmp_dict.keys():
self.assertEqual(tmp_dict[key], get_parameter_description(key))
self.assertDictEqual(tmp_dict, get_parameter_description())
with self.assertRaises(KeyError):
get_parameter_description(key="notakey")
dispersal_parameters = t.dispersal_parameters()
expected_disp_dict = {
"dispersal_method": "normal",
"sigma": 3.55,
"tau": 0.470149,
"m_probability": 0,
"cutoff": 0,
}
for key in dispersal_parameters.keys():
self.assertIn(key, tmp_dict.keys())
self.assertIn(key, expected_disp_dict.keys())
for key, val in expected_disp_dict.items():
self.assertIn(key, dispersal_parameters.keys())
if isinstance(val, float):
self.assertAlmostEqual(val, dispersal_parameters[key])
else:
self.assertEqual(val, dispersal_parameters[key])
class TestCoalescenceTreeSettingSpeciationParameters(unittest.TestCase):
"""Tests that the correct errors are raised when speciation parameters are supplied incorrectly."""
@classmethod
def setUpClass(cls):
"""Generates the temporary databases to attempt analysis on."""
src = [os.path.join("sample", "sample{}.db".format(x)) for x in [2, 3]]
cls.dst = [os.path.join("output", "sample{}.db".format(x)) for x in [2, 3]]
for tmp_src, tmp_dst in zip(src, cls.dst):
if os.path.exists(tmp_dst):
os.remove(tmp_dst)
shutil.copy(tmp_src, tmp_dst)
def testSetSpeciationRates(self):
"""Tests setting speciation rates works as intended and raises appropriate errors"""
ct = CoalescenceTree(self.dst[0])
for attempt in ["a string", ["a", "string"], [["list", "list2"], 0.2, 0.1], [None]]:
with self.assertRaises(TypeError):
ct._set_speciation_rates(attempt)
with self.assertRaises(RuntimeError):
ct._set_speciation_rates(None)
for attempt in [-10, -2.0, 1.1, 100, [-1, 0.1, 0.2], [0.2, 0.8, 1.1]]:
with self.assertRaises(ValueError):
ct._set_speciation_rates(attempt)
expected_list = [0.1, 0.2, 0.3]
ct._set_speciation_rates(expected_list)
self.assertEqual(expected_list, ct.applied_speciation_rates_list)
ct._set_speciation_rates(0.2)
self.assertEqual([0.2], ct.applied_speciation_rates_list)
def testSetRecordFragments(self):
"""Tests that setting the record_fragments flag works as expected."""
ct = CoalescenceTree(self.dst[0])
ct._set_record_fragments(True)
self.assertEqual("null", ct.record_fragments)
ct._set_record_fragments(False)
self.assertEqual("F", ct.record_fragments)
for each in ["PlotBiodiversityMetrics.db", "doesntexist.csv"]:
config_path = os.path.join("sample", each)
with self.assertRaises(IOError):
ct._set_record_fragments(config_path)
expected = os.path.join("sample", "FragmentsTest.csv")
ct._set_record_fragments(expected)
self.assertEqual(expected, ct.record_fragments)
def testSetRecordSpatial(self):
"""Tests that the setting the record_spatial flag works as expected"""
ct = CoalescenceTree(self.dst[0])
ct._set_record_spatial("T")
self.assertTrue(ct.record_spatial)
ct._set_record_spatial("F")
self.assertFalse(ct.record_spatial)
with self.assertRaises(TypeError):
ct._set_record_spatial("nota bool")
ct._set_record_spatial(True)
self.assertTrue(ct.record_spatial)
def testSetMetacommunityParameters(self):
"""Tests that setting the metacommunity parameters works as expected."""
ct = CoalescenceTree(self.dst[0])
for size, spec in [[-10, 0.1], [10, -0.1], [10, 1.1]]:
with self.assertRaises(ValueError):
ct.fragments = "F"
ct._set_record_fragments(False)
ct._set_record_spatial(False)
ct.times = [0.0]
ct._set_metacommunity_parameters(size, spec)
ct._set_metacommunity_parameters()
self.assertEqual(0.0, ct.metacommunity_size)
self.assertEqual(0.0, ct.metacommunity_speciation_rate)
ct._set_metacommunity_parameters(10, 0.1, "simulated")
self.assertEqual(10, ct.metacommunity_size)
self.assertEqual(0.1, ct.metacommunity_speciation_rate)
def testSetProtractedParameters(self):
"""Tests that setting the protracted parameters works as expected."""
ct = CoalescenceTree(self.dst[0])
with self.assertRaises(ValueError):
ct._set_protracted_parameters(0.1, 100)
ct = CoalescenceTree(self.dst[1])
ct._set_protracted_parameters(10, 100)
self.assertEqual((10.0, 100.0), ct.protracted_parameters[0])
ct.protracted_parameters = []
for min_proc, max_proc in [[200, 5000], [80, 50], [200, 11000]]:
with self.assertRaises(ValueError):
ct._check_protracted_parameters(min_proc, max_proc)
with self.assertRaises(ValueError):
ct._set_protracted_parameters(min_proc, max_proc)
with self.assertRaises(ValueError):
ct.add_protracted_parameters(min_proc, max_proc)
ct._set_protracted_parameters(50, 5000)
self.assertEqual((50.0, 5000.0), ct.protracted_parameters[0])
ct.protracted_parameters = []
ct._set_protracted_parameters()
self.assertEqual((0.0, 0.0), ct.protracted_parameters[0])
def testSetSampleFile(self):
"""Tests that the sample file is correctly set."""
ct = CoalescenceTree(self.dst[0])
for file in ["notafile.tif", os.path.join("sample", "sample.db")]:
with self.assertRaises(IOError):
ct._set_sample_file(file)
ct._set_sample_file()
self.assertEqual("null", ct.sample_file)
expected_file = os.path.join("sample", "SA_sample_coarse.tif")
ct._set_sample_file(expected_file)
self.assertEqual(expected_file, ct.sample_file)
def testSetTimes(self):
"""Tests that times are correctly set."""
ct = CoalescenceTree(self.dst[0])
ct._set_times(None)
self.assertEqual(0.0, ct.times[0])
with self.assertRaises(TypeError):
ct.add_times(0.5)
with self.assertRaises(TypeError):
ct.add_times([0.2, 0.5, "string"])
ct.times = None
ct.add_times([0.2, 0.5, 10])
self.assertEqual([0.0, 0.2, 0.5, 10.0], ct.times)
ct.times = None
ct._set_times(0.2)
self.assertEqual([0.0, 0.2], ct.times)
ct.times = None
ct._set_times([0.1, 0.5, 10.0])
self.assertEqual([0.0, 0.1, 0.5, 10.0], ct.times)
class TestCoalescenceTreeParameters(unittest.TestCase):
"""Tests that parameters are correctly obtained from the databases and the relevant errors are raised."""
def testCommunityParameters1(self):
"""Tests the community parameters make sense in a very simple community."""
shutil.copyfile(os.path.join("sample", "sample3.db"), os.path.join("output", "temp_sample3.db"))
t = CoalescenceTree(os.path.join("output", "temp_sample3.db"), logging_level=50)
self.assertEqual([], t.get_metacommunity_references())
self.assertEqual([1], t.get_community_references())
params = t.get_community_parameters(1)
expected_dict = {
"speciation_rate": 0.001,
"time": 0.0,
"fragments": 0,
"metacommunity_reference": 0,
"min_speciation_gen": 100.0,
"max_speciation_gen": 10000.0,
}
self.assertEqual(expected_dict, params)
with self.assertRaises(sqlite3.Error):
t.get_metacommunity_parameters(1)
with self.assertRaises(KeyError):
t.get_community_parameters(2)
with self.assertRaises(KeyError):
t.get_community_reference(0.1, 0.0, 0, 0, 0.0, min_speciation_gen=100.0, max_speciation_gen=10000.0)
with self.assertRaises(KeyError):
_ = t.get_community_reference(speciation_rate=0.001, time=0.0, fragments=False)
ref = t.get_community_reference(
speciation_rate=0.001, time=0.0, fragments=False, min_speciation_gen=100.0, max_speciation_gen=10000.0
)
self.assertEqual(1, ref)
self.assertEqual(expected_dict, t.get_community_parameters(ref))
t.wipe_data()
with self.assertRaises(IOError):
t.get_community_parameters_pd()
def testCommunityParameters2(self):
"""Tests the community parameters make sense in a very simple community."""
t = CoalescenceTree(os.path.join("sample", "sample4.db"))
self.assertEqual([1, 2, 3, 4, 5], t.get_community_references())
expected_params1 = {"speciation_rate": 0.1, "time": 0.0, "fragments": 0, "metacommunity_reference": 0}
expected_params2 = {"speciation_rate": 0.1, "time": 0.0, "fragments": 0, "metacommunity_reference": 1}
expected_params3 = {"speciation_rate": 0.2, "time": 0.0, "fragments": 0, "metacommunity_reference": 1}
expected_params4 = {"speciation_rate": 0.1, "time": 0.0, "fragments": 0, "metacommunity_reference": 2}
expected_params5 = {"speciation_rate": 0.2, "time": 0.0, "fragments": 0, "metacommunity_reference": 2}
expected_meta_params1 = {
"speciation_rate": 0.001,
"metacommunity_size": 10000.0,
"option": "simulated",
"external_reference": 0,
}
expected_meta_params2 = {
"speciation_rate": 0.001,
"metacommunity_size": 10000.0,
"option": "analytical",
"external_reference": 0,
}
params1 = t.get_community_parameters(1)
params2 = t.get_community_parameters(2)
params3 = t.get_community_parameters(3)
params4 = t.get_community_parameters(4)
params5 = t.get_community_parameters(5)
params6 = t.get_metacommunity_parameters(1)
params7 = t.get_metacommunity_parameters(2)
self.assertEqual([1, 2], t.get_metacommunity_references())
self.assertEqual(expected_params1, params1)
self.assertEqual(expected_params2, params2)
self.assertEqual(expected_params3, params3)
self.assertEqual(expected_params4, params4)
self.assertEqual(expected_params5, params5)
self.assertEqual(expected_meta_params1, params6)
self.assertEqual(expected_meta_params2, params7)
with self.assertRaises(KeyError):
t.get_community_parameters(6)
with self.assertRaises(KeyError):
t.get_metacommunity_parameters(3)
ref1 = t.get_community_reference(speciation_rate=0.1, time=0.0, fragments=False)
with self.assertRaises(KeyError):
t.get_community_reference(
speciation_rate=0.1, time=0.0, fragments=False, min_speciation_gen=0.1, max_speciation_gen=10000.0
)
ref2 = t.get_community_reference(
speciation_rate=0.1,
time=0.0,
fragments=False,
metacommunity_size=10000.0,
metacommunity_speciation_rate=0.001,
metacommunity_option="simulated",
)
with self.assertRaises(KeyError):
t.get_community_reference(
speciation_rate=0.1,
time=0.0,
fragments=False,
metacommunity_size=10000.0,
metacommunity_speciation_rate=0.01,
metacommunity_option="simulated",
)
ref3 = t.get_community_reference(
speciation_rate=0.2,
time=0.0,
fragments=False,
metacommunity_size=10000.0,
metacommunity_speciation_rate=0.001,
metacommunity_option="simulated",
)
ref4 = t.get_community_reference(
speciation_rate=0.1,
time=0.0,
fragments=False,
metacommunity_size=10000.0,
metacommunity_speciation_rate=0.001,
metacommunity_option="analytical",
)
ref5 = t.get_community_reference(
speciation_rate=0.2,
time=0.0,
fragments=False,
metacommunity_size=10000.0,
metacommunity_speciation_rate=0.001,
metacommunity_option="analytical",
)
self.assertEqual(1, ref1)
self.assertEqual(2, ref2)
self.assertEqual(3, ref3)
self.assertEqual(4, ref4)
self.assertEqual(5, ref5)
expected_community_params_list = []
for reference in t.get_community_references():
params = t.get_community_parameters(reference)
params["reference"] = reference
expected_community_params_list.append(params)
expected_community_params = pd.DataFrame(expected_community_params_list)
actual_output = t.get_community_parameters_pd()
assert_frame_equal(expected_community_params, actual_output, check_like=True)
def testIsComplete(self):
"""Tests sims are correctly identified as complete."""
t = CoalescenceTree(os.path.join("sample", "sample4.db"))
self.assertTrue(t.is_complete)
class TestCoalescenceTreeAnalysis(unittest.TestCase):
"""Tests analysis is performed correctly"""
@classmethod
def setUpClass(cls):
"""Sets up the Coalescence object test case."""
dst1 = os.path.join("output", "sampledb0.db")
for i in range(0, 11):
dst = os.path.join("output", "sampledb{}.db".format(i))
if os.path.exists(dst):
os.remove(dst)
shutil.copyfile(os.path.join("sample", "sample.db"), dst)
shutil.copyfile(os.path.join("sample", "nse_reference.db"), os.path.join("output", "nse_reference1.db"))
random.seed(2)
cls.test = CoalescenceTree(dst1, logging_level=50)
cls.test.clear_calculations()
cls.test.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
cls.test.calculate_fragment_richness()
cls.test.calculate_fragment_octaves()
cls.test.calculate_octaves_error()
cls.test.calculate_alpha_diversity()
cls.test.calculate_beta_diversity()
cls.test2 = CoalescenceTree()
cls.test2.set_database(os.path.join("sample", "sample_nofrag.db"))
dstx = os.path.join("output", "sampledbx.db")
shutil.copyfile(dst1, dstx)
c = CoalescenceTree(dstx)
c.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
c.calculate_goodness_of_fit()
@classmethod
def tearDownClass(cls):
"""
Removes the files from output.
"""
cls.test.clear_calculations()
def testComparisonDataNoExistError(self):
c = CoalescenceTree(os.path.join("sample", "sample.db"))
with self.assertRaises(IOError):
c.import_comparison_data(os.path.join("sample", "doesnotexist.db"))
def testFragmentOctaves(self):
num = self.test.cursor.execute(
"SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'P09' AND octave == 0"
" AND community_reference == 1"
).fetchall()[0][0]
self.assertEqual(num, 7, msg="Fragment octaves not correctly calculated.")
num = self.test.cursor.execute(
"SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'P09' AND octave == 0 "
" AND community_reference == 2"
).fetchall()[0][0]
self.assertEqual(num, 7, msg="Fragment octaves not correctly calculated.")
num = self.test.cursor.execute(
"SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'cerrogalera' AND octave == 1 "
" AND community_reference == 1"
).fetchall()[0][0]
self.assertEqual(num, 3, msg="Fragment octaves not correctly calculated.")
num = self.test.cursor.execute(
"SELECT richness FROM FRAGMENT_OCTAVES WHERE fragment == 'whole' AND octave == 1 "
" AND community_reference == 2"
).fetchall()[0][0]
self.assertEqual(num, 221, msg="Fragment octaves not correctly calculated.")
def testFragmentAbundances(self):
"""
Tests that fragment abundances are produced properly by the fragment detection functions.
"""
num = self.test.cursor.execute(
"SELECT COUNT(fragment) FROM FRAGMENT_ABUNDANCES WHERE fragment == 'P09' " " AND community_reference == 1"
).fetchall()[0][0]
self.assertEqual(num, 9, msg="Fragment abundances not correctly calculated.")
num = self.test.cursor.execute(
"SELECT COUNT(fragment) FROM FRAGMENT_ABUNDANCES WHERE fragment == 'P09' " " AND community_reference == 2"
).fetchall()[0][0]
self.assertEqual(num, 9, msg="Fragment abundances not correctly calculated.")
num = self.test.cursor.execute(
"SELECT COUNT(fragment) FROM FRAGMENT_ABUNDANCES WHERE fragment == 'cerrogalera' "
" AND community_reference == 1"
).fetchall()[0][0]
self.assertEqual(num, 9, msg="Fragment abundances not correctly calculated.")
def testSpeciesAbundances(self):
"""Tests that the produced species abundances are correct by comparing species richness."""
num = self.test.cursor.execute(
"SELECT COUNT(species_id) FROM SPECIES_ABUNDANCES WHERE community_reference == 2"
).fetchall()[0][0]
self.assertEqual(num, 1029, msg="Species abundances not correctly calculated.")
num = self.test.cursor.execute(
"SELECT COUNT(species_id) FROM SPECIES_ABUNDANCES WHERE community_reference == 1"
).fetchall()[0][0]
self.assertEqual(num, 884, msg="Species abundances not correctly calculated.")
def testGetOctaves(self):
"""Tests getting the octaves."""
c = CoalescenceTree(os.path.join("output", "sampledb4.db"))
c.clear_calculations()
c.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
c.calculate_richness()
self.assertEqual([[0, 585], [1, 231], [2, 59], [3, 5]], c.get_octaves(1))
c = CoalescenceTree(os.path.join("output", "sampledb4.db"))
c.clear_calculations()
c.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
c.calculate_richness()
actual = c.get_octaves_pd().head()
expected = pd.DataFrame(
[[1, 0, 585], [1, 1, 231], [1, 2, 59], [1, 3, 5], [2, 0, 760]],
columns=["community_reference", "octave", "richness"],
)
assert_frame_equal(actual, expected, check_like=True)
def testSpeciesLocations(self):
"""
Tests that species locations have been correctly assigned.
"""
num = self.test.cursor.execute(
"SELECT species_id FROM SPECIES_LOCATIONS WHERE x==1662 AND y==4359 " " AND community_reference == 1"
).fetchall()
self.assertEqual(len(set(num)), 2, msg="Species locations not correctly assigned")
all_list = self.test.get_species_locations()
select_list = self.test.get_species_locations(community_reference=1)
self.assertListEqual([1, 1662, 4359, 1], all_list[0])
self.assertListEqual([1, 1662, 4359], select_list[0])
def testAlphaDiversity(self):
"""
Tests that alpha diversity is correctly calculated and fetched for each parameter reference
"""
c = CoalescenceTree(os.path.join("sample", "sample.db"))
with self.assertRaises(IOError):
c.get_alpha_diversity_pd()
self.assertEqual(9, self.test.get_alpha_diversity(1))
self.assertEqual(10, self.test.get_alpha_diversity(2))
expected_alphas_list = []
for reference in self.test.get_community_references():
expected_alphas_list.append(
{"community_reference": reference, "alpha_diversity": self.test.get_alpha_diversity(reference)}
)
expected_alphas = pd.DataFrame(expected_alphas_list).reset_index(drop=True)
actual_alphas = self.test.get_alpha_diversity_pd().reset_index(drop=True)
assert_frame_equal(expected_alphas, actual_alphas, check_like=True)
def testBetaDiversity(self):
"""
Tests that beta diversity is correctly calculated and fetched for the reference
"""
c = CoalescenceTree(os.path.join("sample", "sample.db"))
with self.assertRaises(IOError):
c.get_beta_diversity_pd()
self.assertAlmostEqual(98.111111111, self.test.get_beta_diversity(1), places=5)
self.assertAlmostEqual(102.8, self.test.get_beta_diversity(2), places=5)
expected_betas_list = []
for reference in self.test.get_community_references():
expected_betas_list.append(
{"community_reference": reference, "beta_diversity": self.test.get_beta_diversity(reference)}
)
expected_betas = pd.DataFrame(expected_betas_list).reset_index(drop=True)
actual_betas = self.test.get_beta_diversity_pd().reset_index(drop=True)
assert_frame_equal(expected_betas, actual_betas, check_like=True)
def testGetNumberIndividuals(self):
"""Tests that the number of individuals is obtained correctly."""
c = CoalescenceTree(os.path.join("output", "sampledb7.db"))
self.assertEqual(1504, c.get_number_individuals(community_reference=1))
self.assertEqual(12, c.get_number_individuals(fragment="P09", community_reference=1))
c.wipe_data()
c.import_comparison_data(os.path.join("sample", "PlotBiodiversityMetrics.db"))
with self.assertRaises(IOError):
c.get_number_individuals(fragment="none")
with self.assertRaises(IOError):
c.get_number_individuals()
def testGetFragmentAbundances(self):
"""Tests that fragment abundances are correctly obtained."""
c = CoalescenceTree(os.path.join("sample", "sample3.db"))
with self.assertRaises(IOError):
c.get_fragment_abundances(fragment="P09", reference=1)
with self.assertRaises(IOError):
c.get_fragment_abundances_pd()
abundances = self.test.get_fragment_abundances(fragment="P09", reference=1)
expected_abundances = [[302, 1], [303, 1], [304, 1], [305, 1], [306, 1], [307, 1], [546, 2], [693, 1], [732, 3]]
self.assertEqual(expected_abundances, abundances[:10])
all_abundances = self.test.get_all_fragment_abundances()
expected_abundances2 = [
[1, "P09", 302, 1],
[1, "P09", 303, 1],
[1, "P09", 304, 1],
[1, "P09", 305, 1],
[1, "P09", 306, 1],
[1, "P09", 307, 1],
[1, "P09", 546, 2],
[1, "P09", 693, 1],
[1, "P09", 732, 3],
[1, "cerrogalera", 416, 1],
]
self.assertEqual(expected_abundances2, all_abundances[:10])
df = pd.DataFrame(
expected_abundances2, columns=["community_reference", "fragment", "species_id", "no_individuals"]
)
actual_df = self.test.get_fragment_abundances_pd().head(n=10)
| assert_frame_equal(df, actual_df, check_like=True) | pandas.testing.assert_frame_equal |
#########################
# generate-gafs.py
# Author: <NAME>
##########################
# Create our labelled image data for AI training
#########################
import pandas as pd
import time
import numpy as np
import matplotlib.pyplot as plt
from pyts.image import GramianAngularField
from pathlib import Path
import datetime
import shutil
import os
import random
from os import path
import copy
#############
def save_gaf(periodData, simpleFileName):
# Clear any previous graphs
plt.clf()
# Need to reshape to numpy for plotting in gasf
periodnp = periodData.to_numpy().reshape(1, -1)
# Create GAF from data
gaf = GramianAngularField(
image_size=len(periodData),
method='summation')
gafData = gaf.transform(periodnp)
# Plot the gaf to an image
plt.imshow(gafData[0],
cmap='rainbow',
origin='lower',
interpolation='nearest')
plt.axis('off')
# Now we save this gaf as a file
dest = "gafs/" + simpleFileName
plt.savefig(dest,
bbox_inches='tight',
pad_inches=0,
transparent=True)
plt.close()
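# A minimal usage sketch for save_gaf (hypothetical window; assumes the gafs/
# folder already exists, as created in the main block below):
#   window = pd.DataFrame({"Humidity3pm": np.random.uniform(20, 90, 32)})
#   save_gaf(window, "example.png")  # writes gafs/example.png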
if __name__ == "__main__":
# First, load in the raw data
kData = pd.read_csv('rawdata/cleanAUSData.csv')
print(kData)
# Let's just deal with one location, Darwin
kData = kData[kData['Location'] == 'Darwin']
print(kData)
# Now, our highest-correlating field for rain tomorrow was Humidity3pm,
# so let's make GAFs of this value over time.
usingCols = [
"Date",
"Location",
"Humidity3pm",
"RainNext4days",
]
kData = kData[usingCols]
print(kData)
# Ensure the gafs folder exists
Path(
"gafs"
).mkdir(parents=True, exist_ok=True)
# So, we'll start by using a window of humidity data (daysPerGraph days)
# to try to predict whether rain follows the window
totalDays = len(kData)
daysPerGraph = 32
# As we don't have labels for the final window,
# we stop daysPerGraph + 1 days before the end of the data
maxDay = totalDays - (daysPerGraph + 1)
print("About to create %s gafs" % maxDay)
labelDF = pd.DataFrame()
# Now we loop over every day in our whole dataset
for i in range(0, maxDay):
# Print every 5% progress
if i % (maxDay // 20) == 0:
pct = round((i / maxDay) * 100.0, 3)
prog = ("%s" % pct) + "%..."
print(prog, end='', flush=True)
# i is the day number
# at day 0, we need to be x days ahead
# since we need past data
startRow = i
endRow = startRow + daysPerGraph
#########################
# First we generate the GAF
#########################
# Get the date of the day we are predicting for
currentDate = kData.iloc[[i + daysPerGraph]]['Date'].item()
# Get our period of data
humidityOnly = kData[["Humidity3pm"]]
periodDF = humidityOnly[startRow:endRow]
simpleFileName = "day-%s-%s.png" % (
str(i + 1).rjust(5, "0"),
currentDate
)
save_gaf(periodDF, simpleFileName)
#########################
# Now we determine the gaf data
#########################
# We have a window of daysPerGraph days of data;
# now we want just the rain label for the day that follows it
# Get our period of data
rainnext = kData.iloc[[i + daysPerGraph]]['RainNext4days'].item()
if rainnext == 1.:
dayLabel = "Rain"
else:
dayLabel = "Dry"
#########################
# Now we add all gaf data to the csv
#########################
gafDataRow = {
"date": [currentDate],
"tomorrowResult": [dayLabel],
"filename": [simpleFileName]
}
newRow = | pd.DataFrame(gafDataRow) | pandas.DataFrame |
import pandas as pd
from IPython.core.display_functions import display
raw_csv_data = pd.read_csv("Absenteeism-data.csv")
type(raw_csv_data)
raw_csv_data
# Eyeballed the data to check for errors
df = raw_csv_data.copy()
pd.options.display.max_columns = None
pd.options.display.max_rows = 50
df.info()
# This is the concise summary of the dataframe
df = df.drop(['ID'], axis=1)
# Dropped the ID because it won't improve my model.
df['Reason for Absence'].unique()
# These are the categorical variables that classify the employees' reasons for their absence.
len(df['Reason for Absence'].unique())
# Since counting in Programming starts from 0, a number between 0 and 28 must be missing.
sorted(df['Reason for Absence'].unique())
# The missing value is 20.
# There must be a meaning behind these categorical variables.
reason_columns = pd.get_dummies(df['Reason for Absence'])
reason_columns['check'] = reason_columns.sum(axis=1)
reason_columns['check'].sum(axis=0)
# To check that the individual was absent from work due to one particular reason only.
reason_columns['check'].unique()
# To verify that the 700 values return only 1, which further proves that there are no missing or incorrect values.
reason_columns = reason_columns.drop(['check'], axis=1)
# I dropped the check column since I won't need it any further in preprocessing.
reason_columns = pd.get_dummies(df['Reason for Absence'], drop_first=True)
# Since I only want to predict the specific KNOWN reason that induces an individual to be excessively absent from work, I decided to drop reason 0 to avoid multicollinearity and preserve the logic of my model.
reason_columns
df.columns.values
reason_columns.columns.values
df = df.drop(['Reason for Absence'], axis=1)
# Dropped 'Reason for Absence' to avoid the duplication of information in my dataset, which triggers multicollinearity.
reason_type_1 = reason_columns.loc[:, 1:14].max(axis=1)
reason_type_2 = reason_columns.loc[:, 15:17].max(axis=1)
reason_type_3 = reason_columns.loc[:, 18:21].max(axis=1)
reason_type_4 = reason_columns.loc[:, 22:].max(axis=1)
# Grouped/classified these variables to re-organize them, based on the Feature descriptions of Absenteeism of employees.
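# Taking .max(axis=1) over each block of dummies yields 1 if any reason in
# that range was recorded for the observation, i.e. it collapses the dummies
# into four broad reason-class indicators.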
df
df = | pd.concat([df, reason_type_1, reason_type_2, reason_type_3, reason_type_4], axis=1) | pandas.concat |
# python vaccination_adaptive_hybrid_autosearch_conform.py MSA_NAME VACCINATION_TIME VACCINATION_RATIO consider_hesitancy ACCEPTANCE_SCENARIO w1 w2 w3 w4 w5 quick_test
# python vaccination_adaptive_hybrid_autosearch_conform.py Atlanta 15 0.1 True cf18 1 1 1 1 1 False
import setproctitle
setproctitle.setproctitle("covid-19-vac@chenlin")
import sys
import os
import datetime
import pandas as pd
import numpy as np
import pickle
import time
import pdb
from skcriteria import Data, MIN
from skcriteria.madm import closeness
import constants
import functions
#import disease_model_only_modify_attack_rates
import disease_model_diff_acceptance
###############################################################################
# Constants
root = '/data/chenlin/COVID-19/Data'
timestring='20210206'
MIN_DATETIME = datetime.datetime(2020, 3, 1, 0)
MAX_DATETIME = datetime.datetime(2020, 5, 2, 23)
NUM_DAYS = 63
NUM_GROUPS = 5
# Vaccination protection rate
PROTECTION_RATE = 1
# Policy execution ratio
EXECUTION_RATIO = 1
# Recheck interval: After distributing some portion of vaccines, recheck the most vulnerable demographic group
RECHECK_INTERVAL = 0.01
###############################################################################
# Main variable settings
MSA_NAME = sys.argv[1]; #MSA_NAME = 'SanFrancisco'
MSA_NAME_FULL = constants.MSA_NAME_FULL_DICT[MSA_NAME] #MSA_NAME_FULL = 'San_Francisco_Oakland_Hayward_CA'
print('MSA_NAME: ',MSA_NAME)
# Policies to compare
policy_to_compare = ['No_Vaccination','Baseline','Age_Flood', 'Income_Flood','JUE_EW_Flood']
# Vaccination time
VACCINATION_TIME = sys.argv[2];print('VACCINATION_TIME:',VACCINATION_TIME)
VACCINATION_TIME_STR = VACCINATION_TIME
VACCINATION_TIME = float(VACCINATION_TIME)
print(VACCINATION_TIME_STR,'\n',VACCINATION_TIME)
policy_savename = 'adaptive_%sd_hybrid'%VACCINATION_TIME_STR
print('policy_savename:',policy_savename)
# Vaccination_Ratio
VACCINATION_RATIO = sys.argv[3]; print('VACCINATION_RATIO:',VACCINATION_RATIO)
VACCINATION_RATIO = float(VACCINATION_RATIO)
# Consider hesitancy or not
consider_hesitancy = sys.argv[4]
print('Consider hesitancy? ', consider_hesitancy)
if(consider_hesitancy not in ['True','False']):
print('Invalid value for consider_hesitancy. Please check.')
pdb.set_trace()
# Acceptance scenario, if considering hesitancy
# if consider_hesitancy=='False', this field does not affect anything
ACCEPTANCE_SCENARIO = sys.argv[5]
print('Vaccine acceptance scenario: ', ACCEPTANCE_SCENARIO)
w1 = float(sys.argv[6])
w2 = float(sys.argv[7])
w3 = float(sys.argv[8])
w4 = float(sys.argv[9])
w5 = float(sys.argv[10])
weights = [w1,w2,w3,w4,w5]
print('Weights:', weights)
# Quick Test: prototyping
quick_test = sys.argv[11]; print('Quick testing?', quick_test)
if(quick_test == 'True'):
NUM_SEEDS = 2
NUM_SEEDS_CHECKING = 2
else:
NUM_SEEDS = 30
NUM_SEEDS_CHECKING = 30
print('NUM_SEEDS: ', NUM_SEEDS)
print('NUM_SEEDS_CHECKING: ', NUM_SEEDS_CHECKING)
STARTING_SEED = range(NUM_SEEDS)
STARTING_SEED_CHECKING = range(NUM_SEEDS_CHECKING)
distribution_time = VACCINATION_RATIO / RECHECK_INTERVAL  # number of rounds over which the vaccines are distributed
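# Illustration with the example arguments shown at the top of this script (VACCINATION_RATIO=0.1, RECHECK_INTERVAL=0.01):
# distribution_time = 0.1 / 0.01 = 10, i.e. the vaccine budget is released in 10 rounds,
# re-identifying the most vulnerable demographic group after each batch covering 1% of the population.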
# Compare all policies with no_vaccination scenario
REL_TO = 'No_Vaccination'
###############################################################################
# Functions
def run_simulation(starting_seed, num_seeds, vaccination_vector, vaccine_acceptance,protection_rate=1):
#m = disease_model_only_modify_attack_rates.Model(starting_seed=starting_seed,
m = disease_model_diff_acceptance.Model(starting_seed=starting_seed, #20211007
num_seeds=num_seeds,
debug=False,clip_poisson_approximation=True,ipf_final_match='poi',ipf_num_iter=100)
m.init_exogenous_variables(poi_areas=poi_areas,
poi_dwell_time_correction_factors=poi_dwell_time_correction_factors,
cbg_sizes=cbg_sizes,
poi_cbg_visits_list=poi_cbg_visits_list,
all_hours=all_hours,
p_sick_at_t0=constants.parameters_dict[MSA_NAME][0],
#vaccination_time=24*31, # when to apply vaccination (which hour)
vaccination_time=24*VACCINATION_TIME, # when to apply vaccination (which hour)
vaccination_vector = vaccination_vector,
vaccine_acceptance = vaccine_acceptance,#20211007
protection_rate = protection_rate,
home_beta=constants.parameters_dict[MSA_NAME][1],
cbg_attack_rates_original = cbg_attack_rates_scaled,
cbg_death_rates_original = cbg_death_rates_scaled,
poi_psi=constants.parameters_dict[MSA_NAME][2],
just_compute_r0=False,
latency_period=96, # 4 days
infectious_period=84, # 3.5 days
confirmation_rate=.1,
confirmation_lag=168, # 7 days
death_lag=432
)
m.init_endogenous_variables()
T1,L_1,I_1,R_1,C2,D2,total_affected, history_C2, history_D2, total_affected_each_cbg = m.simulate_disease_spread(no_print=True)
del T1
del L_1
del I_1
del C2
del D2
return history_C2, history_D2
# Analyze results and produce graphs
def output_result(cbg_table, demo_feat, policy_list, num_groups, print_result=True,draw_result=True, rel_to=REL_TO):
#print('Observation dimension: ', demo_feat)
results = {}
for policy in policy_list:
exec("final_deaths_rate_%s_total = cbg_table['Final_Deaths_%s'].sum()/cbg_table['Sum'].sum()" % (policy.lower(),policy))
cbg_table['Final_Deaths_' + policy] = eval('avg_final_deaths_' + policy.lower())
exec("%s = np.zeros(num_groups)" % ('final_deaths_rate_'+ policy.lower()))
deaths_total_abs = eval('final_deaths_rate_%s_total'%(policy.lower()))
for i in range(num_groups):
eval('final_deaths_rate_'+ policy.lower())[i] = cbg_table[cbg_table[demo_feat + '_Quantile']==i]['Final_Deaths_' + policy].sum()
eval('final_deaths_rate_'+ policy.lower())[i] /= cbg_table[cbg_table[demo_feat + '_Quantile']==i]['Sum'].sum()
deaths_gini_abs = functions.gini(eval('final_deaths_rate_'+ policy.lower()))
if(rel_to=='No_Vaccination'):
# rel is compared to No_Vaccination
if(policy=='No_Vaccination'):
deaths_total_no_vaccination = deaths_total_abs
deaths_gini_no_vaccination = deaths_gini_abs
deaths_total_rel = 0; deaths_gini_rel = 0
results[policy] = {
'deaths_total_abs':'%.6f'% deaths_total_abs,
'deaths_total_rel':'%.6f'% deaths_total_rel,
'deaths_gini_abs':'%.4f'% deaths_gini_abs, #'%.4f'
'deaths_gini_rel':'%.4f'% deaths_gini_rel} #'%.4f'
else:
deaths_total_rel = (eval('final_deaths_rate_%s_total'%(policy.lower())) - deaths_total_no_vaccination) / deaths_total_no_vaccination
deaths_gini_rel = (functions.gini(eval('final_deaths_rate_'+ policy.lower())) - deaths_gini_no_vaccination) / deaths_gini_no_vaccination
results[policy] = {
'deaths_total_abs':'%.6f'% deaths_total_abs,
'deaths_total_rel':'%.6f'% deaths_total_rel,
'deaths_gini_abs':'%.4f'% deaths_gini_abs, #'%.4f'
'deaths_gini_rel':'%.4f'% deaths_gini_rel} #'%.4f'
elif(rel_to=='Baseline'):
# rel is compared to Baseline
if(policy=='Baseline'):
deaths_total_baseline = deaths_total_abs
deaths_gini_baseline = deaths_gini_abs
deaths_total_rel = 0
deaths_gini_rel = 0
results[policy] = {
'deaths_total_abs':'%.6f'% deaths_total_abs,
'deaths_total_rel':'%.6f'% deaths_total_rel,
'deaths_gini_abs':'%.4f'% deaths_gini_abs,
'deaths_gini_rel':'%.4f'% deaths_gini_rel}
else:
deaths_total_rel = (eval('final_deaths_rate_%s_total'%(policy.lower())) - deaths_total_baseline) / deaths_total_baseline
deaths_gini_rel = (functions.gini(eval('final_deaths_rate_'+ policy.lower())) - deaths_gini_baseline) / deaths_gini_baseline
results[policy] = {
'deaths_total_abs':'%.6f'% deaths_total_abs,
'deaths_total_rel':'%.6f'% deaths_total_rel,
'deaths_gini_abs':'%.4f'% deaths_gini_abs,
'deaths_gini_rel':'%.4f'% deaths_gini_rel}
if(print_result==True):
print('Policy: ', policy)
print('Deaths, Gini Index: ',functions.gini(eval('final_deaths_rate_'+ policy.lower())))
if(policy=='Baseline'):
deaths_total_baseline = eval('final_deaths_rate_%s_total'%(policy.lower()))
deaths_gini_baseline = functions.gini(eval('final_deaths_rate_'+ policy.lower()))
if(policy!='Baseline' and policy!='No_Vaccination'):
print('Compared to baseline:')
print('Deaths total: ', (eval('final_deaths_rate_%s_total'%(policy.lower())) - deaths_total_baseline) / deaths_total_baseline)
print('Deaths gini: ', (functions.gini(eval('final_deaths_rate_'+ policy.lower())) - deaths_gini_baseline) / deaths_gini_baseline)
return results
def make_gini_table(policy_list, demo_feat_list, num_groups, save_path, save_result=False):
cbg_table_name_dict=dict()
cbg_table_name_dict['Age'] = cbg_age_msa
cbg_table_name_dict['Mean_Household_Income'] = cbg_income_msa
cbg_table_name_dict['Essential_Worker'] = cbg_occupation_msa
    cbg_table_name_dict['Hybrid'] = cbg_age_msa  # arbitrary choice; any of the tables works here
#print('Policy list: ', policy_list)
#print('Demographic feature list: ', demo_feat_list)
gini_df = pd.DataFrame(columns=pd.MultiIndex.from_tuples([('All','deaths_total_abs'),('All','deaths_total_rel')]))
gini_df['Policy'] = policy_list
for demo_feat in demo_feat_list:
results = output_result(cbg_table_name_dict[demo_feat],
demo_feat, policy_list, num_groups=NUM_GROUPS,
print_result=False, draw_result=False,rel_to=REL_TO)
for i in range(len(policy_list)):
policy = policy_list[i]
gini_df.loc[i,('All','deaths_total_abs')] = results[policy]['deaths_total_abs']
gini_df.loc[i,('All','deaths_total_rel')] = results[policy]['deaths_total_rel'] if abs(float(results[policy]['deaths_total_rel']))>=0.01 else 0
gini_df.loc[i,(demo_feat,'deaths_gini_abs')] = results[policy]['deaths_gini_abs']
gini_df.loc[i,(demo_feat,'deaths_gini_rel')] = results[policy]['deaths_gini_rel'] if abs(float(results[policy]['deaths_gini_rel']))>=0.01 else 0
gini_df.set_index(['Policy'],inplace=True)
# Transpose
    gini_df_trans = pd.DataFrame(gini_df.values.T, index=gini_df.columns, columns=gini_df.index)  # transpose
# Save .csv
if(save_result==True):
gini_df_trans.to_csv(save_path)
return gini_df_trans
###############################################################################
# Load Demographic-Related Data
start = time.time()
# Load POI-CBG visiting matrices
f = open(os.path.join(root, MSA_NAME, '%s_2020-03-01_to_2020-05-02.pkl'%MSA_NAME_FULL), 'rb')
poi_cbg_visits_list = pickle.load(f)
f.close()
# Load precomputed parameters to adjust(clip) POI dwell times
d = pd.read_csv(os.path.join(root,MSA_NAME, 'parameters_%s.csv' % MSA_NAME))
# No clipping
new_d = d
all_hours = functions.list_hours_in_range(MIN_DATETIME, MAX_DATETIME)
poi_areas = new_d['feet'].values  # POI area (square feet)
poi_dwell_times = new_d['median'].values  # median dwell time per POI
poi_dwell_time_correction_factors = (poi_dwell_times / (poi_dwell_times+60)) ** 2
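# Illustration of the correction factor: a POI with a median dwell time of 60 gets
# (60 / (60 + 60))**2 = 0.25, while very short visits are damped much more strongly
# (the +60 constant suggests the dwell times are in minutes, but that is an assumption here).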
del new_d
del d
# Load ACS Data for MSA-county matching
acs_data = pd.read_csv(os.path.join(root,'list1.csv'),header=2)
acs_msas = [msa for msa in acs_data['CBSA Title'].unique() if type(msa) == str]
msa_match = functions.match_msa_name_to_msas_in_acs_data(MSA_NAME_FULL, acs_msas)
msa_data = acs_data[acs_data['CBSA Title'] == msa_match].copy()
msa_data['FIPS Code'] = msa_data.apply(lambda x : functions.get_fips_codes_from_state_and_county_fp((x['FIPS State Code']),x['FIPS County Code']), axis=1)
good_list = list(msa_data['FIPS Code'].values)
print('Counties included: ', good_list)
del acs_data
# Load CBG ids for the MSA
cbg_ids_msa = pd.read_csv(os.path.join(root,MSA_NAME,'%s_cbg_ids.csv'%MSA_NAME_FULL))
cbg_ids_msa.rename(columns={"cbg_id":"census_block_group"}, inplace=True)
M = len(cbg_ids_msa)
# Mapping from cbg_ids to columns in hourly visiting matrices
cbgs_to_idxs = dict(zip(cbg_ids_msa['census_block_group'].values, range(M)))
x = {}
for i in cbgs_to_idxs:
x[str(i)] = cbgs_to_idxs[i]
#print('Number of CBGs in this metro area:', M)
# Load SafeGraph data to obtain CBG sizes (i.e., populations)
filepath = os.path.join(root,"safegraph_open_census_data/data/cbg_b01.csv")
cbg_agesex = pd.read_csv(filepath)
# Extract CBGs belonging to the MSA - https://covid-mobility.stanford.edu//datasets/
cbg_age_msa = pd.merge(cbg_ids_msa, cbg_agesex, on='census_block_group', how='left')
del cbg_agesex
# Add up males and females of the same age, according to the detailed age list (DETAILED_AGE_LIST)
# which is defined in constants.py
for i in range(3,25+1): # 'B01001e3'~'B01001e25'
male_column = 'B01001e'+str(i)
female_column = 'B01001e'+str(i+24)
cbg_age_msa[constants.DETAILED_AGE_LIST[i-3]] = cbg_age_msa.apply(lambda x : x[male_column]+x[female_column],axis=1)
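# (In ACS table B01001 the male age buckets are B01001e3..e25 and the matching female bucket
# is offset by 24, e.g. B01001e3 pairs with B01001e27; hence the i+24 above.)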
# Rename
cbg_age_msa.rename(columns={'B01001e1':'Sum'},inplace=True)
# Extract columns of interest
columns_of_interest = ['census_block_group','Sum'] + constants.DETAILED_AGE_LIST
cbg_age_msa = cbg_age_msa[columns_of_interest].copy()
# Deal with NaN values
cbg_age_msa.fillna(0,inplace=True)
# Deal with CBGs with 0 populations
cbg_age_msa['Sum'] = cbg_age_msa['Sum'].apply(lambda x : x if x!=0 else 1)
# Obtain cbg sizes (populations)
cbg_sizes = cbg_age_msa['Sum'].values
cbg_sizes = np.array(cbg_sizes,dtype='int32')
print('Total population: ',np.sum(cbg_sizes))
# Select counties belonging to the MSA
y = []
for i in x:
if((len(i)==12) & (int(i[0:5])in good_list)):
y.append(x[i])
if((len(i)==11) & (int(i[0:4])in good_list)):
y.append(x[i])
idxs_msa_all = list(x.values())
idxs_msa_nyt = y
print('Number of CBGs in this metro area:', len(idxs_msa_all))
print('Number of CBGs used to compare with NYT data:', len(idxs_msa_nyt))
# Load other Safegraph demographic data, and perform grouping
#if('Age_Flood' in policy_to_compare):
if(True):
# Calculate elder ratios
cbg_age_msa['Elder_Absolute'] = cbg_age_msa.apply(lambda x : x['70 To 74 Years']+x['75 To 79 Years']+x['80 To 84 Years']+x['85 Years And Over'],axis=1)
cbg_age_msa['Elder_Ratio'] = cbg_age_msa['Elder_Absolute'] / cbg_age_msa['Sum']
# Grouping
separators = functions.get_separators(cbg_age_msa, NUM_GROUPS, 'Elder_Ratio','Sum', normalized=True)
cbg_age_msa['Age_Quantile'] = cbg_age_msa['Elder_Ratio'].apply(lambda x : functions.assign_group(x, separators))
#if('EW_Flood' in policy_to_compare):
if(True):
# cbg_c24.csv: Occupation
filepath = os.path.join(root,"safegraph_open_census_data/data/cbg_c24.csv")
cbg_occupation = pd.read_csv(filepath)
# Extract pois corresponding to the metro area, by merging dataframes
    cbg_occupation_msa = pd.merge(cbg_ids_msa, cbg_occupation, on='census_block_group', how='left')
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from lmfit import Model, Parameters, minimize, report_fit
from scipy.optimize import curve_fit
from scipy import stats
from utilities.statistical_tests import r_squared_calculator
from GEN_Utils import FileHandling
from loguru import logger
logger.info('Import ok')
# Define the fitting functions
def sigmoid(x, bottom, top, X50):
return bottom + ((top - bottom) / (1 + np.exp((X50 - x))))
def boltzmann(x, bottom, top, V50, slope):
return bottom + ((top - bottom) / (1 + np.exp((V50 - x)/slope)))
def denaturant(urea, top, bottom, cM, m):
# adapted from https://en.wikipedia.org/wiki/Equilibrium_unfolding, by keeping terms for bottom as in Savitski, subbing deltaG into standard equation, and reintroducing bottom term as per boltzmann
temp_constant = 298.15
gas_constant = 8.31446261815324
constant = temp_constant * gas_constant
y = bottom + ((top - bottom) / (1 + np.exp((m*(cM-urea)/constant))))
# deltaG can then be calculated as m(cM-urea) - generally calculated at 0M urea therefore m(cM)
return y
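# Written out, the fitted curve is
#   y = bottom + (top - bottom) / (1 + exp(m * (cM - [urea]) / (R * T)))
# with R*T fixed at 8.3145 J/(mol*K) * 298.15 K ~ 2479 J/mol, so the transition midpoint sits at
# [urea] = cM and deltaG at 0 M urea is m * cM (e.g. the initial guesses used below, cM=3 and
# m=-10000, would give roughly -30000 in the same energy units; numbers for illustration only).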
def denaturant_fit(compiled, info_cols, quant_cols):
"""Attempts to fit a sigmoid to each row. Returns sample_params dict where keys are sequences"""
fit_params = {}
for info, quant_data in compiled.set_index(info_cols).iterrows():
# extract x and y vals for fitting
y_vals = np.array(list(quant_data[quant_cols]))
x_vals = np.array([float(x) for x in quant_cols])
# Attempt fitting
try:
model = Model(denaturant)
params = model.make_params(
bottom=-1, top=1, cM=3, m=-10000)
result = model.fit(y_vals, params, urea=x_vals)
r_squared = r_squared_calculator(
x_vals, y_vals, denaturant, result.values.values())
# Collect fitted parameters
fit_stats = pd.DataFrame()
for parameter, details in result.params.items():
fit_stats[f'{parameter}_value'] = [details.value]
fit_stats[f'{parameter}_stderr'] = [details.stderr]
fit_stats[f'{parameter}_relerr'] = fit_stats[f'{parameter}_stderr'].values[0] / \
fit_stats[f'{parameter}_value'].values[0] * 100
# add r-squared value, key info
fit_stats['r_squared'] = r_squared
fit_stats['key'] = [info]
fit_params[info] = fit_stats
except:
logger.info(f'No fit found for {info}')
return fit_params
def sigmoid_filter(summary, filter_R2=True, filter_range=True, filter_cM=True, filter_relerr=True, filter_direction=True):
# apply filtering criteria
filtered = summary.copy()
if filter_R2:
# Remove R2 < filter threshold
filtered['filter_R2'] = [1 if R2 > 0.75 else 0 for R2 in filtered['r_squared']]
logger.info(f"R2 filter: {filtered['filter_R2'].sum()}")
if filter_range:
# Remove top/bottom outside range - threshold = 10?
filtered = filtered[(abs(filtered['top_value']) < 10) & (abs(filtered['bottom_value']) < 10)]
filtered['filter_range'] = [1 if (abs(val_1) < 10) & (abs(val_2) < 10) else 0 for val_1, val_2 in filtered[['top_value', 'bottom_value']].values]
logger.info(f"Range filter: {filtered['filter_range'].sum()}")
if filter_cM:
# Remove cM outside range tested
filtered['filter_cM'] = [1 if (val < 6) & (val > 0) else 0 for val in filtered['cM_value']]
logger.info(f"cM filter: {filtered['filter_cM'].sum()}")
if filter_relerr:
# Remove fits with > 50% uncertainty in cM fit
filtered['filter_relerr'] = [1 if val < 50 else 0 for val in filtered['cM_relerr']]
logger.info(f"Relative cM error: {filtered['filter_relerr'].sum()}")
if filter_direction:
# Remove sigmoids that trend upward
filtered['filter_direction'] = [1 if val_0 > val_6 else 0 for val_0, val_6 in zip(filtered['0M_value'], filtered['6M_value'])]
logger.info(f"Sigmoid direction: {filtered['filter_direction'].sum()}")
filter_cols = [col for col in filtered.columns.tolist() if 'filter_' in str(col)]
filtered['filter_count'] = filtered[filter_cols].sum(axis=1)
filtered['filter_all'] = [1 if num == len(filter_cols) else 0 for num in filtered['filter_count']]
logger.info(f"All filters: {filtered['filter_all'].sum()}")
# add filtering info to original df
summary['filtered'] = filtered['filter_all']
return summary, filtered
if __name__ == '__main__':
filter_cols = []
input_path = f'results/lysate_denaturation/clustering/clustered.xlsx'
output_folder = f'results/lysate_denaturation/sigmoid_fitting/'
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# Read in cluster data
clusters_summary = pd.read_excel(input_path, sheet_name=None)
cluster_number = clusters_summary['summary']['cluster'].max()
clusters = clusters_summary['clustered'].copy()
clusters.drop([col for col in clusters.columns.tolist() if 'Unnamed: ' in str(col)], axis=1, inplace=True)
info_cols = ['Sequence', 'Proteins', 'PC1', 'PC2', f'score_{cluster_number}', f'member_{cluster_number}']
quant_cols = [col for col in clusters.columns.tolist() if type(col) == float]
clusters = clusters[info_cols+quant_cols].rename(columns={f'member_{cluster_number}': 'cluster', f'score_{cluster_number}': 'score'})
info_cols = ['Sequence', 'Proteins', 'PC1', 'PC2', 'cluster', 'score']
# complete denaturant fit
fit_params = denaturant_fit(clusters, info_cols=info_cols, quant_cols=quant_cols)
fitting_parameters = pd.concat(fit_params.values()).reset_index(drop=True)
# add back useful info
fitting_parameters[info_cols] = pd.DataFrame(fitting_parameters['key'].tolist(), index=fitting_parameters.index)
    summary = pd.merge(clusters, fitting_parameters, on=info_cols, how='inner')
import pandas as pd
import logging
import heapq
import significance_tests as st
class InsightExtractor:
def __init__(self, data, dimensions, measure, agg):
"""
input:
data: pandas dataframe
dimensions: array of strings of dimension names
measure: string measure name
agg: string of the level-1 aggregation function
to apply (one of: [sum, count]), default "sum"
"""
self.data = data.fillna('')
self.dimensions = dimensions
self.agg = agg
# If the aggregate measure is count, create a column of 1s
# as our measure column. This allows us to use the same 'sum'
# aggregate functions in place of count
if self.agg == 'count':
self.measure = 'count'
self.data['count'] = 1
else:
self.measure = measure
# Cache the overall sum of the measure, since we use it repeatedly
self.total_measure_sum = self.data[self.measure].sum()
# A cutoff score: don't look for insights that have a subgroup impact
# smaller than this cutoff score.
self.cutoff = 0.01
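        # Presumably "impact" is the subgroup's share of self.total_measure_sum, so the default of
        # 0.01 skips subgroups contributing less than about 1% of the measure (an interpretation of
        # the comment above; the code that applies the cutoff is not shown in this excerpt).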
# Starting Insight id, unique for each insight found
self.iid = 1001
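    # Typical construction (illustrative column names only, not taken from the original script):
    #   ie = InsightExtractor(df, dimensions=['region', 'product'], measure='revenue', agg='sum')
    # or, from a csv laid out as described in fromfilename below:
    #   ie = InsightExtractor.fromfilename('sales.csv', agg='count')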
@classmethod
def fromfilename(cls, filename, agg):
"""
Datasets should be csvs formatted with dimension columns to the left
and measure as the final column. The first row should be a header.
"""
        data = pd.read_csv(filename, encoding='mac_roman')
import pandas as pd
from unittest2 import TestCase # or `from unittest import ...` if on Python 3.4+
import numpy as np
import category_encoders.tests.helpers as th
import category_encoders as encoders
np_X = th.create_array(n_rows=100)
np_X_t = th.create_array(n_rows=50, extras=True)
np_y = np.random.randn(np_X.shape[0]) > 0.5
np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5
X = th.create_dataset(n_rows=100)
X_t = th.create_dataset(n_rows=50, extras=True)
y = pd.DataFrame(np_y)
y_t = pd.DataFrame(np_y_t)
class TestOneHotEncoderTestCase(TestCase):
def test_one_hot(self):
enc = encoders.OneHotEncoder(verbose=1, return_df=False)
enc.fit(X)
self.assertEqual(enc.transform(X_t).shape[1],
enc.transform(X).shape[1],
'We have to get the same count of columns despite the presence of a new value')
enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='indicator')
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('extra_-1', out.columns.values)
enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='return_nan')
enc.fit(X)
out = enc.transform(X_t)
self.assertEqual(len([x for x in out.columns.values if str(x).startswith('extra_')]), 3)
enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='error')
# The exception is already raised in fit() because transform() is called there to get
# feature_names right.
enc.fit(X)
with self.assertRaises(ValueError):
enc.transform(X_t)
enc = encoders.OneHotEncoder(verbose=1, return_df=True, handle_unknown='return_nan', use_cat_names=True)
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('extra_A', out.columns.values)
enc = encoders.OneHotEncoder(verbose=1, return_df=True, use_cat_names=True, handle_unknown='indicator')
enc.fit(X)
out = enc.transform(X_t)
self.assertIn('extra_-1', out.columns.values)
# test inverse_transform
X_i = th.create_dataset(n_rows=100, has_none=False)
X_i_t = th.create_dataset(n_rows=50, has_none=False)
cols = ['underscore', 'none', 'extra', 321, 'categorical']
enc = encoders.OneHotEncoder(verbose=1, use_cat_names=True, cols=cols)
enc.fit(X_i)
obtained = enc.inverse_transform(enc.transform(X_i_t))
th.verify_inverse_transform(X_i_t, obtained)
def test_fit_transform_HaveMissingValuesAndUseCatNames_ExpectCorrectValue(self):
encoder = encoders.OneHotEncoder(cols=[0], use_cat_names=True, handle_unknown='indicator')
result = encoder.fit_transform([[-1]])
self.assertListEqual([[1, 0]], result.get_values().tolist())
def test_inverse_transform_HaveDedupedColumns_ExpectCorrectInverseTransform(self):
encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=True)
value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)})
transformed = encoder.fit_transform(value)
inverse_transformed = encoder.inverse_transform(transformed)
assert value.equals(inverse_transformed)
def test_inverse_transform_HaveNoCatNames_ExpectCorrectInverseTransform(self):
encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=False)
value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series(-1)})
transformed = encoder.fit_transform(value)
inverse_transformed = encoder.inverse_transform(transformed)
assert value.equals(inverse_transformed)
def test_fit_transform_HaveColumnAppearTwice_ExpectColumnsDeduped(self):
encoder = encoders.OneHotEncoder(cols=['match', 'match_box'], use_cat_names=True, handle_unknown='indicator')
value = pd.DataFrame({'match': pd.Series('box_-1'), 'match_box': pd.Series('-1')})
result = encoder.fit_transform(value)
columns = result.columns.tolist()
self.assertSetEqual({'match_box_-1', 'match_-1', 'match_box_-1#', 'match_box_-1##'}, set(columns))
def test_fit_transform_HaveHandleUnknownValueAndUnseenValues_ExpectAllZeroes(self):
train = pd.DataFrame({'city': ['Chicago', 'Seattle']})
test = pd.DataFrame({'city': ['Chicago', 'Detroit']})
expected_result = pd.DataFrame({'city_1': [1, 0],
'city_2': [0, 0]},
columns=['city_1', 'city_2'])
enc = encoders.OneHotEncoder(handle_unknown='value')
result = enc.fit(train).transform(test)
pd.testing.assert_frame_equal(expected_result, result)
def test_fit_transform_HaveHandleUnknownValueAndSeenValues_ExpectMappingUsed(self):
train = pd.DataFrame({'city': ['Chicago', 'Seattle']})
expected_result = pd.DataFrame({'city_1': [1, 0],
'city_2': [0, 1]},
columns=['city_1', 'city_2'])
enc = encoders.OneHotEncoder(handle_unknown='value')
result = enc.fit(train).transform(train)
pd.testing.assert_frame_equal(expected_result, result)
def test_fit_transform_HaveHandleUnknownIndicatorAndNoMissingValue_ExpectExtraColumn(self):
train = pd.DataFrame({'city': ['Chicago', 'Seattle']})
expected_result = pd.DataFrame({'city_1': [1, 0],
'city_2': [0, 1],
'city_-1': [0, 0]},
columns=['city_1', 'city_2', 'city_-1'])
enc = encoders.OneHotEncoder(handle_unknown='indicator')
result = enc.fit(train).transform(train)
pd.testing.assert_frame_equal(expected_result, result)
def test_fit_transform_HaveHandleUnknownIndicatorAndMissingValue_ExpectValueSet(self):
train = pd.DataFrame({'city': ['Chicago', 'Seattle']})
test = pd.DataFrame({'city': ['Chicago', 'Detroit']})
expected_result = pd.DataFrame({'city_1': [1, 0],
'city_2': [0, 0],
'city_-1': [0, 1]},
columns=['city_1', 'city_2', 'city_-1'])
enc = encoders.OneHotEncoder(handle_unknown='indicator')
result = enc.fit(train).transform(test)
pd.testing.assert_frame_equal(expected_result, result)
def test_HandleMissingIndicator_NanInTrain_ExpectAsColumn(self):
train = ['A', 'B', np.nan]
encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value')
result = encoder.fit_transform(train)
expected = [[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]
self.assertEqual(result.values.tolist(), expected)
def test_HandleMissingIndicator_HaveNoNan_ExpectSecondColumn(self):
train = ['A', 'B']
encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value')
result = encoder.fit_transform(train)
expected = [[1, 0, 0],
[0, 1, 0]]
self.assertEqual(result.values.tolist(), expected)
def test_HandleMissingIndicator_NanNoNanInTrain_ExpectAsNanColumn(self):
train = ['A', 'B']
test = ['A', 'B', np.nan]
encoder = encoders.OneHotEncoder(handle_missing='indicator', handle_unknown='value')
encoder.fit(train)
result = encoder.transform(test)
expected = [[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]
self.assertEqual(result.values.tolist(), expected)
def test_HandleUnknown_HaveNoUnknownInTrain_ExpectIndicatorInTest(self):
train = ['A', 'B']
test = ['A', 'B', 'C']
encoder = encoders.OneHotEncoder(handle_unknown='indicator', handle_missing='value')
encoder.fit(train)
result = encoder.transform(test)
expected = [[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]
self.assertEqual(result.values.tolist(), expected)
def test_HandleUnknown_HaveOnlyKnown_ExpectSecondColumn(self):
train = ['A', 'B']
encoder = encoders.OneHotEncoder(handle_unknown='indicator', handle_missing='value')
result = encoder.fit_transform(train)
expected = [[1, 0, 0],
[0, 1, 0]]
self.assertEqual(result.values.tolist(), expected)
def test_inverse_transform_HaveNanInTrainAndHandleMissingValue_ExpectReturnedWithNan(self):
train = pd.DataFrame({'city': ['chicago', np.nan]})
enc = encoders.OneHotEncoder(handle_missing='value', handle_unknown='value')
result = enc.fit_transform(train)
original = enc.inverse_transform(result)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_HaveNanInTrainAndHandleMissingReturnNan_ExpectReturnedWithNan(self):
train = pd.DataFrame({'city': ['chicago', np.nan]})
enc = encoders.OneHotEncoder(handle_missing='return_nan', handle_unknown='value')
result = enc.fit_transform(train)
original = enc.inverse_transform(result)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_BothFieldsAreReturnNanWithNan_ExpectValueError(self):
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', 'los angeles']})
enc = encoders.OneHotEncoder(handle_missing='return_nan', handle_unknown='return_nan')
enc.fit(train)
result = enc.transform(test)
message = 'inverse_transform is not supported because transform impute '\
'the unknown category nan when encode city'
with self.assertWarns(UserWarning, msg=message) as w:
enc.inverse_transform(result)
def test_inverse_transform_HaveMissingAndNoUnknown_ExpectInversed(self):
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', 'los angeles']})
enc = encoders.OneHotEncoder(handle_missing='value', handle_unknown='return_nan')
enc.fit(train)
result = enc.transform(test)
original = enc.inverse_transform(result)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_HaveHandleMissingValueAndHandleUnknownReturnNan_ExpectBestInverse(self):
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', np.nan, 'los angeles']})
        expected = pd.DataFrame({'city': ['chicago', np.nan, np.nan]})
import matplotlib.pyplot as plt
import seaborn as sns
from datos import data
import pandas
sns.set(style="white")
d=data('mtcars')
colors = sns.husl_palette(3)
d=data('mtcars')
ps = pandas.Series([i for i in d.cyl])
from datetime import datetime
import pandas as pd
import pytest
from dask import dataframe as dd
import featuretools as ft
from featuretools import Relationship
from featuretools.tests.testing_utils import to_pandas
from featuretools.utils.gen_utils import import_or_none
ks = import_or_none('databricks.koalas')
@pytest.fixture
def values_es(es):
es.normalize_entity('log', 'values', 'value',
make_time_index=True,
new_entity_time_index="value_time")
return es
@pytest.fixture
def true_values_lti():
true_values_lti = pd.Series([datetime(2011, 4, 10, 10, 41, 0),
datetime(2011, 4, 9, 10, 31, 9),
datetime(2011, 4, 9, 10, 31, 18),
datetime(2011, 4, 9, 10, 31, 27),
datetime(2011, 4, 10, 10, 40, 1),
datetime(2011, 4, 10, 10, 41, 3),
datetime(2011, 4, 9, 10, 30, 12),
datetime(2011, 4, 10, 10, 41, 6),
datetime(2011, 4, 9, 10, 30, 18),
datetime(2011, 4, 9, 10, 30, 24),
datetime(2011, 4, 10, 11, 10, 3)])
return true_values_lti
@pytest.fixture
def true_sessions_lti():
sessions_lti = pd.Series([datetime(2011, 4, 9, 10, 30, 24),
datetime(2011, 4, 9, 10, 31, 27),
datetime(2011, 4, 9, 10, 40, 0),
datetime(2011, 4, 10, 10, 40, 1),
datetime(2011, 4, 10, 10, 41, 6),
datetime(2011, 4, 10, 11, 10, 3)])
return sessions_lti
@pytest.fixture
def wishlist_df():
wishlist_df = pd.DataFrame({
"session_id": [0, 1, 2, 2, 3, 4, 5],
"datetime": [datetime(2011, 4, 9, 10, 30, 15),
datetime(2011, 4, 9, 10, 31, 30),
datetime(2011, 4, 9, 10, 30, 30),
datetime(2011, 4, 9, 10, 35, 30),
datetime(2011, 4, 10, 10, 41, 0),
datetime(2011, 4, 10, 10, 39, 59),
datetime(2011, 4, 10, 11, 10, 2)],
"product_id": ['coke zero', 'taco clock', 'coke zero', 'car',
'toothpaste', 'brown bag', 'coke zero'],
})
return wishlist_df
@pytest.fixture
def extra_session_df(es):
row_values = {'customer_id': 2,
'device_name': 'PC',
'device_type': 0,
'id': 6}
row = pd.DataFrame(row_values, index=pd.Index([6], name='id'))
df = to_pandas(es['sessions'].df)
df = df.append(row, sort=True).sort_index()
if isinstance(es['sessions'].df, dd.DataFrame):
df = dd.from_pandas(df, npartitions=3)
if ks and isinstance(es['sessions'].df, ks.DataFrame):
df = ks.from_pandas(df)
return df
class TestLastTimeIndex(object):
def test_leaf(self, es):
es.add_last_time_indexes()
log = es['log']
assert len(log.last_time_index) == 17
log_df = to_pandas(log.df)
log_lti = to_pandas(log.last_time_index)
for v1, v2 in zip(log_lti, log_df['datetime']):
assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2
def test_leaf_no_time_index(self, es):
es.add_last_time_indexes()
stores = es['stores']
true_lti = pd.Series([None for x in range(6)], dtype='datetime64[ns]')
assert len(true_lti) == len(stores.last_time_index)
stores_lti = to_pandas(stores.last_time_index)
for v1, v2 in zip(stores_lti, true_lti):
assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2
# TODO: possible issue with either normalize_entity or add_last_time_indexes
def test_parent(self, values_es, true_values_lti):
# test entity with time index and all instances in child entity
if not all(isinstance(entity.df, pd.DataFrame) for entity in values_es.entities):
pytest.xfail('possible issue with either normalize_entity or add_last_time_indexes')
values_es.add_last_time_indexes()
values = values_es['values']
assert len(values.last_time_index) == 11
sorted_lti = to_pandas(values.last_time_index).sort_index()
for v1, v2 in zip(sorted_lti, true_values_lti):
assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2
# TODO: fails with Dask, tests needs to be reworked
def test_parent_some_missing(self, values_es, true_values_lti):
# test entity with time index and not all instances have children
if not all(isinstance(entity.df, pd.DataFrame) for entity in values_es.entities):
pytest.xfail('fails with Dask, tests needs to be reworked')
values = values_es['values']
# add extra value instance with no children
row_values = {'value': 21.0,
'value_time': pd.Timestamp("2011-04-10 11:10:02"),
'values_id': 11}
# make sure index doesn't have same name as column to suppress pandas warning
row = pd.DataFrame(row_values, index=pd.Index([11]))
df = values.df.append(row, sort=True)
df = df[['value', 'value_time']].sort_values(by='value')
df.index.name = 'values_id'
values.update_data(df)
values_es.add_last_time_indexes()
# lti value should default to instance's time index
true_values_lti[10] = pd.Timestamp("2011-04-10 11:10:02")
true_values_lti[11] = pd.Timestamp("2011-04-10 11:10:03")
assert len(values.last_time_index) == 12
sorted_lti = values.last_time_index.sort_index()
for v1, v2 in zip(sorted_lti, true_values_lti):
assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2
def test_parent_no_time_index(self, es, true_sessions_lti):
# test entity without time index and all instances have children
es.add_last_time_indexes()
sessions = es['sessions']
assert len(sessions.last_time_index) == 6
sorted_lti = to_pandas(sessions.last_time_index).sort_index()
for v1, v2 in zip(sorted_lti, true_sessions_lti):
assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2
def test_parent_no_time_index_missing(self, es, extra_session_df,
true_sessions_lti):
# test entity without time index and not all instance have children
sessions = es['sessions']
# add session instance with no associated log instances
sessions.update_data(extra_session_df)
es.add_last_time_indexes()
# since sessions has no time index, default value is NaT
true_sessions_lti[6] = pd.NaT
assert len(sessions.last_time_index) == 7
sorted_lti = to_pandas(sessions.last_time_index).sort_index()
for v1, v2 in zip(sorted_lti, true_sessions_lti):
assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2
def test_multiple_children(self, es, wishlist_df,
true_sessions_lti):
# test all instances in both children
if isinstance(es.entities[0].df, dd.DataFrame):
wishlist_df = dd.from_pandas(wishlist_df, npartitions=2)
if ks and isinstance(es.entities[0].df, ks.DataFrame):
wishlist_df = ks.from_pandas(wishlist_df)
variable_types = {'id': ft.variable_types.variable.Index,
'session_id': ft.variable_types.variable.Numeric,
'datetime': ft.variable_types.variable.DatetimeTimeIndex,
'product_id': ft.variable_types.variable.Categorical}
es.entity_from_dataframe(entity_id="wishlist_log",
dataframe=wishlist_df,
index='id',
make_index=True,
time_index='datetime',
variable_types=variable_types)
relationship = Relationship(es['sessions']['id'],
es['wishlist_log']['session_id'])
es.add_relationship(relationship)
es.add_last_time_indexes()
sessions = es['sessions']
# wishlist df has more recent events for two session ids
true_sessions_lti[1] = pd.Timestamp("2011-4-9 10:31:30")
true_sessions_lti[3] = pd.Timestamp("2011-4-10 10:41:00")
assert len(sessions.last_time_index) == 6
sorted_lti = to_pandas(sessions.last_time_index).sort_index()
for v1, v2 in zip(sorted_lti, true_sessions_lti):
            assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2
import re
import numpy as np
import pytest
from pandas import Categorical, CategoricalIndex, DataFrame, Index, Series
import pandas._testing as tm
from pandas.core.arrays.categorical import recode_for_categories
from pandas.tests.arrays.categorical.common import TestCategorical
class TestCategoricalAPI:
def test_ordered_api(self):
# GH 9347
cat1 = Categorical(list("acb"), ordered=False)
tm.assert_index_equal(cat1.categories, Index(["a", "b", "c"]))
assert not cat1.ordered
cat2 = Categorical(list("acb"), categories=list("bca"), ordered=False)
tm.assert_index_equal(cat2.categories, Index(["b", "c", "a"]))
assert not cat2.ordered
cat3 = Categorical(list("acb"), ordered=True)
tm.assert_index_equal(cat3.categories, Index(["a", "b", "c"]))
assert cat3.ordered
cat4 = Categorical(list("acb"), categories=list("bca"), ordered=True)
tm.assert_index_equal(cat4.categories, Index(["b", "c", "a"]))
assert cat4.ordered
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
assert not cat2.ordered
cat2 = cat.as_ordered()
assert cat2.ordered
cat2.as_unordered(inplace=True)
assert not cat2.ordered
cat2.as_ordered(inplace=True)
assert cat2.ordered
assert cat2.set_ordered(True).ordered
assert not cat2.set_ordered(False).ordered
cat2.set_ordered(True, inplace=True)
assert cat2.ordered
cat2.set_ordered(False, inplace=True)
assert not cat2.ordered
# removed in 0.19.0
msg = "can't set attribute"
with pytest.raises(AttributeError, match=msg):
cat.ordered = True
with pytest.raises(AttributeError, match=msg):
cat.ordered = False
def test_rename_categories(self):
cat = Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
tm.assert_numpy_array_equal(
res.__array__(), np.array([1, 2, 3, 1], dtype=np.int64)
)
tm.assert_index_equal(res.categories, Index([1, 2, 3]))
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
exp_cat = Index(["a", "b", "c"])
tm.assert_index_equal(cat.categories, exp_cat)
# GH18862 (let rename_categories take callables)
result = cat.rename_categories(lambda x: x.upper())
expected = Categorical(["A", "B", "C", "A"])
tm.assert_categorical_equal(result, expected)
# and now inplace
res = cat.rename_categories([1, 2, 3], inplace=True)
assert res is None
tm.assert_numpy_array_equal(
cat.__array__(), np.array([1, 2, 3, 1], dtype=np.int64)
)
tm.assert_index_equal(cat.categories, Index([1, 2, 3]))
@pytest.mark.parametrize("new_categories", [[1, 2, 3, 4], [1, 2]])
def test_rename_categories_wrong_length_raises(self, new_categories):
cat = Categorical(["a", "b", "c", "a"])
msg = (
"new categories need to have the same number of items as the "
"old categories!"
)
with pytest.raises(ValueError, match=msg):
cat.rename_categories(new_categories)
def test_rename_categories_series(self):
# https://github.com/pandas-dev/pandas/issues/17981
c = Categorical(["a", "b"])
result = c.rename_categories(Series([0, 1], index=["a", "b"]))
expected = Categorical([0, 1])
tm.assert_categorical_equal(result, expected)
def test_rename_categories_dict(self):
# GH 17336
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"a": 4, "b": 3, "c": 2, "d": 1})
expected = Index([4, 3, 2, 1])
tm.assert_index_equal(res.categories, expected)
# Test for inplace
res = cat.rename_categories({"a": 4, "b": 3, "c": 2, "d": 1}, inplace=True)
assert res is None
tm.assert_index_equal(cat.categories, expected)
# Test for dicts of smaller length
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"a": 1, "c": 3})
expected = Index([1, "b", 3, "d"])
tm.assert_index_equal(res.categories, expected)
# Test for dicts with bigger length
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6})
expected = Index([1, 2, 3, 4])
tm.assert_index_equal(res.categories, expected)
# Test for dicts with no items from old categories
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"f": 1, "g": 3})
expected = Index(["a", "b", "c", "d"])
tm.assert_index_equal(res.categories, expected)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(
["a", "b", "c", "a"], categories=["c", "b", "a"], ordered=True
)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
tm.assert_categorical_equal(cat, old)
# only res is changed
tm.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
assert res is None
tm.assert_categorical_equal(cat, new)
@pytest.mark.parametrize(
"new_categories",
[
["a"], # not all "old" included in "new"
["a", "b", "d"], # still not all "old" in "new"
["a", "b", "c", "d"], # all "old" included in "new", but too long
],
)
def test_reorder_categories_raises(self, new_categories):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
msg = "items in new_categories are not the same as in old categories"
with pytest.raises(ValueError, match=msg):
cat.reorder_categories(new_categories)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(
["a", "b", "c", "a"], categories=["a", "b", "c", "d"], ordered=True
)
# first inplace == False
res = cat.add_categories("d")
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
tm.assert_categorical_equal(cat, new)
assert res is None
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
tm.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
tm.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
tm.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
tm.assert_categorical_equal(res, expected)
def test_add_categories_existing_raises(self):
# new is in old categories
cat = Categorical(["a", "b", "c", "d"], ordered=True)
msg = re.escape("new categories must not include old categories: {'d'}")
with pytest.raises(ValueError, match=msg):
cat.add_categories(["d"])
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = Index(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
res = cat.set_categories(["c", "b", "a"], inplace=True)
tm.assert_index_equal(cat.categories, exp_categories)
tm.assert_numpy_array_equal(cat.__array__(), exp_values)
assert res is None
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
tm.assert_index_equal(cat.categories, exp_categories)
tm.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = Index(["a", "b", "c"])
tm.assert_index_equal(res.categories, exp_categories_back)
tm.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
tm.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0], dtype=np.int8))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
tm.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0], dtype=np.int8))
tm.assert_index_equal(res.categories, Index(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = Index(["a", "b", "c", "d"])
tm.assert_index_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0], dtype=np.int8))
tm.assert_index_equal(c.categories, Index([1, 2, 3, 4]))
exp = np.array([1, 2, 3, 4, 1], dtype=np.int64)
tm.assert_numpy_array_equal(np.asarray(c), exp)
# all "pointers" to '4' must be changed from 3 to 0,...
c = c.set_categories([4, 3, 2, 1])
# positions are changed
tm.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3], dtype=np.int8))
# categories are now in new order
tm.assert_index_equal(c.categories, Index([4, 3, 2, 1]))
# output is the same
exp = np.array([1, 2, 3, 4, 1], dtype=np.int64)
tm.assert_numpy_array_equal(np.asarray(c), exp)
assert c.min() == 4
assert c.max() == 1
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
assert not c2.ordered
tm.assert_numpy_array_equal(np.asarray(c), np.asarray(c2))
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
assert not c2.ordered
tm.assert_numpy_array_equal(np.asarray(c), np.asarray(c2))
def test_to_dense_deprecated(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
with tm.assert_produces_warning(FutureWarning):
cat.to_dense()
@pytest.mark.parametrize(
"values, categories, new_categories",
[
# No NaNs, same cats, same order
(["a", "b", "a"], ["a", "b"], ["a", "b"]),
# No NaNs, same cats, different order
(["a", "b", "a"], ["a", "b"], ["b", "a"]),
# Same, unsorted
(["b", "a", "a"], ["a", "b"], ["a", "b"]),
# No NaNs, same cats, different order
(["b", "a", "a"], ["a", "b"], ["b", "a"]),
# NaNs
(["a", "b", "c"], ["a", "b"], ["a", "b"]),
(["a", "b", "c"], ["a", "b"], ["b", "a"]),
(["b", "a", "c"], ["a", "b"], ["a", "b"]),
(["b", "a", "c"], ["a", "b"], ["a", "b"]),
# Introduce NaNs
(["a", "b", "c"], ["a", "b"], ["a"]),
(["a", "b", "c"], ["a", "b"], ["b"]),
(["b", "a", "c"], ["a", "b"], ["a"]),
(["b", "a", "c"], ["a", "b"], ["a"]),
# No overlap
(["a", "b", "c"], ["a", "b"], ["d", "e"]),
],
)
@pytest.mark.parametrize("ordered", [True, False])
def test_set_categories_many(self, values, categories, new_categories, ordered):
c = Categorical(values, categories)
expected = Categorical(values, new_categories, ordered)
result = c.set_categories(new_categories, ordered=ordered)
tm.assert_categorical_equal(result, expected)
def test_set_categories_rename_less(self):
# GH 24675
cat = Categorical(["A", "B"])
result = cat.set_categories(["A"], rename=True)
expected = Categorical(["A", np.nan])
tm.assert_categorical_equal(result, expected)
def test_set_categories_private(self):
cat = Categorical(["a", "b", "c"], categories=["a", "b", "c", "d"])
cat._set_categories(["a", "c", "d", "e"])
expected = Categorical(["a", "c", "d"], categories=list("acde"))
tm.assert_categorical_equal(cat, expected)
# fastpath
cat = Categorical(["a", "b", "c"], categories=["a", "b", "c", "d"])
cat._set_categories(["a", "c", "d", "e"], fastpath=True)
expected = Categorical(["a", "c", "d"], categories=list("acde"))
tm.assert_categorical_equal(cat, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"], ordered=True)
# first inplace == False
res = cat.remove_categories("c")
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
tm.assert_categorical_equal(cat, new)
assert res is None
@pytest.mark.parametrize("removals", [["c"], ["c", np.nan], "c", ["c", "c"]])
def test_remove_categories_raises(self, removals):
cat = Categorical(["a", "b", "a"])
message = re.escape("removals must all be in old categories: {'c'}")
with pytest.raises(ValueError, match=message):
cat.remove_categories(removals)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"], categories=["a", "b", "c", "d", "e"])
exp_categories_all = Index(["a", "b", "c", "d", "e"])
exp_categories_dropped = Index(["a", "b", "c", "d"])
tm.assert_index_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
tm.assert_index_equal(res.categories, exp_categories_dropped)
tm.assert_index_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
tm.assert_index_equal(c.categories, exp_categories_dropped)
assert res is None
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan], categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
tm.assert_index_equal(res.categories, Index(np.array(["a", "b", "c"])))
exp_codes = np.array([0, 1, 2, -1], dtype=np.int8)
tm.assert_numpy_array_equal(res.codes, exp_codes)
tm.assert_index_equal(c.categories, exp_categories_all)
val = ["F", np.nan, "D", "B", "D", "F", np.nan]
cat = Categorical(values=val, categories=list("ABCDEFG"))
out = cat.remove_unused_categories()
tm.assert_index_equal(out.categories, Index(["B", "D", "F"]))
exp_codes = np.array([2, -1, 1, 0, 1, 2, -1], dtype=np.int8)
tm.assert_numpy_array_equal(out.codes, exp_codes)
assert out.tolist() == val
alpha = list("abcdefghijklmnopqrstuvwxyz")
val = np.random.choice(alpha[::2], 10000).astype("object")
val[np.random.choice(len(val), 100)] = np.nan
cat = Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
assert out.tolist() == val.tolist()
class TestCategoricalAPIWithFactor(TestCategorical):
def test_describe(self):
# string type
desc = self.factor.describe()
assert self.factor.ordered
exp_index = CategoricalIndex(
["a", "b", "c"], name="categories", ordered=self.factor.ordered
)
expected = DataFrame(
{"counts": [3, 2, 3], "freqs": [3 / 8.0, 2 / 8.0, 3 / 8.0]}, index=exp_index
)
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
exp_index = CategoricalIndex(
list("abcd"), ordered=self.factor.ordered, name="categories"
)
expected = DataFrame(
{"counts": [3, 2, 3, 0], "freqs": [3 / 8.0, 2 / 8.0, 3 / 8.0, 0]},
index=exp_index,
)
tm.assert_frame_equal(desc, expected)
# check an integer one
cat = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1])
desc = cat.describe()
exp_index = CategoricalIndex([1, 2, 3], ordered=cat.ordered, name="categories")
expected = DataFrame(
{"counts": [5, 3, 3], "freqs": [5 / 11.0, 3 / 11.0, 3 / 11.0]},
index=exp_index,
)
tm.assert_frame_equal(desc, expected)
# https://github.com/pandas-dev/pandas/issues/3678
# describe should work with NaN
cat = Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame(
{"counts": [1, 2, 1], "freqs": [1 / 4.0, 2 / 4.0, 1 / 4.0]},
index=CategoricalIndex(
[1, 2, np.nan], categories=[1, 2], name="categories"
),
)
        tm.assert_frame_equal(desc, expected)
from collections import OrderedDict
import os
import torch
import numpy as np
import pandas as pd
from tqdm import tqdm
from utils.measure import Measure, psnr
from utils.imresize import imresize
from utils.util import patchify, fiFindByWildcard, t, rgb, imread, imwrite, impad
from models.modules.flow import GaussianDiag
def get_epses(opt, batch_size, lr_shape, eps_std, seed=None):
if seed: torch.manual_seed(seed)
C = opt['flowUpsamplerNet']['C']
H = int(opt['scale'] * lr_shape[2] // opt['flowUpsamplerNet']['scaleH'])
W = int(opt['scale'] * lr_shape[3] // opt['flowUpsamplerNet']['scaleW'])
z = GaussianDiag.sample_eps([batch_size, C, H, W], eps_std)
epses = [z]
levels = int(np.log2(opt['scale']) + 1)
for level in range(2, levels):
new_C = 2 ** (level-2) * C // 4**level
new_H = H * 2**level
new_W = W * 2**level
eps = GaussianDiag.sample_eps([batch_size, new_C, new_H, new_W], eps_std)
epses.append(eps)
return epses[::-1]
@torch.no_grad()
def get_sr_with_epses(model, opt, lq, heat=None, epses=None):
assert not model.training
if epses is None:
heat = opt['heat'] if heat is None else heat
epses = get_epses(opt, lq.shape[0], lq.shape, heat)
lr = lq.to(opt['device'], opt['eval_dtype'])
epses = [eps.to(opt['device'], opt['eval_dtype']) for eps in epses]
sr = model(lr=lr, z=None, epses=epses).float()
return sr, [eps.float() for eps in epses]
@torch.no_grad()
def get_sr(model, opt, lq, is_rrdb=False, **kwargs):
assert not model.training
if is_rrdb:
return model(lq.to(opt['device'], opt['eval_dtype'])).float()
else:
return get_sr_with_epses(model, opt, lq, **kwargs)[0]
def compute_validation_metrics(model, opt, dataset, pad_factor=2, epses=None, is_rrdb=False):
measure = Measure(use_gpu=True)
df = None
for (lr, hr) in dataset:
lr, hr = np.array(lr), np.array(hr)
h, w, _ = lr.shape
lq_orig = lr.copy()
lr = impad(lr, bottom=int(np.ceil(h / pad_factor) * pad_factor - h),
right=int(np.ceil(w / pad_factor) * pad_factor - w))
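        # e.g. with pad_factor=2 an LR image of height 101 is padded by
        # int(np.ceil(101 / 2) * 2 - 101) = 1 extra row so the size divides evenly;
        # the padding is cropped off again after super-resolution (see the slicing of sr below).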
lr_t = t(lr)
if epses is not None:
tmp_epses = get_epses(opt, 1, lr_t.shape, opt['heat'])
repeated_epses = []
for i, tmp_eps in enumerate(tmp_epses):
eps = epses[i].repeat(1, 1,16,16)[:, :, :tmp_eps.shape[2],:tmp_eps.shape[3]]
repeated_epses.append(eps)
sr_t = get_sr(model, opt, lq=lr_t, is_rrdb=False, epses=repeated_epses)
else:
sr_t = get_sr(model, opt, lq=lr_t, is_rrdb=is_rrdb)
sr = rgb(sr_t)[:hr.shape[0], :hr.shape[1]]
meas = OrderedDict()
meas['PSNR'], meas['SSIM'], meas['LPIPS'] = measure.measure(sr, hr)
lr_reconstruct_rgb = imresize(sr, 1 / opt['scale'])
meas['LRC PSNR'] = psnr(lq_orig, lr_reconstruct_rgb)
df = pd.DataFrame([meas]) if df is None else pd.concat([pd.DataFrame([meas]), df])
return df.mean()
def compute_metrics_on_train_patches(model, opt, lrs, gts, epses=None, is_rrdb=False):
measure = Measure(use_gpu=True)
if epses is not None:
epses = [eps.repeat(len(lrs), 1, 1, 1) for eps in epses]
srs = get_sr(model, opt, lq=lrs, is_rrdb=False, epses=epses)
else:
srs = get_sr(model, opt, lq=lrs, is_rrdb=is_rrdb)
df = None
for (lr, sr, gt) in zip(lrs, srs, gts):
lr, sr, hr = rgb(lr), rgb(sr), rgb(gt)
meas = OrderedDict()
meas['PSNR'], meas['SSIM'], meas['LPIPS'] = measure.measure(sr, hr)
lr_reconstruct_rgb = imresize(sr, 1 / opt['scale'])
meas['LRC PSNR'] = psnr(lr, lr_reconstruct_rgb)
        df = pd.DataFrame([meas]) if df is None else pd.concat([pd.DataFrame([meas]), df])
# Dash dependencies import
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_uploader as du
import uuid
import pathlib
import dash_bootstrap_components as dbc
import plotly.figure_factory as ff
from dash.dependencies import Input, Output,State
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import pandas as pd
import numpy as np
px.defaults.template = "ggplot2"
# End Dash dependencies import
# Data preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
# ML Algorithm
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
# Model evaluation
from sklearn.metrics import accuracy_score, precision_score, recall_score,f1_score,confusion_matrix,roc_curve,roc_auc_score
# Save model
import os
import io
import shutil
import joblib
from app import app, server
PATH=pathlib.Path(__file__).parent
DATA_PATH=PATH.joinpath("../datasets").resolve()
TELCO_CHURN_FILE_UPLOADS_DATA_PATH=PATH.joinpath("../datasets/telco_churn_file_uploads").resolve()
du.configure_upload(app, TELCO_CHURN_FILE_UPLOADS_DATA_PATH, use_upload_id=False)
TELCO_CHURN_MODEL_DATA_PATH=PATH.joinpath("../Notebooks/Churn Models").resolve()
feat_importance_df=pd.read_csv(DATA_PATH.joinpath("feature-importance.csv"))
df=pd.read_csv(DATA_PATH.joinpath("telco-customer-churn.csv"))
telco_churm_metrics_df=pd.read_json(TELCO_CHURN_MODEL_DATA_PATH.joinpath("model_metrics.json"), orient ='split', compression = 'infer')
joblib_model = joblib.load(TELCO_CHURN_MODEL_DATA_PATH.joinpath("best_gridsearch_model_pipeline.pkl"))
df['TotalCharges']=pd.to_numeric(df['TotalCharges'], errors='coerce')
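# errors='coerce' turns any non-numeric TotalCharges entries (e.g. blank strings) into NaN
# instead of raising, so the column can be treated as float downstream.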
# Revenue distribution
def distribution_by_revenue(df):
totalcharges_attrition_df=df.groupby( ["Churn"], as_index=False )["TotalCharges"].sum()
totalcharges_attrition_df=totalcharges_attrition_df.sort_values(by=['TotalCharges'],ascending=True)
totalcharges_attrition_df.columns=['Churn','Revenue']
colors = ['crimson','skyblue']
totalcharges_attrition_df=totalcharges_attrition_df.round(0)
fig=px.bar(totalcharges_attrition_df,x='Churn',y='Revenue',color='Churn',text='Revenue',color_discrete_sequence=colors,
title='Churn by Revenue')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.40),autosize=True,margin=dict(t=30,b=0,l=0,r=0))
return fig
# churn distribution
def churn_distribution(df):
attrition_df=df.groupby(["Churn"], as_index=False )["customerID"].count()
colors = ['skyblue','crimson']
fig = go.Figure(data=[go.Pie(labels=attrition_df['Churn'].tolist(), values=attrition_df['customerID'].tolist(), hole=.3)])
fig.update_layout(title={'text': 'Customer Churn Distribution','y':0.9,'x':0.5, 'xanchor': 'center','yanchor': 'top'},
showlegend=False,autosize=True,annotations=[dict(text='Attrition', font_size=20, showarrow=False)],margin=dict(t=100,b=0,l=0,r=0),height=350,colorway=colors)
return fig
# gender_attrition_df
def churn_by_gender(df):
gender_attrition_df=df.groupby(["Churn","gender"], as_index=False )["customerID"].count()
gender_attrition_df.columns=['Churn','Gender','Customers']
colors = ['skyblue','crimson']
fig=px.bar(gender_attrition_df,x='Gender',y='Customers',color='Churn',text='Customers',color_discrete_sequence=colors,
title='Churn by Gender')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.46),autosize=True,margin=dict(t=30,b=0,l=0,r=0)) #use barmode='stack' when stacking,
return fig
def churn_by_contract(df):
contract_attrition_df=df.groupby(["Churn","Contract"], as_index=False )["customerID"].count()
contract_base_df=df.groupby(["Contract"], as_index=False )["customerID"].count()
contract_base_df['Churn']='Customer Base'
contract_attrition_df=contract_attrition_df.append(contract_base_df, ignore_index = True)
contract_attrition_df.columns=['Churn','Contract','Customers']
contract_attrition_df=contract_attrition_df.sort_values(by=['Contract', 'Customers'],ascending=True)
colors = ['crimson','skyblue','teal']
fig=px.bar(contract_attrition_df,x='Contract',y='Customers',color='Churn',text='Customers',color_discrete_sequence=colors,barmode="group",
title='Churn by Customer Contract Type')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.50),autosize=True,margin=dict(t=30,b=0,l=0,r=0)) #use barmode='stack' when stacking,
return fig
def churn_by_monthlycharges(df):
churn_dist = df[df['Churn']=='Yes']['MonthlyCharges']
no_churn_dist = df[df['Churn']=='No']['MonthlyCharges']
group_labels = ['No Churn', 'Churn Customers']
colors = ['teal','crimson']
fig = ff.create_distplot([no_churn_dist,churn_dist], group_labels, bin_size=[1, .10],
curve_type='kde', show_rug=False, colors=colors)# override default 'kde' or 'normal'
fig.update_layout(title={'text': 'Customer Churn Distribution by Monthly Charges','y':0.9,'x':0.5, 'xanchor': 'center','yanchor': 'top'},
legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.50),autosize=True,margin=dict(t=50,b=0,l=0,r=0)) #use barmode='stack' when stacking,
return fig
def tenure_charges_correlation(df):
df_correlation=df[['tenure','MonthlyCharges','TotalCharges']].corr()
fig=px.imshow(df_correlation,title='Tenure, Monthly and Total Charges Correlation')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.40),autosize=True,margin=dict(t=30,b=0,l=0,r=0))
return fig
def churn_by_citizenship(df):
citizenship_attrition_df=df.groupby( [ "Churn","SeniorCitizen"], as_index=False )["customerID"].count()
citizenship_base_df=df.groupby(["SeniorCitizen"], as_index=False )["customerID"].count()
citizenship_base_df['Churn']='Customer Base'
citizenship_attrition_df=pd.concat([citizenship_attrition_df, citizenship_base_df], ignore_index=True)
citizenship_attrition_df.columns=['Churn','Citizenship','Customers']
citizenship_attrition_df=citizenship_attrition_df.sort_values(by=['Citizenship', 'Customers'],ascending=False)
colors = ['teal','skyblue','crimson']
fig=px.bar(citizenship_attrition_df,x='Customers',y=['Citizenship'],color='Churn',text='Customers',orientation="h",color_discrete_sequence=colors,barmode="group",
title='Churn by Citizenship')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.50),autosize=True,margin=dict(t=30,b=0,l=0,r=0))
return fig
def churn_by_tenure(df):
tenure_attrition_df=df.groupby( [ "Churn","tenure"], as_index=False )["customerID"].count()
tenure_attrition_df.columns=['Churn','Tenure','Customers']
colors = ['skyblue','crimson']
tenure_attrition_df=tenure_attrition_df.round(0)
fig = px.treemap(tenure_attrition_df, path=['Churn', 'Tenure'], values='Customers',color_discrete_sequence=colors,
title='Churn by Customer Tenure')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.50),autosize=True,margin=dict(t=30,b=0,l=0,r=0))
return fig
def data_summary(df):
data_summary_df=pd.DataFrame(df.describe())
data_summary_df.reset_index(level=0, inplace=True)
data_summary_df=data_summary_df.drop(columns='SeniorCitizen')
data_summary_df.columns=['Metric','Tenure','MonthlyCharges','TotalCharges']
fig = go.Figure(data=[go.Table(header=dict(values=list(data_summary_df.columns),fill_color='paleturquoise',
align='left'),cells=dict(values=[data_summary_df.Metric, data_summary_df.Tenure, data_summary_df.MonthlyCharges, data_summary_df.TotalCharges],
fill_color='lavender',align='left'))])
fig.update_layout(showlegend=False,autosize=True,margin=dict(t=0,b=0,l=0,r=0),height=350)
return fig
def churn_by_payment_method(df):
PaymentMethod_attrition_df=df.groupby( [ "Churn","PaymentMethod"], as_index=False )["customerID"].count()
PaymentMethod_base_df=df.groupby(["PaymentMethod"], as_index=False )["customerID"].count()
PaymentMethod_base_df['Churn']='Customer Base'
PaymentMethod_attrition_df=pd.concat([PaymentMethod_attrition_df, PaymentMethod_base_df], ignore_index=True)
PaymentMethod_attrition_df.columns=['Churn','PaymentMethod','Customers']
PaymentMethod_attrition_df=PaymentMethod_attrition_df.sort_values(by=['PaymentMethod', 'Customers'],ascending=True)
colors = ['crimson','skyblue','teal']
fig=px.bar(PaymentMethod_attrition_df,x='PaymentMethod',y='Customers',color='Churn',text='Customers',color_discrete_sequence=colors,barmode="group",
title='Churn by Payment Method')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.40),autosize=True,margin=dict(t=30,b=0,l=0,r=0)) #use barmode='stack' when stacking,
return fig
def churn_by_techsupport(df):
techsupport_attrition_df=df.groupby( [ "Churn","TechSupport"], as_index=False )["customerID"].count()
techsupport_base_df=df.groupby(["TechSupport"], as_index=False )["customerID"].count()
techsupport_base_df['Churn']='Customer Base'
techsupport_attrition_df=pd.concat([techsupport_attrition_df, techsupport_base_df], ignore_index=True)
techsupport_attrition_df.columns=['Churn','TechSupport','Customers']
techsupport_attrition_df=techsupport_attrition_df.sort_values(by=['TechSupport', 'Customers'],ascending=True)
colors = ['crimson','skyblue','teal']
fig=px.bar(techsupport_attrition_df,x='TechSupport',y='Customers',color='Churn',text='Customers',color_discrete_sequence=colors,barmode="group",
title='Churn by Tech Support')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.50),autosize=True,margin=dict(t=30,b=0,l=0,r=0)) #use barmode='stack' when stacking,
return fig
#### MODELING ####
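# feature_correlation below imputes TotalCharges, recodes Churn to 0/1, one-hot encodes the
# categorical columns, and then plots the Pearson correlation of every resulting column with
# the binary Churn target, split into positive and negative bars.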
def feature_correlation(df):
df['TotalCharges']=df['TotalCharges'].fillna(df['TotalCharges'].mean()) # Impute TotalCharges null values with mean TotalCharges
df['Churn'].replace(to_replace='Yes', value=1, inplace=True)
df['Churn'].replace(to_replace='No', value=0, inplace=True)
df['SeniorCitizen'] = df['SeniorCitizen'].astype(str) # convert SeniorCitizen column to string
data_columns=['gender','Partner','Dependents','PhoneService','MultipleLines','InternetService','OnlineSecurity','OnlineBackup','DeviceProtection', 'TechSupport', 'StreamingTV','StreamingMovies','Contract', 'PaperlessBilling', 'PaymentMethod','SeniorCitizen']
df=pd.get_dummies(df,columns=data_columns)
churn_corr_df=pd.DataFrame(df.corr()['Churn'])
churn_corr_df.reset_index(level=0, inplace=True)
churn_corr_df.columns=['Features','Correlation']
churn_corr_df["Correlation Type"] = np.where(churn_corr_df["Correlation"]<0, 'negative', 'positive')
churn_corr_df=churn_corr_df.sort_values(by=['Correlation'],ascending=False)
churn_corr_df=churn_corr_df[~churn_corr_df['Features'].isin(['Churn'])]
colors = ['skyblue','orange']
fig=px.bar(churn_corr_df,x='Features',y='Correlation',color='Correlation Type',text='Correlation',color_discrete_sequence=colors,
title='Features Correlation')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.50),autosize=True,margin=dict(t=30,b=0,l=0,r=0))
return fig
def feature_importance(feat_importance_df):
feat_importance_df=feat_importance_df.sort_values(by=['Importance'],ascending=False)
fig=px.bar(feat_importance_df,x='Features',y='Importance',text='Importance',color='Importance',height=650,title='Random Forest Feature Importance')
fig.update_layout(legend=dict(yanchor="top",y=0.99,xanchor="left",x=0.01),autosize=True,margin=dict(t=30,b=0,l=0,r=0))
return fig
def telco_churn_model_metrics_summary(telco_churm_metrics_df):
unpivoted_metric_df=telco_churm_metrics_df[telco_churm_metrics_df['Type']=='Metric'][['Model','Accuracy','Precision','Recall','F_1_Score','AUC_Score']]
unpivoted_metric_df=unpivoted_metric_df.melt(id_vars=['Model'], var_name='Metrics', value_name='Score').sort_values(by=['Score'],ascending=True)
colors = ['crimson','skyblue','teal','orange']
fig=px.bar(unpivoted_metric_df,x='Metrics',y='Score',color='Model',text='Score',color_discrete_sequence=colors,barmode="group",title='Model Performance Metrics')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.01),autosize=True,margin=dict(t=30,b=0,l=0,r=0)) #use barmode='stack' when stacking,
return fig
def uac_roc(telco_churm_metrics_df):
uac_roc_df=telco_churm_metrics_df[telco_churm_metrics_df['Type']=='ROC'][['Model','Confusion_Matrix_ROC']]
uac_roc_df=uac_roc_df.sort_values(by=['Model'],ascending=True)
uac_roc_df=uac_roc_df.set_index('Model').transpose()
uac_roc_fig = go.Figure()
uac_roc_fig.add_trace(go.Scatter(x=uac_roc_df['Logistic Regression FPR'][0], y=uac_roc_df['Logistic Regression TPR'][0],name='Logistic Regression',
line = dict(color='teal', width=2),line_shape='spline'))
uac_roc_fig.add_trace(go.Scatter(x=uac_roc_df['Random Forest FPR'][0], y=uac_roc_df['Random Forest TPR'][0],name='Random Forest',
line = dict(color='royalblue', width=2),line_shape='spline'))
uac_roc_fig.add_trace(go.Scatter(x=uac_roc_df['Support Vector Machine FPR'][0], y=uac_roc_df['Support Vector Machine TPR'][0],name='Support Vector Machine',
line = dict(color='orange', width=2),line_shape='spline'))
uac_roc_fig.add_trace(go.Scatter(x=np.array([0., 1.]), y=np.array([0., 1.]),name='Random Guess',
line = dict(color='firebrick', width=4, dash='dash')))
uac_roc_fig.update_layout(title={'text': 'AUC-ROC Model Evaluation','y':0.9,'x':0.5, 'xanchor': 'center','yanchor': 'top'},
legend=dict(yanchor="bottom",y=0.05,xanchor="right",x=0.95),autosize=True,margin=dict(t=70,b=0,l=0,r=0))
return uac_roc_fig
def random_forest_confusion_matrix(telco_churm_metrics_df):
con_matrix_df=telco_churm_metrics_df[telco_churm_metrics_df['Type']=='Confusion_Matrix'][['Model','Confusion_Matrix_ROC']]
con_matrix_df.reset_index(level=0, inplace=True)
random_f_z=con_matrix_df['Confusion_Matrix_ROC'][1]
random_f_z= random_f_z[::-1]
x=['TP','FP']
y = x[::-1].copy()
random_f_z_text = [[str(y) for y in x] for x in random_f_z]
colorscale = [[0, 'orange'], [1, 'teal']]
font_colors = ['white', 'black']
fig = ff.create_annotated_heatmap(random_f_z,x=x, y=y, annotation_text=random_f_z_text, hoverinfo='z',colorscale=colorscale)
fig.update_layout(title_text='Random Forest',autosize=True,margin=dict(t=30,b=0,l=0,r=0))
return fig
def logistic_regression_confusion_matrix(telco_churm_metrics_df):
con_matrix_df=telco_churm_metrics_df[telco_churm_metrics_df['Type']=='Confusion_Matrix'][['Model','Confusion_Matrix_ROC']]
con_matrix_df.reset_index(level=0, inplace=True)
logistic_z=con_matrix_df['Confusion_Matrix_ROC'][0]
logistic_z= logistic_z[::-1]
x=['TP','FP']
y = x[::-1].copy()
logistic_z_text = [[str(y) for y in x] for x in logistic_z]
colorscale = [[0, 'skyblue'], [1, 'green']]
fig = ff.create_annotated_heatmap(logistic_z,x=x, y=y, annotation_text=logistic_z_text, hoverinfo='z',colorscale=colorscale)
fig.update_layout(title_text='Logistic Regression',autosize=True,margin=dict(t=30,b=0,l=0,r=0))
return fig
def svm_confusion_matrix(telco_churm_metrics_df):
con_matrix_df=telco_churm_metrics_df[telco_churm_metrics_df['Type']=='Confusion_Matrix'][['Model','Confusion_Matrix_ROC']]
con_matrix_df.reset_index(level=0, inplace=True)
svm_z=con_matrix_df['Confusion_Matrix_ROC'][2]
svm_z= svm_z[::-1]
x=['TP','FP']
y = x[::-1].copy()
svm_z_text = [[str(y) for y in x] for x in svm_z]
colorscale = [[0, 'crimson'], [1, 'green']]
fig = ff.create_annotated_heatmap(svm_z,x=x, y=y, annotation_text=svm_z_text, hoverinfo='z',colorscale=colorscale)
fig.update_layout(title_text='Support Vector Machine',autosize=True,margin=dict(t=30,b=0,l=0,r=0))
return fig
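# Page layout: a dbc.Container holding a navbar and three tabs -- "Explore Data" (descriptive
# charts built once from the full dataset at import time), "Ml Modeling" (feature correlation,
# feature importance, AUC-ROC curves, confusion matrices and a metrics summary from the saved
# model artefacts), and "Ml Prediction" (a single-customer form plus a CSV upload for batch
# scoring, whose empty figures are filled in by the callbacks defined after the layout).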
layout=dbc.Container([
dbc.NavbarSimple(
children=[
dbc.NavItem(dbc.NavLink("Customer Churn", active=True,href="/apps/telco_customer_churn")),
dbc.NavItem(dbc.NavLink("Customer Survival Analysis", active=False,href="/apps/telco_customer_survival_analysis")),
dbc.NavItem(dbc.NavLink("Customer Lifetime Value", active=False,href="/apps/customer_lifetime_value")),
dbc.NavItem(dbc.NavLink("Customer Segmentation", active=False,href="/apps/customer_segmentation")),
# dbc.NavItem(dbc.NavLink("Explore", active=True,href="/apps/explore")),
# dbc.NavItem(dbc.NavLink("Clean", active=True,href="#")),
# dbc.NavItem(dbc.NavLink("Analyse", active=True,href="#")),
# dbc.NavItem(dbc.NavLink("Model", active=True, href="#"))
],
brand="Qaml",
brand_href="/apps/home",
color="primary",
dark=True,
style={'margin-bottom': '2px'},
),#end navigation
dbc.Tabs(
[
# Explore Data Tab
dbc.Tab(
# Explore Data Body
html.Div(
[
#Cards Row.
dbc.Row(
[
dbc.Col(dbc.Card(dbc.CardBody( [
html.H1(df.shape[0], className="card-title"),
html.P(
"Total Customers",
className="card-text",
),
],
style={'text-align': 'center'}
), color="primary", inverse=True), style={'margin-top': '30px'}, md=2),
dbc.Col(dbc.Card(dbc.CardBody( [
html.H1(df[df['Churn']=='Yes']['customerID'].count(), className="card-title"),
html.P(
"Churned Cust",
className="card-text",
),
],
style={'text-align': 'center'}
), color="primary", inverse=True), style={'margin-top': '30px'}, md=2),
dbc.Col(dbc.Card(dbc.CardBody( [
html.H1(df[df['Churn']=='No']['customerID'].count(), className="card-title"),
html.P(
"Remained Cust",
className="card-text",
),
],
style={'text-align': 'center'}
), color="primary", inverse=True), style={'margin-top': '30px'}, md=2),
dbc.Col(dbc.Card(dbc.CardBody( [
html.H1(round(df[df['Churn']=='Yes']['TotalCharges'].sum()/1000,2), className="card-title"),
html.P(
"Churned Customer Rev. (K)",
className="card-text",
),
],
style={'text-align': 'center'}
), color="primary", inverse=True), style={'margin-top': '30px'}, md=3),
dbc.Col(dbc.Card(dbc.CardBody( [
html.H1(round(df[df['Churn']=='No']['TotalCharges'].sum()/1000,2), className="card-title"),
html.P(
"Remained Customer Rev. (K)",
className="card-text",
),
],
style={'text-align': 'center'}
), color="primary", inverse=True), style={'margin-top': '30px'}, md=3),
]
),
#1.
dbc.Row(
[
dbc.Col(html.Div([
dcc.Graph(
id='churn-distribution',
figure=churn_distribution(df),
config={'displayModeBar': False },
),
]
),
style={
'margin-top': '30px'
},
md=3),
#2.
dbc.Col(html.Div([
dcc.Graph(
id='churn_by_gender',
figure=churn_by_gender(df),
config={'displayModeBar': False }
),
]
),
style={
'margin-top': '30px'
},
md=3),
#3.
dbc.Col(html.Div([
dcc.Graph(
id='churn-by-contract',
figure=churn_by_contract(df),
config={'displayModeBar': False }
),
]
),
style={
'margin-top': '30px'
},
md=6),
]
),
# 4.
dbc.Row(
[
dbc.Col(html.Div([
dcc.Graph(
id='distribution-by-revenue',
figure=distribution_by_revenue(df),
config={'displayModeBar': False }
),
]
),
style={
'margin-top': '30px'
},
md=4),
dbc.Col(html.Div([
dcc.Graph(
id='churn-by-monthlycharges',
figure=churn_by_monthlycharges(df),
config={'displayModeBar': False }
),
]
),
style={
'margin-top': '30px'
},
md=8),
]
),
dbc.Row(
[
dbc.Col(html.Div([
dcc.Graph(
id='churn-by-citizenship',
figure=churn_by_citizenship(df),
config={'displayModeBar': False }
),
]
),
style={
'margin-top': '30px'
},
md=4),
dbc.Col(html.Div([
dcc.Graph(
id='tenure-charges-correlation',
figure=tenure_charges_correlation(df),
config={'displayModeBar': False }
),
]
),
style={
'margin-top': '30px'
},
md=8),
]
),
dbc.Row(
[
dbc.Col(html.Div([
dcc.Graph(
id='churn-by-tenure',
figure=churn_by_tenure(df),
config={'displayModeBar': False }
),
]
),
style={
'margin-top': '30px'
},
md=12),
]
),
dbc.Row(
[
dbc.Col(html.Div([
dcc.Graph(
id='churn-by-techsupport',
figure=churn_by_techsupport(df),
config={'displayModeBar': False }
),
]
),
style={
'margin-top': '30px'
},
md=5),
dbc.Col(html.Div([
dcc.Graph(
id='churn-by-payment_method',
figure=churn_by_payment_method(df),
config={'displayModeBar': False }
),
]
),
style={
'margin-top': '30px'
},
md=7),
]
),
# footer
dbc.Row(
[
dbc.Col(html.Div("@galaxydataanalytics "),
style={
'margin-top': '2px',
'text-align':'center',
'backgroundColor': 'rgba(120,120,120,0.2)'
},
md=12)
]
),
#end footer
],
style={
'padding-left': '3px',
'padding-right': '3px'
},
),
#End Explore Data Body
label="Explore Data"), # Explore Data Tab Name
# Ml Modeling Tab
dbc.Tab(
# Ml Modeling Body
html.Div(
[
#1.
dbc.Row(
[
dbc.Col(html.Div([
dcc.Graph(
id='feature-correlation',
figure=feature_correlation(df),
config={'displayModeBar': False }
),
]
),
style={
'margin-top': '30px'
},
md=12),
]
),
# 4.
dbc.Row(
[
dbc.Col(html.Div([
dcc.Graph(
id='feature-importance',
figure=feature_importance(feat_importance_df),
config={'displayModeBar': False }
),
]
),
style={
'margin-top': '30px'
},
md=12),
]
),
dbc.Row(
[
dbc.Col(html.Div([
dcc.Graph(
id='uac-roc',
figure=uac_roc(telco_churm_metrics_df),
config={'displayModeBar': False }
),
]
),
style={
'margin-top': '30px'
},
md=6),
dbc.Col(html.Div([
dcc.Graph(
id='random-forest-confusion-matrix',
figure=random_forest_confusion_matrix(telco_churm_metrics_df),
config={'displayModeBar': False }
),
]
),
style={
'margin-top': '30px'
},
md=6),
]
),
dbc.Row(
[
dbc.Col(html.Div([
dcc.Graph(
id='logistic-regression-confusion-matrix',
figure=logistic_regression_confusion_matrix(telco_churm_metrics_df),
config={'displayModeBar': False }
),
]
),
style={
'margin-top': '30px'
},
md=6),
dbc.Col(html.Div([
dcc.Graph(
id='svm-confusion-matrix',
figure=svm_confusion_matrix(telco_churm_metrics_df),
config={'displayModeBar': False }
),
]
),
style={
'margin-top': '30px'
},
md=6),
]
),
dbc.Row(
[
dbc.Col(html.Div([
dcc.Graph(
id='telco-churn-model-metrics-summary',
figure=telco_churn_model_metrics_summary(telco_churm_metrics_df),
config={'displayModeBar': False }
),
]
),
style={
'margin-top': '30px'
},
md=12),
]
),
# footer
dbc.Row(
[
dbc.Col(html.Div("@galaxydataanalytics "),
style={
'margin-top': '2px',
'text-align':'center',
'backgroundColor': 'rgba(120,120,120,0.2)'
},
md=12)
]
),
#end footer
],
style={
'padding-left': '3px',
'padding-right': '3px'
},
),
#End Ml Modeling Body
label="Ml Modeling"), # Ml Modeling Tab Name
# Ml Prediction Tab
dbc.Tab(
# Ml Prediction Body
html.Div(
[
#1.
dbc.Row(
[
dbc.Col(
html.Div(
[
dcc.Dropdown(
id="gender-input", placeholder="Select Gender...", options=[
{"label": "Male", "value": "Male"},
{"label": "Female", "value": "Female"},
],
),
html.Br(),
dcc.Dropdown(
id="citizen-input", placeholder="Select Citizen Seniority...", options=[
{"label": "Senior", "value": "1.0"},
{"label": "Junior", "value": "0.0"},
],
),
html.Br(),
dcc.Dropdown(
id="partner-input", placeholder="Select Partner...", options=[
{"label": "Yes", "value": "Yes"},
{"label": "No", "value": "No"},
],
),
html.Br(),
dcc.Dropdown(
id="dependents-input", placeholder="Select Dependents...", options=[
{"label": "Yes", "value": "Yes"},
{"label": "No", "value": "No"},
],
),
html.Br(),
dcc.Dropdown(
id="phone-service-input", placeholder="Select Phone Service...", options=[
{"label": "Yes", "value": "Yes"},
{"label": "No", "value": "No"},
],
),
html.Br(),
dcc.Dropdown(
id="multipleLines-input", placeholder="Select Multiple lines...", options=[
{"label": "Yes", "value": "Yes"},
{"label": "No", "value": "No"},
{"label": "No phone service", "value": "No phone service"},
],
),
html.Br(),
dbc.Input(id="tenure-input", placeholder="Enter Tenure...", type="Number", min=0, max=100),
]
),
style={
'margin-top': '30px'
},
md=4),
#2.
dbc.Col(
html.Div(
[
dcc.Dropdown(
id="internet-service-input", placeholder="Select Internet Service...", options=[
{"label": "DSL", "value": "DSL"},
{"label": "Fiber optic", "value": "Fiber optic"},
{"label": "No", "value": "No"},
],style={'margin-bottom': '5px'}
),
html.Br(),
dcc.Dropdown(
id="online-security-input", placeholder="Select Online Security...", options=[
{"label": "Yes", "value": "Yes"},
{"label": "No", "value": "No"},
{"label": "No internet service", "value": "No internet service"},
],style={'margin-bottom': '5px'}
),
html.Br(),
dcc.Dropdown(
id="online-backup-input", placeholder="Select Online backup...", options=[
{"label": "Yes", "value": "Yes"},
{"label": "No", "value": "No"},
{"label": "No internet service", "value": "No internet service"},
],style={'margin-bottom': '5px'}
),
html.Br(),
dcc.Dropdown(
id="device-protection-input", placeholder="Select Device Protection...", options=[
{"label": "Yes", "value": "Yes"},
{"label": "No", "value": "No"},
{"label": "No internet service", "value": "No internet service"},
],style={'margin-bottom': '5px'}
),
html.Br(),
dcc.Dropdown(
id="techsupport-input", placeholder="Select Tech Support...", options=[
{"label": "Yes", "value": "Yes"},
{"label": "No", "value": "No"},
{"label": "No internet service", "value": "No internet service"},
],style={'margin-bottom': '5px'}
),
html.Br(),
dcc.Dropdown(
id="streaming-tv-input", placeholder="Select Streaming Tv...", options=[
{"label": "Yes", "value": "Yes"},
{"label": "No", "value": "No"},
{"label": "No internet service", "value": "No internet service"},
],style={'margin-bottom': '5px'}
),
html.Br(),
dbc.Button("predict", id="predict-input", className="mr-2"),
]
),
style={
'margin-top': '30px'
},
md=4),
dbc.Col(
html.Div(
[
dcc.Dropdown(
id="streaming-movies-input", placeholder="Select Streaming Movies...", options=[
{"label": "Yes", "value": "Yes"},
{"label": "No", "value": "No"},
{"label": "No internet service", "value": "No internet service"},
],style={'margin-bottom': '5px'}
),
html.Br(),
dcc.Dropdown(
id="contract-input", placeholder="Select Contract Type...", options=[
{"label": "Month-to-month", "value": "Month-to-month"},
{"label": "One year", "value": "One year"},
{"label": "Two year", "value": "Two year"},
],style={'margin-bottom': '5px'}
),
html.Br(),
dcc.Dropdown(
id="paperless-billing-input", placeholder="Select Paperless Billing...", options=[
{"label": "Yes", "value": "Yes"},
{"label": "No", "value": "No"},
],style={'margin-bottom': '5px'}
),
html.Br(),
dcc.Dropdown(
id="payment-method-input", placeholder="Select Payment Method...", options=[
{"label": "Electronic check", "value": "Electronic check"},
{"label": "Mailed check", "value": "Mailed check"},
{"label": "Bank transfer (automatic)", "value": "Bank transfer (automatic)"},
{"label": "Credit card (automatic)", "value": "Credit card (automatic)"},
],style={'margin-bottom': '5px'}
),
html.Br(),
dbc.Input(id="monthly-charges-input", placeholder="Enter Monthly Charges...", type="Number", min=0, max=1000000),
html.Br(),
dbc.Input(id="total-charges-input", placeholder="Enter Total Charges...", type="Number", min=0, max=10000000),
]
),
style={
'margin-top': '30px'
},
md=4),
#3.
dbc.Col(html.Div(
[
html.H6("Batch Prediction") ,
]
),
style={
'margin-top': '30px'
},
md=4),
]
),
dbc.Row(
[
dbc.Col(html.Div(
du.Upload( id='upload-file',
max_file_size=2, # 2 Mb max file size
filetypes=['csv'],
# upload_id=uuid.uuid1(), # Unique session id
text='Drag and Drop a File Here to upload!',
text_completed='File Successfully Uploaded: ',
),
),
md=5),
dbc.Col(html.Div(
dbc.Button("Batch Predict", id="create-analysis-input", className="mr-2", color="info")
),
md=2),
dbc.Col(html.Div(
dbc.Alert(id="prediction-output", color="success"),
),
md=5),
]
),
# ========= Remove the table =============
dbc.Row(
[
dbc.Col(html.Div(
# dcc.Graph(id='prediction-output-table',figure={})
),
md=12)
]
),
# =========== End Remove Table ================
#1.
dbc.Row(
[
dbc.Col(html.Div([
dcc.Graph(
id='churn-distribution-pred',
figure={},
config={'displayModeBar': False },
),
]
),
style={
'margin-top': '30px'
},
md=3),
#2.
dbc.Col(html.Div([
dcc.Graph(
id='churn-by-gender-pred',
figure={},
config={'displayModeBar': False }
),
]
),
style={
'margin-top': '30px'
},
md=3),
#3.
dbc.Col(html.Div([
dcc.Graph(
id='churn-by-contract-pred',
figure={},
config={'displayModeBar': False }
),
]
),
style={
'margin-top': '30px'
},
md=6),
]
),
dbc.Row(
[
dbc.Col(html.Div([
dcc.Graph(
id='revenue-distribution-pred',
figure={},
config={'displayModeBar': False },
),
]
),
style={
'margin-top': '30px'
},
md=4),
#2.
dbc.Col(html.Div([
dcc.Graph(
id='churn-by-techsupport-pred',
figure={},
config={'displayModeBar': False }
),
]
),
style={
'margin-top': '30px'
},
md=8),
]
),
dbc.Row(
[
dbc.Col(html.Div([
dcc.Graph(
id='citizenship-distribution-pred',
figure={},
config={'displayModeBar': False },
),
]
),
style={
'margin-top': '30px'
},
md=6),
#2.
dbc.Col(html.Div([
dcc.Graph(
id='churn-by-payment_method-pred',
figure={},
config={'displayModeBar': False }
),
]
),
style={
'margin-top': '30px'
},
md=6),
]
),
dbc.Row(
[
dbc.Col(html.Div([
dcc.Graph(
id='churn-by-tenure-pred',
figure={},
config={'displayModeBar': False }
),
]
),
style={
'margin-top': '30px'
},
md=12),
]
),
# footer
dbc.Row(
[
dbc.Col(html.Div("@galaxydataanalytics "),
style={
'margin-top': '2px',
'text-align':'center',
'backgroundColor': 'rgba(120,120,120,0.2)'
},
md=12),
dbc.Col(
# Hidden div inside the app that stores the intermediate value
html.Div(id='global-dataframe'),
# , style={'display': 'none'}
style={'display': 'none'},
md=0),
]
),
#end footer
],
style={
'padding-left': '3px',
'padding-right': '3px'
},
),
#End Ml Prediction Body
label="Ml Prediction"), # Ml Prediction Tab Name
]
)
],
fluid=True
)
# @app.callback(
# Output('global-dataframe', 'children'),
# Input('fetch-data-input','n_clicks'),
# State('tweet-topics-input','value'),
# State('number-of-tweets-input','value'),
# )
# def global_dataframe(n,tweet_topics,number_of_tweets):
# date_since =pd.to_datetime('today').strftime("%Y-%m-%d")
# #Define the cursor
# tweets = tw.Cursor(api.search, q=tweet_topics, lang="en", since=date_since).items(int(number_of_tweets))
# # Clean text
# text_preprocess = lambda x: re.compile('\#').sub('', re.compile('RT @').sub('@', x).strip())
# # Create DataFrame
# users_locs = [[tweet.user.screen_name,tweet.user.name,tweet.user.verified,
# tweet.user.followers_count,tweet.user.friends_count,tweet.user.listed_count,
# tweet.retweet_count,tweet.favorite_count,tweet.retweeted,tweet.entities,
# tweet.user.favourites_count,
# tweet.user.location,tweet.created_at,tweet.text,
# re.sub(r"http\S+", "", re.sub('@[^\s]+','',text_preprocess(tweet.text))),
# TextBlob(re.sub(r"http\S+", "", re.sub('@[^\s]+','',text_preprocess(tweet.text)))).sentiment[0],
# TextBlob(re.sub(r"http\S+", "", re.sub('@[^\s]+','',text_preprocess(tweet.text)))).sentiment[1]
# ] for tweet in tweets]
# cols=columns=['screen_name','name','user_verification','followers_count','friends_count',
# 'listed_count','retweet_count','favorite_count','retweeted','entities','favourites_count',
# 'location','created_at','text','clean_text','sentiment_polarity','sentiment_subjectivity']
# tweet_df = pd.DataFrame(data=users_locs, columns=cols)
# tweet_df["sentiment_polarity_color"] = np.where(tweet_df["sentiment_polarity"]<0, 'red', 'green')
# return tweet_df.to_json(date_format='iso', orient='split')
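# Single-customer scoring callback: fired by the "predict" button, it collects the form values
# from the "Ml Prediction" tab into a one-row frame and returns the churn prediction together
# with its confidence.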
@app.callback(
Output("prediction-output", "children"),
Input("predict-input", "n_clicks"),
[State("gender-input", "value"),
State("citizen-input","value"),
State("partner-input","value"),
State("dependents-input","value"),
State("phone-service-input","value"),
State("tenure-input","value"),
State("multipleLines-input","value"),
State("internet-service-input","value"),
State("online-security-input","value"),
State("online-backup-input","value"),
State("device-protection-input","value"),
State("techsupport-input","value"),
State("streaming-tv-input","value"),
State("streaming-movies-input","value"),
State("contract-input","value"),
State("paperless-billing-input","value"),
State("payment-method-input","value"),
State("monthly-charges-input","value"),
State("total-charges-input","value")
]
,
prevent_initial_call=False
)
def on_button_click(n,gender,citizen,partner,dependents,phone_service,tenure,multiple_lines,internet_service,online_security,online_backup,
device_protection,techsupport,streaming_tv,streaming_movies,contract,paperless_billing,payment_method,
monthly_charges,total_charges):
pred_dict={"ID":"1","gender":str(gender), "SeniorCitizen":float(citizen),"Partner":str(partner),"Dependents":str(dependents),
"tenure":int(tenure),"PhoneService":str(phone_service),"MultipleLines":str(multiple_lines),"InternetService":str(internet_service),"OnlineSecurity":str(online_security),
"OnlineBackup":str(online_backup), "DeviceProtection":str(device_protection),"TechSupport":str(techsupport),"StreamingTV":str(streaming_tv),
"StreamingMovies":str(streaming_movies),"Contract":str(contract),"PaperlessBilling":str(paperless_billing),"PaymentMethod":str(payment_method),
"MonthlyCharges":float(monthly_charges),"TotalCharges":float(total_charges) }
pred_columns=['ID','gender', 'SeniorCitizen', 'Partner', 'Dependents','tenure', 'PhoneService', 'MultipleLines', 'InternetService',
'OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport','StreamingTV', 'StreamingMovies', 'Contract', 'PaperlessBilling',
'PaymentMethod', 'MonthlyCharges', 'TotalCharges']
pred_data=pd.DataFrame(pred_dict,columns=pred_columns, index=[0])
pred_data.to_csv(DATA_PATH.joinpath("telco_pred_data.csv")) # for reference
df=pd.read_csv(DATA_PATH.joinpath("telco-customer-churn.csv")) # use the data to process user input
df.set_index("customerID", inplace = True)
df=df.drop(columns=['Churn'])
df['TotalCharges']=pd.to_numeric(df['TotalCharges'], errors='coerce')
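# Appending the single input row to the full dataset (rather than encoding it alone) keeps the
# dummy columns consistent with the training data, so the saved pipeline sees the feature layout
# it expects; after scaling, only the last row (.tail(1)) is actually scored.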
pred_df=pd.concat([df, pred_data])
pred_df['SeniorCitizen']=pred_df['SeniorCitizen'].fillna(pred_df['SeniorCitizen'].max())
pred_df['SeniorCitizen']=pred_df['SeniorCitizen'].apply(np.int64)
pred_df_columns=['gender','Partner','Dependents','PhoneService','MultipleLines','InternetService','OnlineSecurity','OnlineBackup','DeviceProtection', 'TechSupport', 'StreamingTV','StreamingMovies','Contract', 'PaperlessBilling', 'PaymentMethod','SeniorCitizen']
pred_df=pd.get_dummies(pred_df,columns=pred_df_columns)
pred_mms_columns=['tenure','MonthlyCharges','TotalCharges']
pred_mms_df=pd.DataFrame(pred_df,columns=pred_mms_columns)
pred_df=pred_df.drop(columns=pred_mms_columns)
pred_rescaled_features=MinMaxScaler().fit_transform(pred_mms_df)
pred_rescaled_df=pd.DataFrame(pred_rescaled_features,columns=pred_mms_columns,index=pred_df.index)
pred_df=pd.concat([pred_df,pred_rescaled_df],axis=1)
pred_df= pred_df.sort_index(axis=1)
pred_df=pred_df.dropna()
pred_df=pred_df.iloc[:,1:]
predict_probability=joblib_model.predict_proba(pred_df.tail(1))
prediction = joblib_model.predict(pred_df.tail(1))[0]
churn_prediction=''
if prediction==1:
pred_feedback=predict_probability[:,1]
churn_prediction='Model predicted the customer will churn'
else:
pred_feedback=predict_probability[:,0]
churn_prediction='Model predicted the customer will not churn'
return f"{churn_prediction} with a confidence of {round(pred_feedback[0]*100,2)}%."
@app.callback(
# Output("prediction-output-table", "figure"),
Output('global-dataframe', 'children'),
[Input('upload-file', 'isCompleted'),
# Input("predict-input", "n_clicks")
],
[State('upload-file', 'fileNames'),
State('upload-file', 'upload_id')],
prevent_initial_call=True
)
def callback_on_completion(iscompleted, filenames, upload_id):
file=str(filenames).replace("['","").replace("']","")
pred_data=pd.read_csv(TELCO_CHURN_FILE_UPLOADS_DATA_PATH.joinpath(file))
print(pred_data.shape)
df=pd.read_csv(DATA_PATH.joinpath("telco-customer-churn.csv")) # use the data to process user input
pred_df=pd.concat([df, pred_data])
pred_df.set_index("customerID", inplace = True)
pred_df['TotalCharges']=pd.to_numeric(pred_df['TotalCharges'], errors='coerce')
pred_df=pred_df.drop(columns=['Churn'])
print(pred_data.shape)
pred_df['SeniorCitizen']=pred_df['SeniorCitizen'].fillna(pred_df['SeniorCitizen'].max())
pred_df['SeniorCitizen']=pred_df['SeniorCitizen'].apply(np.int64)
pred_df_columns=['gender','Partner','Dependents','PhoneService','MultipleLines','InternetService','OnlineSecurity','OnlineBackup','DeviceProtection', 'TechSupport', 'StreamingTV','StreamingMovies','Contract', 'PaperlessBilling', 'PaymentMethod','SeniorCitizen']
pred_df=pd.get_dummies(pred_df,columns=pred_df_columns)
pred_mms_columns=['tenure','MonthlyCharges','TotalCharges']
pred_mms_df=pd.DataFrame(pred_df,columns=pred_mms_columns)
pred_df=pred_df.drop(columns=pred_mms_columns)
pred_rescaled_features=MinMaxScaler().fit_transform(pred_mms_df)
pred_rescaled_df=pd.DataFrame(pred_rescaled_features,columns=pred_mms_columns,index=pred_df.index)
pred_df=pd.concat([pred_df,pred_rescaled_df],axis=1)
pred_df= pred_df.sort_index(axis=1)
pred_df=pred_df.dropna()
print(pred_data.shape)
user_records_loaded=str(pred_data.shape[0]) # number of uploaded rows to score; they sit at the tail of pred_df
predict_probability=joblib_model.predict_proba(pred_df.tail(int(user_records_loaded)))
prediction = joblib_model.predict(pred_df.tail(int(user_records_loaded)))[0:int(user_records_loaded)]
results_df = pd.DataFrame({'No Probability':predict_probability[:,0], 'Yes Probability':predict_probability[:,1],'Prediction':prediction})
pred_data[['No Probability','Yes Probability','Prediction']]=results_df
pred_data['Prediction'].replace(to_replace=1.0, value='Yes', inplace=True)
pred_data['Prediction'].replace(to_replace=0.0, value='No', inplace=True)
pred_confidence=[]
for index, row in pred_data.iterrows():
if row['Prediction']=='Yes':
pred_confidence.append(row['Yes Probability']*100)
else:
pred_confidence.append(row['No Probability']*100)
pred_data['Prediction Confidence']=pred_confidence
print(pred_data.head())
return pred_data.to_json(date_format='iso', orient='split')
# fig = go.Figure(data=[go.Table(header=dict(values=list(pred_data[['customerID','Prediction','Prediction Confidence']]),fill_color='paleturquoise',
# align='left'),cells=dict(values=[pred_data['customerID'], pred_data['Prediction'], pred_data['Prediction Confidence']],
# fill_color='lavender',align='left'))])
# fig.update_layout(showlegend=False,autosize=True,margin=dict(t=0,b=0,l=0,r=0),height=350)
# shutil.rmtree(TELCO_CHURN_FILE_UPLOADS_DATA_PATH)
# os.makedirs(TELCO_CHURN_FILE_UPLOADS_DATA_PATH)
# return fig
# ==== Prediction Analysis==========
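# The callbacks below redraw the "Explore Data" style charts from the dataframe cached in the
# hidden 'global-dataframe' div, so the same views can be produced for an uploaded batch once
# "Batch Predict" is clicked.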
@app.callback(
Output('churn-distribution-pred' , 'figure'),
Input('create-analysis-input','n_clicks'),
State('global-dataframe', 'children'),
prevent_initial_call=False)
def churn_distribution_pred(n,jsonified_global_dataframe):
df=pd.read_json(jsonified_global_dataframe, orient='split')
attrition_df=df.groupby(["Churn"], as_index=False )["customerID"].count()
colors = ['skyblue','crimson']
fig = go.Figure(data=[go.Pie(labels=attrition_df['Churn'].tolist(), values=attrition_df['customerID'].tolist(), hole=.3)])
fig.update_layout(title={'text': 'Customer Churn Distribution','y':0.9,'x':0.5, 'xanchor': 'center','yanchor': 'top'},
showlegend=False,autosize=True,annotations=[dict(text='Attrition', font_size=20, showarrow=False)],margin=dict(t=100,b=0,l=0,r=0),height=350,colorway=colors)
return fig
@app.callback(
Output('churn-by-gender-pred' , 'figure'),
Input('create-analysis-input','n_clicks'),
State('global-dataframe', 'children'),
prevent_initial_call=False)
def churn_by_gender_pred(n,jsonified_global_dataframe):
df=pd.read_json(jsonified_global_dataframe, orient='split')
gender_attrition_df=df.groupby(["Churn","gender"], as_index=False )["customerID"].count()
gender_attrition_df.columns=['Churn','Gender','Customers']
colors = ['skyblue','crimson']
fig=px.bar(gender_attrition_df,x='Gender',y='Customers',color='Churn',text='Customers',color_discrete_sequence=colors,
title='Churn by Gender')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.46),autosize=True,margin=dict(t=30,b=0,l=0,r=0)) #use barmode='stack' when stacking,
return fig
@app.callback(
Output('churn-by-contract-pred' , 'figure'),
Input('create-analysis-input','n_clicks'),
State('global-dataframe', 'children'),
prevent_initial_call=False)
def churn_by_contract_pred(n,jsonified_global_dataframe):
df=pd.read_json(jsonified_global_dataframe, orient='split')
contract_attrition_df=df.groupby(["Churn","Contract"], as_index=False )["customerID"].count()
contract_base_df=df.groupby(["Contract"], as_index=False )["customerID"].count()
contract_base_df['Churn']='Customer Base'
contract_attrition_df=pd.concat([contract_attrition_df, contract_base_df], ignore_index=True)
contract_attrition_df.columns=['Churn','Contract','Customers']
contract_attrition_df=contract_attrition_df.sort_values(by=['Contract', 'Customers'],ascending=True)
colors = ['crimson','skyblue','teal']
fig=px.bar(contract_attrition_df,x='Contract',y='Customers',color='Churn',text='Customers',color_discrete_sequence=colors,barmode="group",
title='Churn by Customer Contract Type')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.50),autosize=True,margin=dict(t=30,b=0,l=0,r=0)) #use barmode='stack' when stacking,
return fig
@app.callback(
Output('revenue-distribution-pred' , 'figure'),
Input('create-analysis-input','n_clicks'),
State('global-dataframe', 'children'),
prevent_initial_call=False)
def churn_by_revenue_pred(n,jsonified_global_dataframe):
df=pd.read_json(jsonified_global_dataframe, orient='split')
totalcharges_attrition_df=df.groupby( ["Churn"], as_index=False )["TotalCharges"].sum()
totalcharges_attrition_df=totalcharges_attrition_df.sort_values(by=['TotalCharges'],ascending=True)
totalcharges_attrition_df.columns=['Churn','Revenue']
totalcharges_attrition_df=totalcharges_attrition_df.round(2)
colors = ['crimson','skyblue']
fig=px.bar(totalcharges_attrition_df,x='Churn',y='Revenue',color='Churn',text='Revenue',color_discrete_sequence=colors,
title='Churn by Revenue')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.40),autosize=True,margin=dict(t=30,b=0,l=0,r=0))
return fig
@app.callback(
Output('churn-by-techsupport-pred' , 'figure'),
Input('create-analysis-input','n_clicks'),
State('global-dataframe', 'children'),
prevent_initial_call=False)
def churn_by_techsupport_pred(n,jsonified_global_dataframe):
df=pd.read_json(jsonified_global_dataframe, orient='split')
techsupport_attrition_df=df.groupby( [ "Churn","TechSupport"], as_index=False )["customerID"].count()
techsupport_base_df=df.groupby(["TechSupport"], as_index=False )["customerID"].count()
techsupport_base_df['Churn']='Customer Base'
techsupport_attrition_df=pd.concat([techsupport_attrition_df, techsupport_base_df], ignore_index=True)
techsupport_attrition_df.columns=['Churn','TechSupport','Customers']
techsupport_attrition_df=techsupport_attrition_df.sort_values(by=['TechSupport', 'Customers'],ascending=True)
colors = ['crimson','skyblue','teal']
fig=px.bar(techsupport_attrition_df,x='TechSupport',y='Customers',color='Churn',text='Customers',color_discrete_sequence=colors,barmode="group",
title='Churn by Tech Support')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.50),autosize=True,margin=dict(t=30,b=0,l=0,r=0)) #use barmode='stack' when stacking,
return fig
@app.callback(
Output('churn-by-payment_method-pred' , 'figure'),
Input('create-analysis-input','n_clicks'),
State('global-dataframe', 'children'),
prevent_initial_call=False)
def churn_by_payment_method_pred(n,jsonified_global_dataframe):
df=pd.read_json(jsonified_global_dataframe, orient='split')
PaymentMethod_attrition_df=df.groupby( [ "Churn","PaymentMethod"], as_index=False )["customerID"].count()
PaymentMethod_base_df=df.groupby(["PaymentMethod"], as_index=False )["customerID"].count()
PaymentMethod_base_df['Churn']='Customer Base'
PaymentMethod_attrition_df=pd.concat([PaymentMethod_attrition_df, PaymentMethod_base_df], ignore_index=True)
PaymentMethod_attrition_df.columns=['Churn','PaymentMethod','Customers']
PaymentMethod_attrition_df=PaymentMethod_attrition_df.sort_values(by=['PaymentMethod', 'Customers'],ascending=True)
colors = ['crimson','skyblue','teal']
fig=px.bar(PaymentMethod_attrition_df,x='PaymentMethod',y='Customers',color='Churn',text='Customers',color_discrete_sequence=colors,barmode="group",
title='Churn by Payment Method')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.40),autosize=True,margin=dict(t=30,b=0,l=0,r=0)) #use barmode='stack' when stacking,
return fig
@app.callback(
Output('citizenship-distribution-pred' , 'figure'),
Input('create-analysis-input','n_clicks'),
State('global-dataframe', 'children'),
prevent_initial_call=False)
def churn_by_citizenship_pred(n,jsonified_global_dataframe):
df= | pd.read_json(jsonified_global_dataframe, orient='split') | pandas.read_json |
import pandas
import numpy
class ScriptSetting:
csv_file_name = 'telecom.csv'
csv_separator = ','
csv_null_values = 'null'
csv_true_values = 'true'
csv_false_values = 'false'
columns_out_of_prediction = ['customerId']
missing_column = 'customerAge'
missing_column_range = [14, 18, 28, 35, 40, 60, 150]
negative_influence = 'negative'
positive_influence = 'positive'
def load_data_frame_from_csv(file_name):
data_frame = pandas.read_csv(file_name,
sep=ScriptSetting.csv_separator,
na_values=ScriptSetting.csv_null_values,
true_values=ScriptSetting.csv_true_values,
false_values=ScriptSetting.csv_false_values
)
print("Number of rows: " + str(len(data_frame.index)))
print("Number of columns: " + str(len(data_frame.columns)))
return data_frame
# Return list of columns that contain missing values
def get_missing_columns(data_frame):
return data_frame.columns[data_frame.isna().any()].tolist()
def subset_columns(data_frame, columns):
return [column for column in data_frame.columns if column not in columns]
# Convert all string columns to numerical columns by adding numerical categories
def columns_to_number(data_frame):
for column in data_frame.select_dtypes(exclude=[numpy.number, numpy.bool_]).columns:
data_frame[column] = pandas.Series(data_frame.loc[:, column].astype('category').cat.codes,
index=data_frame.index)
return data_frame
# Normalize all numeric columns that do not contain null values (Z-scaling)
def normalize_data(data_frame):
# Non-null columns
columns = subset_columns(data_frame, get_missing_columns(data_frame) + ScriptSetting.columns_out_of_prediction)
# Numerical columns
for column in data_frame[columns].select_dtypes(include=numpy.number).columns:
data_frame[column] = (data_frame[column] - data_frame[column].mean()) / data_frame[column].std(ddof=0)
return data_frame
# List of columns for which we will count Euclid distance
def get_predict_columns(data_frame):
return subset_columns(data_frame, [ScriptSetting.missing_column] + ScriptSetting.columns_out_of_prediction)
# Calculating Information Value (IV) and Weight of Evidence (WoE) for every column
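# Per bin b of a column, the normalised crosstab gives the share of target ('T') and non-target
# ('F') rows; the code then takes WoE_b = ln(T_b / F_b) and IV_b = (T_b - F_b) * WoE_b, and any
# bin with IV_b > 0.5 flags the column as a strong (positive or negative) indicator of the age range.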
def count_iv_woe(data_frame, age_range):
column_influence = {ScriptSetting.positive_influence: {}, ScriptSetting.negative_influence: {}}
# Make copy of data frame and add target variable
df_iv_woe = data_frame.copy()
age_range_name = str(age_range).replace(']', '').replace(', ', '-').replace('(', '')
age_range_name = 'range_' + age_range_name
df_iv_woe[age_range_name] = 'F'
# Set target variable to current age range
df_iv_woe.loc[df_iv_woe[ScriptSetting.missing_column] == age_range, age_range_name] = 'T'
# For each column calculate IV and Woe
for column in get_predict_columns(data_frame):
i_v = pandas.crosstab(df_iv_woe[column], df_iv_woe[age_range_name]).apply(lambda c: c / c.sum(), axis=0)
i_v = i_v.replace(0.0, 0.00001)
i_v['WoE'] = numpy.log(i_v['T'] / i_v['F'])
i_v['IV'] = (i_v['T'] - i_v['F']) * numpy.log(i_v['T'] / i_v['F'])
# If we have variables with IV > 0.5, add them to influence columns
if len(i_v[i_v['IV'] > 0.5].index) > 0:
for IV_index, IV_row in i_v[i_v['IV'] > 0.5].iterrows():
influence_type = ScriptSetting.positive_influence
# If WoE is negative, this variable has the opposite result than target variable
if IV_row['WoE'] < 0:
influence_type = ScriptSetting.negative_influence
column_influence[influence_type][column] = IV_index
return column_influence
# Group data in ranges
def group_data(data_frame):
columns_group = get_predict_columns(data_frame)
df_ranged = data_frame.copy()
# For every numeric column, we create 5 (or fewer) ranges
for column in df_ranged[columns_group].select_dtypes(include=numpy.number).columns:
column_bin = pandas.qcut(df_ranged[column], 5, duplicates='drop')
df_ranged[column] = column_bin
# We have grouped missing column in special defined ranges
age_bin = pandas.cut(df_ranged[ScriptSetting.missing_column], ScriptSetting.missing_column_range)
df_ranged[ScriptSetting.missing_column] = age_bin
column_influence = {}
# For every age range calculates influence of other columns
for age_id, age_range in age_bin.drop_duplicates().items():
if | pandas.isnull(age_range) | pandas.isnull |
import numpy as np
import pandas as pd
import tensorflow as tf
Data = | pd.read_csv('ratings.csv', sep=';', names=['user', 'item', 'rating', 'timestamp'], header=None) | pandas.read_csv |
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from labels import *
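# Encoding rule used below: two-level object columns are label-encoded to 0/1, while columns with
# more categories are expanded into one-hot dummies (the dummies are presumably joined back and
# the original column dropped in the code past this snippet's cut-off).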
def transformDataHotEncoding(df, labels=None):
if labels == None:
labels = df.columns
for col in labels:
if df[col].dtypes == "object":
if len(df[col].unique()) == 2:
df[col] = LabelEncoder().fit_transform(df[col])
else:
dummies = | pd.get_dummies(df[col], prefix=col) | pandas.get_dummies |
import os
from pathlib import Path
from .. import api
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
description = """
Parse the Reports directory from bcl2fastq.
This command will parse, and extract various statistics from, HTML files in
the Reports directory created by the bcl2fastq or bcl2fastq2 program. After
creating an output directory, the command will write the following files:
- flowcell-summary.csv
- lane-summary.csv
- top-unknown-barcodes.csv
- reports.pdf
Use --sheet to sort samples in the lane-summary.csv file in the same order
as your SampleSheet.csv file. You can also provide a modified version of your
SampleSheet.csv file to subset samples for the lane-summary.csv and
reports.pdf files.
"""
epilog = f"""
[Example] Parse a Reports directory:
$ fuc {api.common._script_name()} Reports output
[Example] Sort and/or subset samples:
$ fuc {api.common._script_name()} Reports output --sheet SampleSheet.csv
"""
def create_parser(subparsers):
parser = api.common._add_parser(
subparsers,
api.common._script_name(),
help='Parse the Reports directory from bcl2fastq.',
description=description,
)
parser.add_argument(
'reports',
help='Reports directory.'
)
parser.add_argument(
'output',
help='Output directory (will be created).'
)
parser.add_argument(
'--sheet',
metavar='PATH',
help='SampleSheet.csv file. Used for sorting and/or subsetting \n'
'samples.'
)
def main(args):
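# bcl2fastq2 writes a lane/barcode summary page at .../all/all/all/laneBarcode.html inside the
# Reports tree; rglob picks these pages up at whatever depth the run folders sit.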
for path in Path(args.reports).rglob('all/all/all/laneBarcode.html'):
html_file = path.absolute()
dfs = | pd.read_html(html_file) | pandas.read_html |
from DIS import DIS
from itertools import chain
from scipy.stats import norm
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
plt.ion()
import tensorflow as tf
import tensorflow_probability as tfp
tfb = tfp.bijectors
tfd = tfp.distributions
tfe = tf.contrib.eager
tf.enable_eager_execution()
class MG1:
"""Class to encapsulate MG1 queue model and how it's tempered"""
def __init__(self, interdeparture, nobs, nlatent):
"""
`interdeparture` is observed interdeparture times
`nobs` is number of observations
"""
self.interdeparture = interdeparture
self.nobs = nobs
self.normCDF = tfb.NormalCDF()
self.initial_target = tfd.Independent(
tfd.Normal(loc=tf.zeros(nlatent), scale=tf.ones(nlatent)),
reinterpreted_batch_ndims=1)
self.max_eps = 10.
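# Latent layout (all standard normal a priori): inputs[:, 0:3] map through the normal CDF to the
# three parameters (arrival rate in (0, 1/3), minimum service time and service width in (0, 10));
# inputs[:, 3:3+nobs] drive the interarrival times and inputs[:, 3+nobs:3+2*nobs] the service times.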
def likelihood_prelims(self, inputs):
"""Store preliminary calculations required for evaluating likelihood
`inputs` is a tensor of random variables
`inputs[:,0:3]` control parameters
`inputs[:,3:3+self.nobs]` control arrival times
`inputs[:,3+T:3+2*self.nobs]` control service times
"""
nbatches = inputs.shape[0]
self.log_prior = self.initial_target.log_prob(inputs)
## Raw underlying U(0,1) variables
inputs_u = self.normCDF.forward(inputs)
## Parameters
arrival_rate = inputs_u[:,0] / 3.
min_service = inputs_u[:,1] * 10.
service_width = inputs_u[:,2] * 10.
## Arrival and service variables
arrivals_u = inputs_u[:,3:3+self.nobs]
services_u = inputs_u[:,3+self.nobs:3+2*self.nobs]
arrivals = -tf.log(arrivals_u) / tf.reshape(arrival_rate, (nbatches, 1))
services = tf.reshape(min_service, (nbatches, 1)) + \
services_u * tf.reshape(service_width, (nbatches, 1))
# Compute interdeparture times
departures = [] # Inter-departure times
current_arrival = tf.zeros(nbatches) # Will be arrival time
# for current customer
last_departure = tf.zeros(nbatches) # Will be departure time
# for previous customer
# (or zero if none)
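# Interdeparture recursion: d_i = s_i + max(0, a_i - last_departure), i.e. the service time plus
# any idle time the server spends waiting for customer i to arrive.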
for i in range(self.nobs):
current_arrival += arrivals[:,i]
departures += [services[:,i] + \
tf.maximum(0., current_arrival - last_departure)]
last_departure += departures[i]
departures = tf.stack(departures)
self.sq_dist = tf.math.reduce_sum(
tf.pow(tf.reshape(self.interdeparture, (-1, 1)) - departures, 2.),
0)
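# The tempered target is prior(x) * exp(-||d(x) - d_obs||^2 / (2 eps^2)): an ABC-style Gaussian
# kernel on the distance between simulated and observed interdeparture times, which collapses to
# an exact-match indicator when eps = 0.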
def log_tempered_target(self, eps):
"""Calculate log of unnormalised tempered target density
Requires likelihood_prelim to have already been run.
"""
nbatches = self.sq_dist.shape[0]
if (eps == 0.):
log_obs = tf.where(self.sq_dist == 0., tf.zeros(nbatches),
tf.fill(nbatches, -np.infty))
else:
log_obs = -self.sq_dist / (2. * eps ** 2.)
return self.log_prior + log_obs
def normals2queue(inputs, nobs):
"""Convert inputs to queue output
If the inputs are N(0,1) draws, this outputs samples from prior + model.
This is a numpy version of similar tensorflow code in `likelihood_prelims`.
`inputs` is a matrix (rows are batches)
`inputs[:,0:3]` control parameters
`inputs[:,3:3+nobs]` control arrival times
`inputs[:,3+T:3+2*nobs]` control service times
Returns:
`pars` a matrix, rows are batches, cols are parameters
`departures` a matrix, rows are batches, cols are interdeparture times
"""
nbatches = inputs.shape[0]
## Raw underlying U(0,1) variables
inputs_u = norm.cdf(inputs)
## Parameters
arrival_rate = inputs_u[:,0] / 3.
min_service = inputs_u[:,1] * 10.
service_width = inputs_u[:,2] * 10.
pars = np.stack((arrival_rate, min_service, service_width), axis=1)
## Arrival and service variables
arrivals_u = inputs_u[:,3:3+nobs]
services_u = inputs_u[:,3+nobs:3+2*nobs]
arrivals = -np.log(arrivals_u) / np.reshape(arrival_rate, (nbatches, 1))
services = np.reshape(min_service, (nbatches, 1)) + \
services_u * np.reshape(service_width, (nbatches, 1))
# Compute interdeparture times
departures = np.zeros((nbatches, nobs)) # Inter-departure times
current_arrival = np.zeros(nbatches) # Will be arrival time
# for current customer
last_departure = np.zeros(nbatches) # Will be departure time
# for previous customer
# (or zero if none)
for i in range(nobs):
current_arrival += arrivals[:,i]
departures[:,i] = services[:,i] + \
np.maximum(0., current_arrival - last_departure)
last_departure += departures[:,i]
return pars, departures
def run_sim(is_size, ess_frac):
## See MG1_example.py for code to generate this synthetic data
deps_obs = np.array(
[4.67931388, 33.32367159, 16.1354178 , 4.26184914, 21.51870177,
19.26768645, 17.41684327, 4.39394293, 4.98717158, 4.00745068,
17.13184198, 4.64447435, 12.10859597, 6.86436748, 4.199275 ,
11.70312317, 7.06592802, 16.28106949, 8.66159665, 4.33875566],
dtype='float32')
nobs = 20
nlatent = 43
ar_model = MG1(deps_obs, nobs, nlatent=nlatent)
## Approximating family
hidden_size = (100, 100, 50)
bichain=list(chain.from_iterable([
tfb.Permute(np.random.permutation(nlatent)), # random permutation
tfb.RealNVP(nlatent//2, shift_and_log_scale_fn =
tfb.real_nvp_default_template(hidden_size, activation=tf.nn.elu,
kernel_initializer=tf.initializers.truncated_normal(stddev=0.001)
)),
] for _ in range(16)))
bichain = bichain[1:] # remove final permutation
bijector = tfb.Chain(bichain)
base = tfd.MultivariateNormalDiag(loc=tf.zeros(nlatent))
dis_approx = tfd.TransformedDistribution(base, bijector)
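# The variational family is a normalizing flow: a 43-dimensional standard-normal base pushed
# through 16 RealNVP coupling layers interleaved with random permutations; DIS below fits it by
# importance sampling against the sequence of tempered targets.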
dis_approx.sample() # Ensure variables created
dis_opt = tf.train.AdamOptimizer()
dis = DIS(model=ar_model, q=dis_approx, optimiser=dis_opt,
importance_size=is_size, ess_target=is_size*ess_frac,
max_weight=0.1, nbatches=10)
while dis.elapsed < 60. * 180.: #stops shortly after 180 mins
dis.train(iterations=50)
# Save samples from approx posterior to a file
x_sample = [b.numpy() for b in dis.batches]
x_sample = np.vstack(x_sample)
pars_samp, deps_samp = normals2queue(x_sample, nobs)
pars_samp[:,2] += pars_samp[:,1]
np.save('MG1_pars_N{:.0f}_frac{:.2f}'.format(is_size, ess_frac), pars_samp)
# Return some summaries
results = np.column_stack((np.array(dis.time_list), np.array(dis.eps_list),
np.array(dis.it_list)))
results = np.insert(results, 3, is_size, 1)
results = np.insert(results, 4, ess_frac, 1)
results = pd.DataFrame(results,
columns=['time', 'eps', 'iteration',
'is samples', 'ess frac'])
return results
output = []
for is_size in (50000, 20000, 10000, 5000):
for ess_frac in (0.05, 0.1, 0.2):
try: # run_sim can fail due to numerical instability
output += [run_sim(is_size, ess_frac)]
except ValueError:
pass
output = pd.concat(output)
output['ess frac'] = output['ess frac'].astype('category')
output['is samples'] = output['is samples'].astype('int')
| pd.to_pickle(output, "mg1_comparison.pkl") | pandas.to_pickle |
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal
from pandas.testing import assert_frame_equal
from nose.tools import (assert_equal,
assert_almost_equal,
raises,
ok_,
eq_)
from rsmtool.preprocessor import (FeaturePreprocessor,
FeatureSubsetProcessor,
FeatureSpecsProcessor)
class TestFeaturePreprocessor:
def setUp(self):
self.fpp = FeaturePreprocessor()
def test_select_candidates_with_N_or_more_items(self):
data = pd.DataFrame({'candidate': ['a'] * 3 + ['b'] * 2 + ['c'],
'sc1': [2, 3, 1, 5, 6, 1]})
df_included_expected = pd.DataFrame({'candidate': ['a'] * 3 + ['b'] * 2,
'sc1': [2, 3, 1, 5, 6]})
df_excluded_expected = pd.DataFrame({'candidate': ['c'],
'sc1': [1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 2)
assert_frame_equal(df_included, df_included_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_select_candidates_with_N_or_more_items_all_included(self):
data = pd.DataFrame({'candidate': ['a'] * 2 + ['b'] * 2 + ['c'] * 2,
'sc1': [2, 3, 1, 5, 6, 1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 2)
assert_frame_equal(df_included, data)
assert_equal(len(df_excluded), 0)
def test_select_candidates_with_N_or_more_items_all_excluded(self):
data = pd.DataFrame({'candidate': ['a'] * 3 + ['b'] * 2 + ['c'],
'sc1': [2, 3, 1, 5, 6, 1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 4)
assert_frame_equal(df_excluded, data)
assert_equal(len(df_included), 0)
def test_select_candidates_with_N_or_more_items_custom_name(self):
data = pd.DataFrame({'ID': ['a'] * 3 + ['b'] * 2 + ['c'],
'sc1': [2, 3, 1, 5, 6, 1]})
df_included_expected = pd.DataFrame({'ID': ['a'] * 3 + ['b'] * 2,
'sc1': [2, 3, 1, 5, 6]})
df_excluded_expected = pd.DataFrame({'ID': ['c'],
'sc1': [1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 2, 'ID')
assert_frame_equal(df_included, df_included_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_rename_no_columns(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'length',
'raw', 'candidate', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2',
'length', 'raw', 'candidate')
assert_array_equal(df.columns,
['spkitemid', 'sc1', 'sc2', 'length', 'raw',
'candidate', 'feature1', 'feature2'])
def test_rename_no_columns_some_values_none(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2', None, None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'feature1', 'feature2'])
def test_rename_no_used_columns_but_unused_columns_with_default_names(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'length', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2', None, None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2',
'##length##', 'feature1', 'feature2'])
def test_rename_used_columns(self):
df = pd.DataFrame(columns=['id', 'r1', 'r2', 'words', 'SR', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'id', 'r1', 'r2', 'words', 'SR', None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length',
'raw', 'feature1', 'feature2'])
def test_rename_used_columns_and_unused_columns_with_default_names(self):
df = pd.DataFrame(columns=['id', 'r1', 'r2', 'words', 'raw', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'id', 'r1', 'r2', 'words', None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length',
'##raw##', 'feature1', 'feature2'])
def test_rename_used_columns_with_swapped_names(self):
df = pd.DataFrame(columns=['id', 'sc1', 'sc2', 'raw', 'words', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'id', 'sc2', 'sc1', 'words', None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc2', 'sc1', '##raw##',
'length', 'feature1', 'feature2'])
def test_rename_used_columns_but_not_features(self):
df = pd.DataFrame(columns=['id', 'sc1', 'sc2', 'length', 'feature2'])
df = self.fpp.rename_default_columns(df, ['length'], 'id', 'sc1', 'sc2', None, None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length', 'feature2'])
def test_rename_candidate_column(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'length',
'apptNo', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [],
'spkitemid', 'sc1', 'sc2', None, None, 'apptNo')
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', '##length##',
'candidate', 'feature1', 'feature2'])
def test_rename_candidate_named_sc2(self):
df = pd.DataFrame(columns=['id', 'sc1', 'sc2', 'question', 'l1', 'score'])
df_renamed = self.fpp.rename_default_columns(df, [],
'id', 'sc1', None, None, 'score', 'sc2')
assert_array_equal(df_renamed.columns, ['spkitemid', 'sc1',
'candidate', 'question', 'l1', 'raw'])
@raises(KeyError)
def test_check_subgroups_missing_columns(self):
df = pd.DataFrame(columns=['a', 'b', 'c'])
subgroups = ['a', 'd']
FeaturePreprocessor.check_subgroups(df, subgroups)
def test_check_subgroups_nothing_to_replace(self):
df = pd.DataFrame({'a': ['1', '2'],
'b': ['32', '34'],
'd': ['abc', 'def']})
subgroups = ['a', 'd']
df_out = FeaturePreprocessor.check_subgroups(df, subgroups)
assert_frame_equal(df_out, df)
def test_check_subgroups_replace_empty(self):
df = pd.DataFrame({'a': ['1', ''],
'b': [' ', '34'],
'd': ['ab c', ' ']})
subgroups = ['a', 'd']
df_expected = pd.DataFrame({'a': ['1', 'No info'],
'b': [' ', '34'],
'd': ['ab c', 'No info']})
df_out = FeaturePreprocessor.check_subgroups(df, subgroups)
assert_frame_equal(df_out, df_expected)
def test_filter_on_column(self):
bad_df = pd.DataFrame({'spkitemlab': np.arange(1, 9, dtype='int64'),
'sc1': ['00', 'TD', '02', '03'] * 2})
df_filtered_with_zeros = pd.DataFrame({'spkitemlab': [1, 3, 4, 5, 7, 8],
'sc1': [0.0, 2.0, 3.0] * 2})
df_filtered = pd.DataFrame({'spkitemlab': [3, 4, 7, 8], 'sc1': [2.0, 3.0] * 2})
(output_df_with_zeros,
output_excluded_df_with_zeros) = self.fpp.filter_on_column(bad_df, 'sc1',
'spkitemlab',
exclude_zeros=False)
output_df, output_excluded_df = self.fpp.filter_on_column(bad_df, 'sc1',
'spkitemlab',
exclude_zeros=True)
assert_frame_equal(output_df_with_zeros, df_filtered_with_zeros)
assert_frame_equal(output_df, df_filtered)
def test_filter_on_column_all_non_numeric(self):
bad_df = pd.DataFrame({'sc1': ['A', 'I', 'TD', 'TD'] * 2,
'spkitemlab': range(1, 9)})
expected_df_excluded = bad_df.copy()
expected_df_excluded.drop('sc1', axis=1, inplace=True)
df_filtered, df_excluded = self.fpp.filter_on_column(bad_df, 'sc1',
'spkitemlab',
exclude_zeros=True)
ok_(df_filtered.empty)
ok_("sc1" not in df_filtered.columns)
assert_frame_equal(df_excluded, expected_df_excluded, check_dtype=False)
def test_filter_on_column_std_epsilon_zero(self):
# Test that the function excludes columns where the std is returned as a
# very low value rather than exactly 0
data = {'id': np.arange(1, 21, dtype='int64'),
'feature_ok': np.arange(1, 21),
'feature_zero_sd': [1.5601] * 20}
bad_df = pd.DataFrame(data=data)
output_df, output_excluded_df = self.fpp.filter_on_column(bad_df,
'feature_zero_sd',
'id',
exclude_zeros=False,
exclude_zero_sd=True)
good_df = bad_df[['id', 'feature_ok']].copy()
assert_frame_equal(output_df, good_df)
ok_(output_excluded_df.empty)
def test_filter_on_column_with_inf(self):
# Test that the function excludes columns where the feature value is 'inf'
data = pd.DataFrame({'feature_1': [1.5601, 0, 2.33, 11.32],
'feature_ok': np.arange(1, 5)})
data['feature_with_inf'] = 1 / data['feature_1']
data['id'] = np.arange(1, 5, dtype='int64')
bad_df = data[np.isinf(data['feature_with_inf'])].copy()
good_df = data[~np.isinf(data['feature_with_inf'])].copy()
bad_df.reset_index(drop=True, inplace=True)
good_df.reset_index(drop=True, inplace=True)
output_df, output_excluded_df = self.fpp.filter_on_column(data, 'feature_with_inf',
'id',
exclude_zeros=False,
exclude_zero_sd=True)
assert_frame_equal(output_df, good_df)
assert_frame_equal(output_excluded_df, bad_df)
def test_filter_on_flag_column_empty_flag_dictionary(self):
# no flags specified, keep the data frame as is
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 0, 0, 0],
'flag2': [1, 2, 2, 1]})
flag_dict = {}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_and_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_and_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0.5, 1.1, 2.2, 3.6]})
flag_dict = {'flag1': [0.5, 1.1, 2.2, 3.6, 4.5]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_str_column_and_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['a', 'b', 'c', 'd']})
flag_dict = {'flag1': ['a', 'b', 'c', 'd', 'e']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_int_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0.0, 1.0, 2.0, 3.0]})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': [0.0, 1.0, 2.0, 3.0, 4.5]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
# Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.
import numpy as np
import pandas as pd
import pytest
import sklearn.metrics as skm
import fairlearn.metrics as metrics
from .data_for_test import y_t, y_p, g_1, g_2, g_3, g_4
from test.unit.input_convertors import conversions_for_1d
@pytest.mark.parametrize("transform_y_p", conversions_for_1d)
@pytest.mark.parametrize("transform_y_t", conversions_for_1d)
def test_basic(transform_y_t, transform_y_p):
# If there are failures here, other, more specific tests should also fail
g_f = pd.DataFrame(data=g_4, columns=['My feature'])
target = metrics.MetricFrame(skm.recall_score,
transform_y_t(y_t),
transform_y_p(y_p),
sensitive_features=g_f)
# Check on the indices properties
assert target.control_levels is None
assert isinstance(target.sensitive_levels, list)
assert (target.sensitive_levels == ['My feature'])
# Check we have correct return types
assert isinstance(target.overall, float)
assert isinstance(target.by_group, pd.Series)
# Check we have expected number of elements
assert len(target.by_group) == 2
assert np.array_equal(target.by_group.index.names, ['My feature'])
recall_overall = skm.recall_score(y_t, y_p)
assert target.overall == recall_overall
mask_p = (g_4 == 'pp')
mask_q = (g_4 == 'q')
recall_p = skm.recall_score(y_t[mask_p], y_p[mask_p])
recall_q = skm.recall_score(y_t[mask_q], y_p[mask_q])
assert target.by_group['pp'] == recall_p
assert target.by_group['q'] == recall_q
target_mins = target.group_min()
assert isinstance(target_mins, float)
assert target_mins == min(recall_p, recall_q)
target_maxes = target.group_max()
assert isinstance(target_maxes, float)
assert target_maxes == max(recall_p, recall_q)
@pytest.mark.parametrize("transform_y_p", conversions_for_1d)
@pytest.mark.parametrize("transform_y_t", conversions_for_1d)
def test_basic_metric_dict(transform_y_t, transform_y_p):
# If there are failures here, other, more specific tests should also fail
g_f = pd.DataFrame(data=g_4, columns=['My feature'])
import argparse
from functools import partial
import pandas as pd
class Converter(object):
def __init__(self, input_file, output_file):
self.output_file = output_file
self.input_file = input_file
self.file = None
self.package_size = 5 # 5 bytes
self.columns = ["Time", "PulseRate", "SpO2", "PulseWaveform",
"BarGraph", "SignalStrength", "Beep", "FingerOut",
"Searching", "DroppingSpO2", "ProbeError"]
def get_readings(self):
readings = []
with open(self.input_file, "rb") as file:
reader = partial(file.read1, self.package_size)
file_iterator = iter(reader, bytes())
for i, data in enumerate(file_iterator):
if [d & 0x80 != 0 for d in data] !=\
[True, False, False, False, False]:
raise ValueError("Invalid data packet.")
# 1st byte
signalStrength = data[0] & 0x0f
fingerOut = bool(data[0] & 0x10)
droppingSpO2 = bool(data[0] & 0x20)
beep = bool(data[0] & 0x40)
# 2nd byte
pulseWaveform = data[1]
# 3rd byte
barGraph = data[2] & 0x0f
probeError = bool(data[2] & 0x10)
searching = bool(data[2] & 0x20)
pulseRate = (data[2] & 0x40) << 1
# 4th byte
pulseRate |= data[3] & 0x7f
# 5th byte
bloodSpO2 = data[4] & 0x7f
# time
time = i/60
reading = [time, pulseRate, bloodSpO2, pulseWaveform,
barGraph, signalStrength, beep,
fingerOut, searching, droppingSpO2,
probeError]
readings.append(reading)
return readings
def dumpFileData(input_file, output_file):
converter = Converter(input_file, output_file)
readings = converter.get_readings()
df = pd.DataFrame(readings, columns=converter.columns)
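# A minimal sketch of one way to drive the converter from the command line
# and persist the readings, assuming CSV output is wanted; the argparse
# wiring and the to_csv call below are illustrative assumptions rather than
# part of the converter's defined interface.
def _main():
    arg_parser = argparse.ArgumentParser(
        description="Convert raw pulse-oximeter packets to a CSV table.")
    arg_parser.add_argument("input_file", help="path to the raw binary capture")
    arg_parser.add_argument("output_file", help="path of the CSV file to write")
    args = arg_parser.parse_args()
    converter = Converter(args.input_file, args.output_file)
    # Decode every 5-byte packet and write the table of readings out as CSV.
    table = pd.DataFrame(converter.get_readings(), columns=converter.columns)
    table.to_csv(args.output_file, index=False)

if __name__ == "__main__":
    _main()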
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
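# Concrete subclasses are expected to override read_csv/read_table above to
# pin a specific parser engine. A minimal sketch of that pattern, assuming
# the usual kwargs-forwarding approach (the class name here is illustrative):
#
#     class TestCParser(ParserTests, tm.TestCase):
#         def read_csv(self, *args, **kwds):
#             kwds = kwds.copy()
#             kwds['engine'] = 'c'
#             return read_csv(*args, **kwds)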
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
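# Helper for the multithreaded read test: splits the CSV at `path` into
# `num_tasks` contiguous row ranges, reads each range on its own thread
# (the first chunk keeps the header, later chunks use skiprows/nrows and a
# positional date column), then re-labels the columns and concatenates the
# pieces back into a single DataFrame.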
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
        # skiprows=4 drops the first four lines (the three comments plus
        # 'X,Y,Z'); header=1 then takes the header from the second remaining
        # line ('A,B,C'), so the stray '1,2,3' row is discarded as well
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
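        # Note: `day_first` (with an underscore) is not a valid keyword for
        # parse_date; the real argument is `dayfirst`. The lambda below is
        # therefore expected to blow up when called, and the assertRaises
        # verifies that a failing date_parser propagates an exception.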
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
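    # na_values given as a dict applies per-column sentinel lists: below,
    # 'foo' is treated as NA only in column A and 'bar' only in column B,
    # while the default NA strings (e.g. 'NA') still apply everywhere.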
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
        except AssertionError:  # tz identity check can fail on some setups; compare components instead
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
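    # parse_dates given as a dict, e.g. {'nominal': [1, 2]}, joins the listed
    # source columns (here 'date' and 'NominalTime') and parses the result
    # into a single new 'nominal' datetime column; the list-of-lists form
    # [[1, 2]] does the same but auto-generates the combined column name.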
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
                        # not a true bytes test, since TextIOWrapper decodes the stream before the parser sees it
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
        # GH 2442: trailing delimiters should be tolerated when index_col=False
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
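    # When usecols is combined with an integer index_col, the index position
    # is interpreted relative to the *selected* columns, so index_col=0 below
    # picks 'Time' whether the columns were requested by name or by position.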
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
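        # The case below builds a first field of 1024 * 256 - 2 characters,
        # presumably sized to straddle the C tokenizer's internal read buffer
        # so that the following chunk begins with a newline plus whitespace.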
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
    def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
        index_col, expected = None, DataFrame([], columns=list('xyz'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
        index_col, expected = False, DataFrame([], columns=list('xyz'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
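    # The upper-case labels in the next test (WHITESPACE_LINE, EAT_COMMENT,
    # IN_QUOTED_FIELD, ...) appear to name states of the C tokenizer's state
    # machine; each snippet is crafted so that end-of-file is reached while
    # the tokenizer is sitting in that particular state.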
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
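# The engine-specific test classes below reuse the shared test bodies by
# overriding read_csv/read_table to pin the parser engine (and, for the C
# parser classes, the memory mode), so the shared cases run once per engine
# configuration.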
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
            # not a true bytes test, since TextIOWrapper decodes the stream before the parser sees it
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
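    # thousands=',' strips the grouping separator before numeric conversion,
    # so '2,334.01' below is read as the float 2334.01 rather than as text.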
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
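    # read_fwf takes either explicit colspecs (a list of half-open
    # (start, end) character ranges) or widths (a list of field widths that
    # gets expanded into colspecs); supplying both at once, or explicitly
    # passing None for both, raises ValueError, as the assertRaisesRegexp
    # checks at the end of the next test demonstrate.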
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
        # From <NAME>: some files pad fields with non-space filler
        # characters; this is supported by passing them as the 'delimiter':
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assertRaisesRegexp(ValueError, "must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_fwf_colspecs_is_list_or_tuple(self):
with tm.assertRaisesRegexp(TypeError,
'column specifications must be a list or '
'tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(self.data1),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
with tm.assertRaisesRegexp(TypeError,
'Each column specification must be.+'):
read_fwf(StringIO(self.data1), [('a', 1)])
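    # A None endpoint inside a colspec is treated as open-ended: (0, None)
    # runs to the end of the line and (None, 3) starts at the beginning of
    # the line, which is what the expected frames below encode.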
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
self.assertTrue(len(res))
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn", "dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest(
"Bytes-related test - only needs to work on Python 3")
result = pd.read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = pd.DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
result = pd.read_table(data, sep="::", engine='python',
encoding='cp1255')
expected = pd.DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
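    # verbose=True makes the parser print a summary of how many NA values
    # were filled per column, which is why the next test captures sys.stdout
    # and compares the emitted message directly.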
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
buf = StringIO()
sys.stdout = buf
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True)
self.assertEqual(
buf.getvalue(), 'Filled 3 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
buf = StringIO()
sys.stdout = buf
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True, index_col=0)
self.assertEqual(
buf.getvalue(), 'Filled 1 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
def test_float_precision_specified(self):
# Should raise an error if float_precision (C parser option) is
# specified
with tm.assertRaisesRegexp(ValueError, "The 'float_precision' option "
"is not supported with the 'python' engine"):
self.read_csv(StringIO('a,b,c\n1,2,3'), float_precision='high')
def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
try:
read_table(f, squeeze=True, header=None, engine='c')
except Exception:
pass
else:
raise ValueError('this should not happen')
result = read_table(f, squeeze=True, header=None,
engine='python')
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_iterator(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
        # stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_single_line(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# sniff separator
buf = StringIO()
sys.stdout = buf
        # sniffing the separator may print a fallback warning, so capture stdout
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_malformed(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_skip_footer(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = self.read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_decompression_regex_sep(self):
# GH 6607
# This is a copy which should eventually be moved to ParserTests
# when the issue with the C parser is fixed
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with multi-level index is fixed in the C parser.
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
# GH 6893
data = ' A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9'
expected = DataFrame.from_records([(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list('abcABC'), index=list('abc'))
actual = self.read_table(StringIO(data), sep='\s+')
tm.assert_frame_equal(actual, expected)
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
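# The class below checks read_fwf's column-boundary inference: when colspecs
# is left at its default, the fixed-width reader infers the boundaries from
# the whitespace pattern of the first rows it sees, and the result should
# match an explicit colspecs read for each of these increasingly messy files.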
class TestFwfColspaceSniffing(tm.TestCase):
def test_full_file(self):
# File with all values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
2000-01-05T00:00:00 0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0.487094399463 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
2000-01-11T00:00:00 0.157160753327 34 foo'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_missing(self):
# File with missing values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
34'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces(self):
# File with spaces in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 <NAME> 9315.45 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 <NAME> 0 17000.00 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65 5000.00 2/5/2007
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces_and_missing(self):
        # File with spaces and missing values in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 5/25/1985
761 <NAME>-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_messed_up_data(self):
# Completely messed up file
test = '''
Account Name Balance Credit Limit Account Created
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00
761 <NAME> 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_multiple_delimiters(self):
test = r'''
col1~~~~~col2 col3++++++++++++++++++col4
~~22.....11.0+++foo~~~~~~~~~~<NAME>
33+++122.33\\\bar.........<NAME>
++44~~~~12.01 baz~~<NAME>
~~55 11+++foo++++<NAME>-Smith
..66++++++.03~~~bar <NAME>
'''.strip('\r\n')
colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))
expected = read_fwf(StringIO(test), colspecs=colspecs,
delimiter=' +~.\\')
tm.assert_frame_equal(expected, read_fwf(StringIO(test),
delimiter=' +~.\\'))
def test_variable_width_unicode(self):
if not compat.PY3:
raise nose.SkipTest(
'Bytes-related test - only needs to work on Python 3')
test = '''
שלום שלום
ום שלל
של ום
'''.strip('\r\n')
expected = pd.read_fwf(BytesIO(test.encode('utf8')),
colspecs=[(0, 4), (5, 9)], header=None, encoding='utf8')
tm.assert_frame_equal(expected, read_fwf(BytesIO(test.encode('utf8')),
header=None, encoding='utf8'))
class CParserTests(ParserTests):
""" base class for CParser Testsing """
def test_buffer_overflow(self):
# GH9205
# test certain malformed input files that cause buffer overflows in
# tokenizer.c
malfw = "1\r1\r1\r 1\r 1\r" # buffer overflow in words pointer
malfs = "1\r1\r1\r 1\r 1\r11\r" # buffer overflow in stream pointer
malfl = "1\r1\r1\r 1\r 1\r11\r1\r" # buffer overflow in lines pointer
for malf in (malfw, malfs, malfl):
try:
df = self.read_table(StringIO(malf))
except Exception as cperr:
self.assertIn(
'Buffer overflow caught - possible malformed input file.', str(cperr))
def test_buffer_rd_bytes(self):
# GH 12098
# src->buffer can be freed twice leading to a segfault if a corrupt
# gzip file is read with read_csv and the buffer is filled more than
# once before gzip throws an exception
data = '\x1F\x8B\x08\x00\x00\x00\x00\x00\x00\x03\xED\xC3\x41\x09' \
'\x00\x00\x08\x00\xB1\xB7\xB6\xBA\xFE\xA5\xCC\x21\x6C\xB0' \
'\xA6\x4D' + '\x55' * 267 + \
'\x7D\xF7\x00\x91\xE0\x47\x97\x14\x38\x04\x00' \
'\x1f\x8b\x08\x00VT\x97V\x00\x03\xed]\xefO'
for i in range(100):
try:
_ = self.read_csv(StringIO(data),
compression='gzip',
delim_whitespace=True)
except Exception as e:
pass
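# The two C-parser classes below differ only in the memory mode they pin:
# low_memory=True makes the C reader tokenize the file in internal chunks
# (buffer_lines=2 forces very small chunks so that chunk-boundary code paths
# get exercised), while low_memory=False reads everything in a single pass.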
class TestCParserHighMemory(CParserTests, tm.TestCase):
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = False
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = False
return read_table(*args, **kwds)
def test_compact_ints(self):
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
def test_parse_dates_empty_string(self):
# #2263
s = StringIO("Date, test\n2012-01-01, 1\n,2")
result = self.read_csv(s, parse_dates=["Date"], na_filter=False)
self.assertTrue(result['Date'].isnull()[1])
def test_usecols(self):
raise nose.SkipTest(
"Usecols is not supported in C High Memory engine.")
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
# check with delim_whitespace=True
df = self.read_csv(StringIO(data.replace(',', ' ')), comment='#',
delim_whitespace=True)
tm.assert_almost_equal(df.values, expected)
# check with custom line terminator
df = self.read_csv(StringIO(data.replace('\n', '*')), comment='#',
lineterminator='*')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_skiprows_lineterminator(self):
# GH #9079
data = '\n'.join(['SMOSMANIA ThetaProbe-ML2X ',
'2007/01/01 01:00 0.2140 U M ',
'2007/01/01 02:00 0.2141 M O ',
'2007/01/01 04:00 0.2142 D M '])
expected = pd.DataFrame([['2007/01/01', '01:00', 0.2140, 'U', 'M'],
['2007/01/01', '02:00', 0.2141, 'M', 'O'],
['2007/01/01', '04:00', 0.2142, 'D', 'M']],
columns=['date', 'time', 'var', 'flag',
'oflag'])
# test with the three default lineterminators LF, CR and CRLF
df = self.read_csv(StringIO(data), skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data.replace('\n', '\r')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data.replace('\n', '\r\n')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n"
expected = pd.DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# this should ignore six lines including lines with trailing
# whitespace and blank lines. issues 8661, 8679
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# test skipping set of rows after a row with trailing spaces, issue
# #8983
expected = pd.DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
        # skiprows=4 drops the first four lines (the three comments plus
        # 'X,Y,Z'); header=1 then takes the header from the second remaining
        # line ('A,B,C'), so the stray '1,2,3' row is discarded as well
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_passing_dtype(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the dtype argument is supported by all engines.
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
# empty frame
# GH12048
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_dtype_and_names_error(self):
# GH 8833
# passing both dtype and names resulting in an error reporting issue
data = """
1.0 1
2.0 2
3.0 3
"""
# base cases
result = self.read_csv(StringIO(data), sep='\s+', header=None)
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]])
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), sep='\s+',
header=None, names=['a', 'b'])
expected = DataFrame(
[[1.0, 1], [2.0, 2], [3.0, 3]], columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
# fallback casting
result = self.read_csv(StringIO(
data), sep='\s+', header=None, names=['a', 'b'], dtype={'a': np.int32})
expected = DataFrame([[1, 1], [2, 2], [3, 3]], columns=['a', 'b'])
expected['a'] = expected['a'].astype(np.int32)
tm.assert_frame_equal(result, expected)
data = """
1.0 1
nan 2
3.0 3
"""
# fallback casting, but not castable
with tm.assertRaisesRegexp(ValueError, 'cannot safely convert'):
self.read_csv(StringIO(data), sep='\s+', header=None,
names=['a', 'b'], dtype={'a': np.int32})
def test_fallback_to_python(self):
# GH 6607
data = 'a b c\n1 2 3'
# specify C engine with unsupported options (raise)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep=None,
delim_whitespace=False)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep='\s')
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', skip_footer=1)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), lineterminator='\n',
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
class TestCParserLowMemory(CParserTests, tm.TestCase):
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = True
kwds['buffer_lines'] = 2
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = True
kwds['buffer_lines'] = 2
return read_table(*args, **kwds)
def test_compact_ints(self):
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
def test_compact_ints_as_recarray(self):
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
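    # float_precision selects the C parser's float converter: the default is
    # the fast converter, 'high' a more careful one, and 'round_trip' defers
    # to Python's own float(); the next test asserts that 'high' is never
    # less accurate than the default and that 'round_trip' matches float().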
def test_precise_conversion(self):
# GH #8002
tm._skip_if_32bit()
from decimal import Decimal
normal_errors = []
precise_errors = []
for num in np.linspace(1., 2., num=500): # test numbers between 1 and 2
text = 'a\n{0:.25}'.format(num) # 25 decimal digits of precision
normal_val = float(self.read_csv(StringIO(text))['a'][0])
precise_val = float(self.read_csv(
StringIO(text), float_precision='high')['a'][0])
roundtrip_val = float(self.read_csv(
StringIO(text), float_precision='round_trip')['a'][0])
actual_val = Decimal(text[2:])
def error(val):
return abs(Decimal('{0:.100}'.format(val)) - actual_val)
normal_errors.append(error(normal_val))
precise_errors.append(error(precise_val))
# round-trip should match float()
self.assertEqual(roundtrip_val, float(text[2:]))
self.assertTrue(sum(precise_errors) <= sum(normal_errors))
self.assertTrue(max(precise_errors) <= max(normal_errors))
def test_pass_dtype(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'object')
def test_pass_dtype_as_recarray(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'},
as_recarray=True)
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'S1')
def test_empty_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), dtype={'one': 'u1'})
expected = DataFrame({'one': np.empty(0, dtype='u1'),
'two': np.empty(0, dtype=np.object)})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_index_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), index_col=['one'],
dtype={'one': 'u1', 1: 'f'})
expected = DataFrame({'two': np.empty(0, dtype='f')},
index=Index([], dtype='u1', name='one'))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_multiindex_pass_dtype(self):
data = 'one,two,three'
result = self.read_csv(StringIO(data), index_col=['one', 'two'],
dtype={'one': 'u1', 1: 'f8'})
exp_idx = MultiIndex.from_arrays([np.empty(0, dtype='u1'), np.empty(0, dtype='O')],
names=['one', 'two'])
expected = DataFrame(
{'three': np.empty(0, dtype=np.object)}, index=exp_idx)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_names(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={
'one': 'u1', 'one.1': 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_indexes(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_dup_column_pass_dtype_by_names(self):
data = 'one,one'
result = self.read_csv(
StringIO(data), mangle_dupe_cols=False, dtype={'one': 'u1'})
expected = pd.concat([Series([], name='one', dtype='u1')] * 2, axis=1)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_dup_column_pass_dtype_by_indexes(self):
### FIXME in GH9424
raise nose.SkipTest(
"GH 9424; known failure read_csv with duplicate columns")
data = 'one,one'
result = self.read_csv(
StringIO(data), mangle_dupe_cols=False, dtype={0: 'u1', 1: 'f'})
expected = pd.concat([Series([], name='one', dtype='u1'),
Series([], name='one', dtype='f')], axis=1)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_usecols_dtypes(self):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(0, 1, 2),
names=('a', 'b', 'c'),
header=None,
converters={'a': str},
dtype={'b': int, 'c': float},
)
result2 = self.read_csv(StringIO(data), usecols=(0, 2),
names=('a', 'b', 'c'),
header=None,
converters={'a': str},
dtype={'b': int, 'c': float},
)
self.assertTrue((result.dtypes == [object, np.int, np.float]).all())
self.assertTrue((result2.dtypes == [object, np.float]).all())
def test_usecols_implicit_index_col(self):
# #2654
data = 'a,b,c\n4,apple,bat,5.7\n8,orange,cow,10'
result = self.read_csv(StringIO(data), usecols=['a', 'b'])
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_with_whitespace(self):
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
result = self.read_csv(StringIO(data), delim_whitespace=True,
usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_regex_sep(self):
# #2733
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
df = self.read_csv(StringIO(data), sep='\s+', usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(df, expected)
def test_pure_python_failover(self):
data = "a,b,c\n1,2,3#ignore this!\n4,5,6#ignorethistoo"
result = self.read_csv(StringIO(data), comment='#')
expected = DataFrame({'a': [1, 4], 'b': [2, 5], 'c': [3, 6]})
tm.assert_frame_equal(result, expected)
def test_decompression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='gzip')
tm.assert_frame_equal(result, expected)
result = self.read_csv(open(path, 'rb'), compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='bz2')
tm.assert_frame_equal(result, expected)
# result = self.read_csv(open(path, 'rb'), compression='bz2')
# tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
with open(path, 'rb') as fin:
if compat.PY3:
result = self.read_csv(fin, compression='bz2')
tm.assert_frame_equal(result, expected)
else:
self.assertRaises(ValueError, self.read_csv,
fin, compression='bz2')
def test_decompression_regex_sep(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
# GH 6607
# Test currently only valid with the python engine because of
# regex sep. Temporarily copied to TestPythonParser.
# Here test for ValueError when passing regex sep:
with tm.assertRaisesRegexp(ValueError, 'regex sep'): # XXX
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
# GH 6607
with tm.assertRaisesRegexp(ValueError, 'regex sep'): # XXX
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
def test_memory_map(self):
# it works!
result = self.read_csv(self.csv1, memory_map=True)
def test_disable_bool_parsing(self):
# #2090
data = """A,B,C
Yes,No,Yes
No,Yes,Yes
Yes,,Yes
No,No,No"""
result = read_csv(StringIO(data), dtype=object)
self.assertTrue((result.dtypes == object).all())
result = read_csv(StringIO(data), dtype=object, na_filter=False)
self.assertEqual(result['B'][2], '')
def test_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
df2 = self.read_csv(StringIO(data), sep=';', decimal=',')
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_custom_lineterminator(self):
data = 'a,b,c~1,2,3~4,5,6'
result = self.read_csv(StringIO(data), lineterminator='~')
expected = self.read_csv(StringIO(data.replace('~', '\n')))
tm.assert_frame_equal(result, expected)
data2 = data.replace('~', '~~')
        self.assertRaises(ValueError, read_csv, StringIO(data2),
                          lineterminator='~~')
def test_raise_on_passed_int_dtype_with_nas(self):
# #2631
data = """YEAR, DOY, a
2001,106380451,10
2001,,11
2001,106380451,67"""
self.assertRaises(Exception, read_csv, StringIO(data), sep=",",
skipinitialspace=True,
dtype={'DOY': np.int64})
def test_na_trailing_columns(self):
data = """Date,Currenncy,Symbol,Type,Units,UnitPrice,Cost,Tax
2012-03-14,USD,AAPL,BUY,1000
2012-05-12,USD,SBUX,SELL,500"""
result = self.read_csv(StringIO(data))
self.assertEqual(result['Date'][1], '2012-05-12')
self.assertTrue(result['UnitPrice'].isnull().all())
def test_parse_ragged_csv(self):
data = """1,2,3
1,2,3,4
1,2,3,4,5
1,2
1,2,3,4"""
nice_data = """1,2,3,,
1,2,3,4,
1,2,3,4,5
1,2,,,
1,2,3,4,"""
result = self.read_csv(StringIO(data), header=None,
names=['a', 'b', 'c', 'd', 'e'])
expected = self.read_csv(StringIO(nice_data), header=None,
names=['a', 'b', 'c', 'd', 'e'])
tm.assert_frame_equal(result, expected)
# too many columns, cause segfault if not careful
data = "1,2\n3,4,5"
result = self.read_csv(StringIO(data), header=None,
names=lrange(50))
expected = self.read_csv(StringIO(data), header=None,
names=lrange(3)).reindex(columns=lrange(50))
tm.assert_frame_equal(result, expected)
def test_tokenize_CR_with_quoting(self):
# #3453, this doesn't work with Python parser for some reason
data = ' a,b,c\r"a,b","e,d","f,f"'
result = self.read_csv(StringIO(data), header=None)
expected = self.read_csv(StringIO(data.replace('\r', '\n')),
header=None)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data))
expected = self.read_csv(StringIO(data.replace('\r', '\n')))
tm.assert_frame_equal(result, expected)
def test_raise_on_no_columns(self):
# single newline
data = "\n"
self.assertRaises(ValueError, self.read_csv, StringIO(data))
# test with more than a single newline
data = "\n\n\n"
self.assertRaises(ValueError, self.read_csv, StringIO(data))
def test_warn_if_chunks_have_mismatched_type(self):
# Issue #3866 If chunks are different types and can't
# be coerced using numerical types, then issue warning.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(DtypeWarning):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_invalid_c_parser_opts_with_not_c_parser(self):
from pandas.io.parsers import _c_parser_defaults as c_defaults
data = """1,2,3,,
1,2,3,4,
1,2,3,4,5
1,2,,,
1,2,3,4,"""
engines = 'python', 'python-fwf'
for default in c_defaults:
for engine in engines:
kwargs = {default: object()}
with tm.assertRaisesRegexp(ValueError,
'The %r option is not supported '
'with the %r engine' % (default,
engine)):
read_csv(StringIO(data), engine=engine, **kwargs)
def test_passing_dtype(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the dtype argument is supported by all engines.
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
def test_fallback_to_python(self):
# GH 6607
data = 'a b c\n1 2 3'
# specify C engine with C-unsupported options (raise)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep=None,
delim_whitespace=False)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep='\s')
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', skip_footer=1)
def test_raise_on_sep_with_delim_whitespace(self):
# GH 6607
data = 'a b c\n1 2 3'
with tm.assertRaisesRegexp(ValueError, 'you can only specify one'):
self.read_table(StringIO(data), sep='\s', delim_whitespace=True)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), lineterminator='\n',
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_bool_header_arg(self):
# GH 6114
data = """\
MyColumn
a
b
a
b"""
for arg in [True, False]:
with tm.assertRaises(TypeError):
pd.read_csv(StringIO(data), header=arg)
with tm.assertRaises(TypeError):
pd.read_table(StringIO(data), header=arg)
with tm.assertRaises(TypeError):
pd.read_fwf(StringIO(data), header=arg)
def test_multithread_stringio_read_csv(self):
# GH 11786
max_row_range = 10000
num_files = 100
bytes_to_df = [
'\n'.join(
['%d,%d,%d' % (i, i, i) for i in range(max_row_range)]
).encode() for j in range(num_files)]
files = [BytesIO(b) for b in bytes_to_df]
# Read all files in many threads
pool = ThreadPool(8)
results = pool.map(pd.read_csv, files)
first_result = results[0]
for result in results:
tm.assert_frame_equal(first_result, result)
def test_multithread_path_multipart_read_csv(self):
# GH 11786
num_tasks = 4
file_name = '__threadpool_reader__.csv'
num_rows = 100000
df = self.construct_dataframe(num_rows)
with tm.ensure_clean(file_name) as path:
df.to_csv(path)
final_dataframe = self.generate_multithread_dataframe(path,
num_rows,
num_tasks)
tm.assert_frame_equal(df, final_dataframe)
class TestMiscellaneous(tm.TestCase):
# for tests that don't fit into any of the other classes, e.g. those that
# compare results for different engines or test the behavior when 'engine'
# is not passed
def test_compare_whitespace_regex(self):
# GH 6607
data = ' a b c\n1 2 3 \n4 5 6\n 7 8 9'
result_c = pd.read_table(StringIO(data), sep='\s+', engine='c')
result_py = pd.read_table(StringIO(data), sep='\s+', engine='python')
print(result_c)
tm.assert_frame_equal(result_c, result_py)
def test_fallback_to_python(self):
# GH 6607
data = 'a b c\n1 2 3'
# specify C-unsupported options with python-unsupported option
# (options will be ignored on fallback, raise)
with tm.assertRaisesRegexp(ValueError, 'Falling back'):
pd.read_table(StringIO(data), sep=None,
delim_whitespace=False, dtype={'a': float})
with tm.assertRaisesRegexp(ValueError, 'Falling back'):
pd.read_table(StringIO(data), sep='\s', dtype={'a': float})
with tm.assertRaisesRegexp(ValueError, 'Falling back'):
pd.read_table(StringIO(data), skip_footer=1, dtype={'a': float})
# specify C-unsupported options without python-unsupported options
with tm.assert_produces_warning(parsers.ParserWarning):
pd.read_table(StringIO(data), sep=None, delim_whitespace=False)
with tm.assert_produces_warning(parsers.ParserWarning):
pd.read_table(StringIO(data), sep='\s')
with tm.assert_produces_warning(parsers.ParserWarning):
pd.read_table(StringIO(data), skip_footer=1)
class TestParseSQL(tm.TestCase):
def test_convert_sql_column_floats(self):
arr = np.array([1.5, None, 3, 4.2], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8')
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_strings(self):
arr = np.array(['1.5', None, '3', '4.2'], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array(['1.5', np.nan, '3', '4.2'], dtype=object)
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_unicode(self):
arr = np.array([u('1.5'), None, u('3'), u('4.2')],
dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([u('1.5'), np.nan, u('3'), u('4.2')],
dtype=object)
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_ints(self):
arr = np.array([1, 2, 3, 4], dtype='O')
arr2 = np.array([1, 2, 3, 4], dtype='i4').astype('O')
result = lib.convert_sql_column(arr)
result2 = lib.convert_sql_column(arr2)
expected = np.array([1, 2, 3, 4], dtype='i8')
assert_same_values_and_dtype(result, expected)
assert_same_values_and_dtype(result2, expected)
arr = np.array([1, 2, 3, None, 4], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_longs(self):
arr = np.array([long(1), long(2), long(3), long(4)], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, 4], dtype='i8')
assert_same_values_and_dtype(result, expected)
arr = np.array([long(1), long(2), long(3), None, long(4)], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_bools(self):
arr = np.array([True, False, True, False], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([True, False, True, False], dtype=bool)
assert_same_values_and_dtype(result, expected)
arr = np.array([True, False, None, False], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([True, False, np.nan, False], dtype=object)
assert_same_values_and_dtype(result, expected)
def test_convert_sql_column_decimals(self):
from decimal import Decimal
arr = np.array([Decimal('1.5'), None, Decimal('3'), Decimal('4.2')])
result = lib.convert_sql_column(arr)
expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8')
assert_same_values_and_dtype(result, expected)
class TestUrlGz(tm.TestCase):
def setUp(self):
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
self.local_table = read_table(localtable)
@tm.network
def test_url_gz(self):
url = 'https://raw.github.com/pydata/pandas/master/pandas/io/tests/data/salary.table.gz'
url_table = read_table(url, compression="gzip", engine="python")
tm.assert_frame_equal(url_table, self.local_table)
@tm.network
def test_url_gz_infer(self):
url = ('https://s3.amazonaws.com/pandas-test/salary.table.gz')
url_table = read_table(url, compression="infer", engine="python")
tm.assert_frame_equal(url_table, self.local_table)
class TestS3(tm.TestCase):
def setUp(self):
try:
import boto
except ImportError:
raise nose.SkipTest("boto not installed")
@tm.network
def test_parse_public_s3_bucket(self):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
if comp == 'bz2' and compat.PY2:
# The Python 2 C parser can't read bz2 from S3.
self.assertRaises(ValueError, pd.read_csv,
's3://pandas-test/tips.csv' + ext,
compression=comp)
else:
df = pd.read_csv('s3://pandas-test/tips.csv' +
ext, compression=comp)
self.assertTrue(isinstance(df, pd.DataFrame))
self.assertFalse(df.empty)
tm.assert_frame_equal(pd.read_csv(
tm.get_data_path('tips.csv')), df)
# Read public file from bucket with not-public contents
df = pd.read_csv('s3://cant_get_it/tips.csv')
self.assertTrue(isinstance(df, pd.DataFrame))
self.assertFalse(df.empty)
tm.assert_frame_equal(pd.read_csv(tm.get_data_path('tips.csv')), df)
@tm.network
def test_parse_public_s3n_bucket(self):
# Read from AWS s3 as "s3n" URL
df = pd.read_csv('s3n://pandas-test/tips.csv', nrows=10)
self.assertTrue(isinstance(df, pd.DataFrame))
self.assertFalse(df.empty)
tm.assert_frame_equal(pd.read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
@tm.network
def test_parse_public_s3a_bucket(self):
# Read from AWS s3 as "s3a" URL
df = pd.read_csv('s3a://pandas-test/tips.csv', nrows=10)
self.assertTrue(isinstance(df, pd.DataFrame))
self.assertFalse(df.empty)
tm.assert_frame_equal(pd.read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
@tm.network
def test_parse_public_s3_bucket_nrows(self):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
if comp == 'bz2' and compat.PY2:
# The Python 2 C parser can't read bz2 from S3.
self.assertRaises(ValueError, pd.read_csv,
's3://pandas-test/tips.csv' + ext,
compression=comp)
else:
df = pd.read_csv('s3://pandas-test/tips.csv' +
ext, nrows=10, compression=comp)
self.assertTrue(isinstance(df, pd.DataFrame))
self.assertFalse(df.empty)
tm.assert_frame_equal(pd.read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
@tm.network
def test_parse_public_s3_bucket_chunked(self):
# Read with a chunksize
chunksize = 5
local_tips = pd.read_csv(tm.get_data_path('tips.csv'))
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
if comp == 'bz2' and compat.PY2:
# The Python 2 C parser can't read bz2 from S3.
self.assertRaises(ValueError, pd.read_csv,
's3://pandas-test/tips.csv' + ext,
compression=comp)
else:
df_reader = pd.read_csv('s3://pandas-test/tips.csv' + ext,
chunksize=chunksize, compression=comp)
self.assertEqual(df_reader.chunksize, chunksize)
for i_chunk in [0, 1, 2]:
# Read a couple of chunks and make sure we see them
# properly.
df = df_reader.get_chunk()
self.assertTrue(isinstance(df, pd.DataFrame))
self.assertFalse(df.empty)
true_df = local_tips.iloc[
chunksize * i_chunk: chunksize * (i_chunk + 1)]
# Chunking doesn't preserve row numbering
true_df = true_df.reset_index().drop('index', axis=1)
tm.assert_frame_equal(true_df, df)
@tm.network
def test_parse_public_s3_bucket_chunked_python(self):
# Read with a chunksize using the Python parser
chunksize = 5
local_tips = pd.read_csv( | tm.get_data_path('tips.csv') | pandas.util.testing.get_data_path |
# -*- coding: utf-8 -*-
import io
import pandas as pd
import requests
from jqdatasdk import auth, get_price, logout
from zvt.api.common import generate_kdata_id, to_jq_security_id
from zvt.api.technical import get_kdata
from zvt.domain import TradingLevel, SecurityType, Provider, Stock1DKdata, StoreCategory, Stock
from zvt.recorders.recorder import TimeSeriesFetchingStyle, FixedCycleDataRecorder, ApiWrapper
from zvt.settings import JQ_ACCOUNT, JQ_PASSWD
from zvt.utils import utils
from zvt.utils.time_utils import to_time_str, TIME_FORMAT_DAY1, now_time_str, to_pd_timestamp
from zvt.utils.utils import init_process_log
class MyApiWrapper(ApiWrapper):
def request(self, url=None, method='get', param=None, path_fields=None):
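        # Fetch one security's daily bars as GB2312-encoded CSV from the quote URL
        # (the exact endpoint is assumed from the format string passed in, not shown
        # here), then normalize the Chinese column headers to the English field names
        # used downstream (timestamp/open/close/high/low/volume/turnover/...).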
security_item = param['security_item']
if security_item.exchange == 'sh':
exchange_flag = 0
else:
exchange_flag = 1
url = url.format(exchange_flag, security_item.code, param['start'], param['end'])
response = requests.get(url=url)
df = utils.read_csv(io.BytesIO(response.content), encoding='GB2312', na_values='None')
        if df is None:
            return []
df['name'] = security_item.name
        # Index (market index) data
if security_item.type == 'index':
df = df.loc[:,
['日期', 'name', '最低价', '开盘价', '收盘价', '最高价', '成交量', '成交金额', '涨跌幅']]
df.columns = ['timestamp', 'name', 'low', 'open', 'close', 'high', 'volume', 'turnover', 'change_pct']
        # Stock data
else:
df = df.loc[:,
['日期', 'name', '最低价', '开盘价', '收盘价', '最高价', '成交量', '成交金额', '涨跌幅', '换手率']]
df.columns = ['timestamp', 'name', 'low', 'open', 'close', 'high', 'volume', 'turnover', 'change_pct',
'turnover_rate']
df['timestamp'] = | pd.to_datetime(df['timestamp']) | pandas.to_datetime |
from datetime import datetime
import numpy as np
import pytest
from pandas.core.dtypes.cast import find_common_type, is_dtype_equal
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
class TestDataFrameCombineFirst:
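    # combine_first patches missing (NaN) entries of the calling frame with values
    # from the other frame, aligning on both index and columns.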
def test_combine_first_mixed(self):
a = Series(["a", "b"], index=range(2))
b = Series(range(2), index=range(2))
f = DataFrame({"A": a, "B": b})
a = Series(["a", "b"], index=range(5, 7))
b = Series(range(2), index=range(5, 7))
g = DataFrame({"A": a, "B": b})
exp = DataFrame({"A": list("abab"), "B": [0, 1, 0, 1]}, index=[0, 1, 5, 6])
combined = f.combine_first(g)
tm.assert_frame_equal(combined, exp)
def test_combine_first(self, float_frame):
# disjoint
head, tail = float_frame[:5], float_frame[5:]
combined = head.combine_first(tail)
reordered_frame = float_frame.reindex(combined.index)
tm.assert_frame_equal(combined, reordered_frame)
assert tm.equalContents(combined.columns, float_frame.columns)
tm.assert_series_equal(combined["A"], reordered_frame["A"])
# same index
fcopy = float_frame.copy()
fcopy["A"] = 1
del fcopy["C"]
fcopy2 = float_frame.copy()
fcopy2["B"] = 0
del fcopy2["D"]
combined = fcopy.combine_first(fcopy2)
assert (combined["A"] == 1).all()
tm.assert_series_equal(combined["B"], fcopy["B"])
tm.assert_series_equal(combined["C"], fcopy2["C"])
tm.assert_series_equal(combined["D"], fcopy["D"])
# overlap
head, tail = reordered_frame[:10].copy(), reordered_frame
head["A"] = 1
combined = head.combine_first(tail)
assert (combined["A"][:10] == 1).all()
# reverse overlap
tail["A"][:10] = 0
combined = tail.combine_first(head)
assert (combined["A"][:10] == 0).all()
# no overlap
f = float_frame[:10]
g = float_frame[10:]
combined = f.combine_first(g)
tm.assert_series_equal(combined["A"].reindex(f.index), f["A"])
tm.assert_series_equal(combined["A"].reindex(g.index), g["A"])
# corner cases
comb = float_frame.combine_first(DataFrame())
tm.assert_frame_equal(comb, float_frame)
comb = DataFrame().combine_first(float_frame)
tm.assert_frame_equal(comb, float_frame)
comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
assert "faz" in comb.index
# #2525
df = DataFrame({"a": [1]}, index=[datetime(2012, 1, 1)])
df2 = DataFrame(columns=["b"])
result = df.combine_first(df2)
assert "b" in result
def test_combine_first_mixed_bug(self):
idx = Index(["a", "b", "c", "e"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "e"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame1 = DataFrame({"col0": ser1, "col2": ser2, "col3": ser3})
idx = Index(["a", "b", "c", "f"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "f"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame2 = DataFrame({"col1": ser1, "col2": ser2, "col5": ser3})
combined = frame1.combine_first(frame2)
assert len(combined.columns) == 5
def test_combine_first_same_as_in_update(self):
# gh 3016 (same as in update)
df = DataFrame(
[[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
result = df.combine_first(other)
tm.assert_frame_equal(result, df)
df.loc[0, "A"] = np.nan
result = df.combine_first(other)
df.loc[0, "A"] = 45
tm.assert_frame_equal(result, df)
def test_combine_first_doc_example(self):
# doc example
df1 = DataFrame(
{"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]}
)
df2 = DataFrame(
{
"A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0],
"B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0],
}
)
result = df1.combine_first(df2)
expected = DataFrame({"A": [1, 2, 3, 5, 3, 7.0], "B": [np.nan, 2, 3, 4, 6, 8]})
tm.assert_frame_equal(result, expected)
def test_combine_first_return_obj_type_with_bools(self):
# GH3552
df1 = DataFrame(
[[np.nan, 3.0, True], [-4.6, np.nan, True], [np.nan, 7.0, False]]
)
df2 = DataFrame([[-42.6, np.nan, True], [-5.0, 1.6, False]], index=[1, 2])
expected = Series([True, True, False], name=2, dtype=bool)
result_12 = df1.combine_first(df2)[2]
tm.assert_series_equal(result_12, expected)
result_21 = df2.combine_first(df1)[2]
tm.assert_series_equal(result_21, expected)
@pytest.mark.parametrize(
"data1, data2, data_expected",
(
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
),
)
def test_combine_first_convert_datatime_correctly(
self, data1, data2, data_expected
):
# GH 3593
df1, df2 = DataFrame({"a": data1}), DataFrame({"a": data2})
result = df1.combine_first(df2)
expected = DataFrame({"a": data_expected})
tm.assert_frame_equal(result, expected)
def test_combine_first_align_nan(self):
# GH 7509 (not fixed)
dfa = DataFrame([[pd.Timestamp("2011-01-01"), 2]], columns=["a", "b"])
dfb = DataFrame([[4], [5]], columns=["b"])
assert dfa["a"].dtype == "datetime64[ns]"
assert dfa["b"].dtype == "int64"
res = dfa.combine_first(dfb)
exp = DataFrame(
{"a": [pd.Timestamp("2011-01-01"), pd.NaT], "b": [2, 5]},
columns=["a", "b"],
)
tm.assert_frame_equal(res, exp)
assert res["a"].dtype == "datetime64[ns]"
# ToDo: this must be int64
assert res["b"].dtype == "int64"
res = dfa.iloc[:0].combine_first(dfb)
exp = DataFrame({"a": [np.nan, np.nan], "b": [4, 5]}, columns=["a", "b"])
tm.assert_frame_equal(res, exp)
# ToDo: this must be datetime64
assert res["a"].dtype == "float64"
# ToDo: this must be int64
assert res["b"].dtype == "int64"
def test_combine_first_timezone(self):
# see gh-7630
data1 = pd.to_datetime("20100101 01:01").tz_localize("UTC")
df1 = DataFrame(
columns=["UTCdatetime", "abc"],
data=data1,
index=pd.date_range("20140627", periods=1),
)
data2 = pd.to_datetime("20121212 12:12").tz_localize("UTC")
df2 = DataFrame(
columns=["UTCdatetime", "xyz"],
data=data2,
index=pd.date_range("20140628", periods=1),
)
res = df2[["UTCdatetime"]].combine_first(df1)
exp = DataFrame(
{
"UTCdatetime": [
pd.Timestamp("2010-01-01 01:01", tz="UTC"),
pd.Timestamp("2012-12-12 12:12", tz="UTC"),
],
"abc": [pd.Timestamp("2010-01-01 01:01:00", tz="UTC"), pd.NaT],
},
columns=["UTCdatetime", "abc"],
index=pd.date_range("20140627", periods=2, freq="D"),
)
assert res["UTCdatetime"].dtype == "datetime64[ns, UTC]"
assert res["abc"].dtype == "datetime64[ns, UTC]"
tm.assert_frame_equal(res, exp)
# see gh-10567
dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="UTC")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-03", "2015-01-05", tz="UTC")
df2 = DataFrame({"DATE": dts2})
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res["DATE"].dtype == "datetime64[ns, UTC]"
dts1 = pd.DatetimeIndex(
["2011-01-01", "NaT", "2011-01-03", "2011-01-04"], tz="US/Eastern"
)
df1 = DataFrame({"DATE": dts1}, index=[1, 3, 5, 7])
dts2 = pd.DatetimeIndex(
["2012-01-01", "2012-01-02", "2012-01-03"], tz="US/Eastern"
)
df2 = DataFrame({"DATE": dts2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.DatetimeIndex(
[
"2011-01-01",
"2012-01-01",
"NaT",
"2012-01-02",
"2011-01-03",
"2011-01-04",
],
tz="US/Eastern",
)
exp = DataFrame({"DATE": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
# different tz
dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="US/Eastern")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-03", "2015-01-05")
df2 = DataFrame({"DATE": dts2})
# if df1 doesn't have NaN, keep its dtype
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res["DATE"].dtype == "datetime64[ns, US/Eastern]"
dts1 = pd.date_range("2015-01-01", "2015-01-02", tz="US/Eastern")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-01", "2015-01-03")
df2 = DataFrame({"DATE": dts2})
res = df1.combine_first(df2)
exp_dts = [
pd.Timestamp("2015-01-01", tz="US/Eastern"),
pd.Timestamp("2015-01-02", tz="US/Eastern"),
pd.Timestamp("2015-01-03"),
]
exp = | DataFrame({"DATE": exp_dts}) | pandas.DataFrame |
from __future__ import division
import numpy as np
import pandas as pd
import pickle
import os
from math import ceil
import matplotlib
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
from sklearn.metrics import r2_score
warnings.simplefilter("ignore")
# colors = ["#3366cc", "#dc3912", "#109618", "#990099", "#ff9900"]
colors = sns.color_palette('muted')
labels = ['Remaining', 'First','Last']
def density_plot(df, Accuracy_base, Accuracy_LSTM, Accuracy_NG, save_fig, Out_put_name,model_name_list, Mean_or_median):
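    # Two-panel KDE plot of per-user accuracy (location) or R^2 (duration) for the
    # four models: left panel = first activity of the day, right panel = remaining
    # activities. Dashed vertical lines mark each model's mean or median.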
model = model_name_list
sns.set(font_scale=1.5)
sns.set_style("white", {"legend.frameon": True})
plt.figure(figsize=(14, 7))
ax1 = plt.subplot(1, 2, 1)
cols1 = [df, Accuracy_base, Accuracy_LSTM, Accuracy_NG]
for i in range(len(cols1)):
data = cols1[i]['first']
sns.kdeplot(data, ax=ax1, shade=True, color=colors[i], label=model[i],linewidth=2)
if Mean_or_median == 'Mean':
med = data.mean()
else:
med = data.median()
plt.axvline(med, color=colors[i], linestyle='dashed', linewidth=2)
if i == 1:
if 'duration' in Out_put_name:
plt.text(med - 0.02, 3.0, '{}'.format(round(med, 3)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.02, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
elif i== 2:
if 'duration' in Out_put_name:
plt.text(med + 0.02, 3.1, '{}'.format(round(med, 3)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.02, 3.1, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 0:
if 'duration' in Out_put_name:
plt.text(med + 0.02, 3.3, '{}'.format(round(med, 3)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.02, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 3:
if 'duration' in Out_put_name:
plt.text(med - 0.02, 3.0, '{}'.format(round(med, 3)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.02, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
plt.xlim(0, 1.0)
plt.ylim(0, 3.5)
if 'location' in Out_put_name:
ax1.set_xticklabels([str(i) + '%' for i in range(0, 101, 20)])
plt.xlabel('Prediction accuracy', fontsize=20)
else:
plt.xlabel('R'+r'$^2$', fontsize=20)
plt.ylabel('Density', fontsize=20)
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
if 'location' in Out_put_name:
plt.legend(fontsize=18) #, loc='upper right'
else:
plt.legend(fontsize=18) #
plt.title('First activities',fontsize=20)
# plt.text(-0.1, 1.05, '(a)', fontdict={'size': 20, 'weight': 'bold'},
# transform=ax1.transAxes)
ax2 = plt.subplot(1, 2, 2)
for i in range(len(cols1)):
data = cols1[i]['Middle']
sns.kdeplot(data, ax=ax2, shade=True, color=colors[i], label=model[i], linewidth=2)
if Mean_or_median == 'Mean':
med = data.mean()
else:
med = data.median()
plt.axvline(med, color=colors[i], linestyle='dashed', linewidth=2, alpha = 1)
if i == 1:
if 'duration' in Out_put_name:
plt.text(med - 0.01, 3.3, '{}'.format(round(med, 3)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i== 2:
if 'duration' in Out_put_name:
plt.text(med + 0.023, 3.0, '{}'.format(round(med, 3)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.01, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 0:
if 'duration' in Out_put_name:
plt.text(med + 0.01, 3.0, '{}'.format(round(med, 3)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.01, 3.0, '{}%'.format(round(med* 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 3:
if 'duration' in Out_put_name:
plt.text(med + 0.01, 3.3, '{}'.format(round(med, 3)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.01, 3.3, '{}%'.format(round(med* 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
plt.xlim(0, 1.0)
plt.ylim(0, 3.5)
if 'location' in Out_put_name:
ax2.set_xticklabels([str(i) + '%' for i in range(0, 101, 20)])
plt.xlabel('Prediction accuracy', fontsize=20)
else:
plt.xlabel('R'+r'$^2$', fontsize=20)
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
plt.ylabel('Density', fontsize=20)
if 'location' in Out_put_name:
plt.legend(fontsize=18) #, loc='upper right'
else:
plt.legend(fontsize=18) #
plt.title('Remaining activities',fontsize=20)
# plt.text(-0.1, 1.05, '(b)', fontdict={'size': 20, 'weight': 'bold'},
# transform=ax2.transAxes)
plt.tight_layout()
if save_fig == 0:
plt.show()
else:
plt.savefig('img/' + Out_put_name, dpi=200)
def density_plot_duration_error(df, Accuracy_base, Accuracy_LSTM, save_fig, Out_put_name, model_name_list, Mean_or_median):
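    # Same two-panel layout as density_plot, but for duration-error distributions
    # and only three models (no n-gram results are passed in).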
model = model_name_list
sns.set(font_scale=1.5)
sns.set_style("white", {"legend.frameon": True})
plt.figure(figsize=(14, 7))
ax1 = plt.subplot(1, 2, 1)
cols1 = [df, Accuracy_base, Accuracy_LSTM]
for i in range(len(cols1)):
data = cols1[i]['first']
sns.kdeplot(data, ax=ax1, shade=True, color=colors[i], label=model[i])
if Mean_or_median == 'Mean':
med = data.mean()
else:
med = data.median()
plt.axvline(med, color=colors[i], linestyle='dashed', linewidth=2)
if i == 0:
plt.text(med + 0.02, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 2:
if 'duration' in Out_put_name:
plt.text(med + 0.02, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.02, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
# plt.xlim(0, 1.0)
# plt.ylim(0, 3.5)
# ax1.set_xticklabels([str(i) + '%' for i in range(0, 101, 20)])
# plt.xlabel('Prediction Accuracy', fontsize=20)
plt.ylabel('Density (Users)', fontsize=20)
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
if 'location' in Out_put_name:
plt.legend(fontsize=18) # , loc='upper right'
else:
plt.legend(fontsize=18, loc='center right') #
plt.title('First Activities', fontsize=20)
# plt.text(-0.1, 1.05, '(a)', fontdict={'size': 20, 'weight': 'bold'},
# transform=ax1.transAxes)
ax2 = plt.subplot(1, 2, 2)
for i in range(len(cols1)):
data = cols1[i]['Remaining']
sns.kdeplot(data, ax=ax2, shade=True, color=colors[i], label=model[i])
if Mean_or_median == 'Mean':
med = data.mean()
else:
med = data.median()
plt.axvline(med, color=colors[i], linestyle='dashed', linewidth=2)
if i == 1:
if 'duration' in Out_put_name:
plt.text(med - 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 2:
if 'duration' in Out_put_name:
plt.text(med + 0.023, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.01, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
# plt.xlim(0, 1.0)
# plt.ylim(0, 3.5)
# ax2.set_xticklabels([str(i) + '%' for i in range(0, 101, 20)])
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
# plt.xlabel('Prediction Accuracy', fontsize=20)
plt.ylabel('Density (User-level)', fontsize=20)
if 'location' in Out_put_name:
plt.legend(fontsize=18) # , loc='upper right'
else:
plt.legend(fontsize=18, loc='center right') #
plt.title('Remaining Activities', fontsize=20)
# plt.text(-0.1, 1.05, '(b)', fontdict={'size': 20, 'weight': 'bold'},
# transform=ax2.transAxes)
plt.tight_layout()
if save_fig == 0:
plt.show()
else:
plt.savefig('img/' + Out_put_name, dpi=200)
def density_plot_not_seperate_mid_first(df, Accuracy_base, Accuracy_LSTM, save_fig, Out_put_name, model_name_list):
model = model_name_list
sns.set(font_scale=1.5)
sns.set_style("white", {"legend.frameon": True})
plt.figure(figsize=(7, 7))
ax1 = plt.subplot(1, 1, 1)
cols1 = [df, Accuracy_base, Accuracy_LSTM]
for i in range(len(cols1)):
data = cols1[i]['all']
sns.kdeplot(data, ax=ax1, shade=True, color=colors[i], label=model[i])
med = data.mean()
plt.axvline(med, color=colors[i], linestyle='dashed', linewidth=2)
if i == 0:
plt.text(med + 0.02, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
elif i == 2:
if 'duration' in Out_put_name:
plt.text(med + 0.02, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med + 0.02, 3.0, '{}%'.format(round(med * 100, 1)),
horizontalalignment='left', verticalalignment='center',
fontsize=18, color=colors[i])
else:
plt.text(med - 0.01, 3.3, '{}%'.format(round(med * 100, 1)),
horizontalalignment='right', verticalalignment='center',
fontsize=18, color=colors[i])
plt.xlim(0, 1.0)
plt.ylim(0, 3.5)
ax1.set_xticklabels([str(i) + '%' for i in range(0, 101, 20)])
plt.xlabel('Prediction Accuracy', fontsize=20)
plt.ylabel('Density (Users)', fontsize=20)
plt.yticks(fontsize=20)
plt.xticks(fontsize=20)
if 'location' in Out_put_name:
plt.legend(fontsize=18) # , loc='upper right'
else:
plt.legend(fontsize=18) #
plt.tight_layout()
if save_fig == 0:
plt.show()
else:
plt.savefig('img/' + Out_put_name, dpi=200)
def data_process_continuous(data):
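    # Split errors (converted to hours) and exact-match accuracy between the first
    # activity of each day (activity_index == 0) and all remaining activities.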
error_first_temp = (data['Predict1'].loc[data['activity_index']==0] - data['Ground_truth'].loc[data['activity_index']==0])/3600
Accuracy_first_temp = sum(np.array(data['Correct'].loc[data['activity_index']==0]))/data['Correct'].loc[data['activity_index']==0].count()
data_temp = data.loc[data['activity_index']!=0]
# data_temp = data
error_Remaining_temp = (data_temp['Predict1'] - data_temp['Ground_truth'])/3600
Accuracy_temp = sum(np.array(data_temp['Correct']))/data_temp['Correct'].count()
accuracy_all = sum(np.array(data['Correct']))/data['Correct'].count()
return error_first_temp, Accuracy_first_temp, error_Remaining_temp, Accuracy_temp,accuracy_all
def calculate_error(result_df):
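    # Clip predicted durations to [1, 86400] seconds (one day), then report RMSE,
    # MAPE, MAE and R^2 of the predictions against the ground-truth durations.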
# correct error data
result_df.loc[result_df['Predict_duration'] > 86400, 'Predict_duration'] = 86400
result_df.loc[result_df['Predict_duration'] <= 0, 'Predict_duration'] = 1
######
result_df['error_sq'] = (result_df['Predict_duration'] - result_df['Ground_truth_duration']) ** 2
result_df['error_abs'] = np.abs(result_df['Predict_duration'] - result_df['Ground_truth_duration'])
RMSE = np.sqrt(np.mean(result_df['error_sq']))
MAPE = np.mean(result_df['error_abs'] / result_df['Ground_truth_duration'])
MAE = np.mean(result_df['error_abs'])
if len(result_df) > 0:
R_sq = r2_score(result_df['Ground_truth_duration'], result_df['Predict_duration'])
else:
R_sq = None
return RMSE, MAPE, MAE, R_sq
def r_sq_for_two_parts(data,y_mean):
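    # R^2 = 1 - SS_res / SS_tot, with SS_tot taken against y_mean -- the mean of the
    # combined sample -- so the "first" and "remaining" subsets are scored against a
    # common baseline and their R^2 values remain comparable.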
data['RES'] = (data['Ground_truth_duration'] - data['Predict_duration'])**2
data['TOT'] = (data['Ground_truth_duration'] - y_mean)**2
R_sq = 1 - sum(data['RES'])/sum(data['TOT'])
return R_sq
def data_process_continuous_R_sq(data):
_, _, _, R_sq_all = calculate_error(data)
data_first = data.loc[data['activity_index']==0].copy()
data_Remaining = data.loc[data['activity_index']!=0].copy()
mean_y = np.mean(data['Ground_truth_duration'])
R_sq_first = r_sq_for_two_parts(data_first, mean_y)
if len(data_Remaining)>0:
R_sq_Remaining = r_sq_for_two_parts(data_Remaining, mean_y)
else:
R_sq_Remaining = None
return R_sq_first, R_sq_Remaining, R_sq_all
def data_process_continuous_RMSE(data):
RMSE_all, _, _, _ = calculate_error(data)
data_first = data.loc[data['activity_index']==0].copy()
data_Remaining = data.loc[data['activity_index']!=0].copy()
RMSE_first, _, _, R_sq_first = calculate_error(data_first)
RMSE_Remaining, _, _, R_sq_Remaining = calculate_error(data_Remaining)
return RMSE_first, RMSE_Remaining, RMSE_all
def data_process_continuous_MAPE(data):
_, MAPE_all, _, _ = calculate_error(data)
data_first = data.loc[data['activity_index']==0].copy()
data_Remaining = data.loc[data['activity_index']!=0].copy()
_, MAPE_first, _, R_sq_first = calculate_error(data_first)
_, MAPE_Remaining, _, R_sq_Remaining = calculate_error(data_Remaining)
return MAPE_first, MAPE_Remaining, MAPE_all
def data_process_discrete(data):
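    # Discrete (location) counterpart of data_process_continuous: errors are left in
    # raw label units and accuracy is the share of exactly correct predictions.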
error_first_temp = (data['Predict1'].loc[data['activity_index']==0] - data['Ground_truth'].loc[data['activity_index']==0])
Accuracy_first_temp = sum(np.array(data['Correct'].loc[data['activity_index']==0]))/data['Correct'].loc[data['activity_index']==0].count()
data_temp = data.loc[data['activity_index']!=0]
# data_temp = data
error_Remaining_temp = (data_temp['Predict1'] - data_temp['Ground_truth'])
Accuracy_temp = sum(np.array(data_temp['Correct']))/data_temp['Correct'].count()
accuracy_all = sum(np.array(data['Correct'])) / data['Correct'].count()
return error_first_temp, Accuracy_first_temp, error_Remaining_temp, Accuracy_temp, accuracy_all
def generate_accuracy_file(individual_ID_list, output_fig, duration_error):
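    # Aggregate per-user results for the IOHMM, LSTM and n-gram models from their
    # result CSVs (skipping users whose files are missing) into dicts keyed by
    # 'first', 'Remaining' and 'all'; for duration the requested error metric
    # (R^2, RMSE or MAPE) is reported instead of prediction accuracy.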
error_list=[]
total=0
error_Remaining = pd.DataFrame({'Remaining':[]})
error_first = pd.DataFrame({'first':[]})
error_Remaining_base = pd.DataFrame({'Remaining':[]})
error_first_base = pd.DataFrame({'first':[]})
Accuracy = {'Card_ID':[], 'Remaining':[],'first':[],'all':[]}
Accuracy_base = {'Card_ID':[], 'Remaining':[],'first':[],'all':[]}
Accuracy_LSTM = {'Card_ID': [], 'Remaining': [], 'first': [], 'all': []}
Accuracy_NG = {'Card_ID': [], 'Remaining': [], 'first': [], 'all': []}
# data
Card_ID_used = []
# individual_ID_list = individual_ID_list[0:80]
#############IOHMM
for Card_ID in individual_ID_list:
# if output_fig == 'duration':
# file_name = data_path + 'results/result_' + str(Card_ID) + 'test' + '.csv'
# else:
# file_name = data_path + 'results/result_Location_' + str(Card_ID) + 'test' + '.csv'
file_name = data_path + 'results/result_con_dur+loc_' + str(Card_ID) + 'test' + '.csv'
if os.path.exists(file_name) == False:
print(Card_ID,'does not exist for IOHMM')
continue
else:
Card_ID_used.append(Card_ID)
data = pd.read_csv(file_name)
if output_fig == 'duration':
if duration_error == 'RMSE':
R_sq_first, R_sq_Remaining, R_sq_all = data_process_continuous_RMSE(data)
elif duration_error == 'MAPE':
R_sq_first, R_sq_Remaining, R_sq_all = data_process_continuous_MAPE(data)
else:
R_sq_first, R_sq_Remaining, R_sq_all = data_process_continuous_R_sq(data)
Accuracy['first'].append(R_sq_first)
Accuracy['Remaining'].append(R_sq_Remaining)
Accuracy['all'].append(R_sq_all)
Accuracy['Card_ID'].append(Card_ID)
else:
error_first_temp, Accuracy_first_temp, error_Remaining_temp, Accuracy_temp, accuracy_all = data_process_discrete(data)
#print (error_first_temp)
error_first = pd.concat([error_first, error_first_temp], axis = 0)
error_Remaining = pd.concat([error_Remaining, error_Remaining_temp], axis = 0)
Accuracy['first'].append(Accuracy_first_temp)
Accuracy['Remaining'].append(Accuracy_temp)
Accuracy['all'].append(accuracy_all)
Accuracy['Card_ID'].append(Card_ID)
# data
############## LSTM
Card_ID_used_for_base = list(set(Card_ID_used))
for Card_ID in Card_ID_used_for_base:
if output_fig == 'duration':
# file_name = data_path + 'results/result_LSTM' + str(Card_ID) + 'test' + '.csv'
file_name = data_path + 'results/result_LSTM_con_dur' + str(Card_ID) + 'test' + '.csv'
#file_name = data_path + 'results/result_NGRAM_con_dur_' + str(Card_ID) + '.csv'
else:
file_name = data_path + 'results/result_Location_LSTM' + str(Card_ID) + 'test' + '.csv'
#file_name = data_path + 'results/result_NGRAM_location_' + str(Card_ID) + '.csv'
if os.path.exists(file_name) == False:
print(Card_ID,'does not exist for LSTM')
continue
data = pd.read_csv(file_name)
if output_fig == 'duration':
if duration_error == 'RMSE':
R_sq_first, R_sq_Remaining, R_sq_all = data_process_continuous_RMSE(data)
elif duration_error == 'MAPE':
R_sq_first, R_sq_Remaining, R_sq_all = data_process_continuous_MAPE(data)
else:
R_sq_first, R_sq_Remaining, R_sq_all = data_process_continuous_R_sq(data)
Accuracy_LSTM['first'].append(R_sq_first)
Accuracy_LSTM['Remaining'].append(R_sq_Remaining)
Accuracy_LSTM['all'].append(R_sq_all)
Accuracy_LSTM['Card_ID'].append(Card_ID)
else:
error_first_temp, Accuracy_first_temp, error_Remaining_temp, Accuracy_temp, accuracy_all = data_process_discrete(data)
#print (error_first_temp)
error_first = pd.concat([error_first, error_first_temp], axis = 0)
error_Remaining = pd.concat([error_Remaining, error_Remaining_temp], axis = 0)
Accuracy_LSTM['first'].append(Accuracy_first_temp)
Accuracy_LSTM['Remaining'].append(Accuracy_temp)
Accuracy_LSTM['all'].append(accuracy_all)
Accuracy_LSTM['Card_ID'].append(Card_ID)
############## NG
Card_ID_used_for_base = list(set(Card_ID_used))
for Card_ID in Card_ID_used_for_base:
if output_fig == 'duration':
# file_name = data_path + 'results/result_LSTM' + str(Card_ID) + 'test' + '.csv'
#file_name = data_path + 'results/result_LSTM_con_dur' + str(Card_ID) + 'test' + '.csv'
file_name = data_path + 'results/result_NGRAM_con_dur_' + str(Card_ID) + '.csv'
else:
#file_name = data_path + 'results/result_Location_LSTM' + str(Card_ID) + 'test' + '.csv'
file_name = data_path + 'results/result_NGRAM_location_' + str(Card_ID) + '.csv'
if os.path.exists(file_name) == False:
print(Card_ID,'does not exist for NG')
continue
data = | pd.read_csv(file_name) | pandas.read_csv |
from __future__ import unicode_literals, division, print_function
import numpy as np
import pandas as pd
from pymatgen.core import Structure, Lattice
from pymatgen.util.testing import PymatgenTest
from pymatgen.analysis.local_env import VoronoiNN, JmolNN, CrystalNN
from matminer.featurizers.site import AGNIFingerprints, \
OPSiteFingerprint, CrystalNNFingerprint, \
EwaldSiteEnergy, \
VoronoiFingerprint, IntersticeDistribution, ChemEnvSiteFingerprint, \
CoordinationNumber, ChemicalSRO, GaussianSymmFunc, \
GeneralizedRadialDistributionFunction, AngularFourierSeries, \
LocalPropertyDifference, SOAP, BondOrientationalParameter, \
SiteElementalProperty, AverageBondLength, AverageBondAngle
from matminer.featurizers.deprecated import CrystalSiteFingerprint
from matminer.featurizers.utils.grdf import Gaussian
class FingerprintTests(PymatgenTest):
def setUp(self):
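        # Reference structures shared by the fingerprint tests: simple-cubic Al,
        # a slightly off-centre CsCl cell, a B1 H/He cell, diamond C, rock-salt NaCl,
        # and L1_2-ordered Ni3Al.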
self.sc = Structure(
Lattice([[3.52, 0, 0], [0, 3.52, 0], [0, 0, 3.52]]),
["Al", ],
[[0, 0, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False)
self.cscl = Structure(
Lattice([[4.209, 0, 0], [0, 4.209, 0], [0, 0, 4.209]]),
["Cl1-", "Cs1+"], [[0.45, 0.5, 0.5], [0, 0, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False)
self.b1 = Structure(
Lattice([[0,1,1],[1,0,1],[1,1,0]]),
["H", "He"], [[0,0,0],[0.5,0.5,0.5]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False)
self.diamond = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264],
[0, 0, 2.528]]), ["C0+", "C0+"], [[2.554, 1.806, 4.423],
[0.365, 0.258, 0.632]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.nacl = Structure(
Lattice([[3.485, 0, 2.012], [1.162, 3.286, 2.012],
[0, 0, 4.025]]), ["Na1+", "Cl1-"], [[0, 0, 0],
[2.324, 1.643, 4.025]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.ni3al = Structure(
Lattice([[3.52, 0, 0], [0, 3.52, 0], [0, 0, 3.52]]),
["Al", ] + ["Ni"] * 3,
[[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False, site_properties=None)
def test_simple_cubic(self):
"""Test with an easy structure"""
# Make sure direction-dependent fingerprints are zero
agni = AGNIFingerprints(directions=['x', 'y', 'z'])
features = agni.featurize(self.sc, 0)
self.assertEqual(8 * 3, len(features))
self.assertEqual(8 * 3, len(set(agni.feature_labels())))
self.assertArrayAlmostEqual([0, ] * 24, features)
# Compute the "atomic fingerprints"
agni.directions = [None]
agni.cutoff = 3.75 # To only get 6 neighbors to deal with
features = agni.featurize(self.sc, 0)
self.assertEqual(8, len(features))
self.assertEqual(8, len(set(agni.feature_labels())))
self.assertEqual(0.8, agni.etas[0])
self.assertAlmostEqual(6 * np.exp(-(3.52 / 0.8) ** 2) * 0.5 * (np.cos(np.pi * 3.52 / 3.75) + 1), features[0])
self.assertAlmostEqual(6 * np.exp(-(3.52 / 16) ** 2) * 0.5 * (np.cos(np.pi * 3.52 / 3.75) + 1), features[-1])
# Test that passing etas to constructor works
new_etas = np.logspace(-4, 2, 6)
agni = AGNIFingerprints(directions=['x', 'y', 'z'], etas=new_etas)
self.assertArrayAlmostEqual(new_etas, agni.etas)
def test_off_center_cscl(self):
agni = AGNIFingerprints(directions=[None, 'x', 'y', 'z'], cutoff=4)
# Compute the features on both sites
site1 = agni.featurize(self.cscl, 0)
site2 = agni.featurize(self.cscl, 1)
# The atomic attributes should be equal
self.assertArrayAlmostEqual(site1[:8], site2[:8])
# The direction-dependent ones should be equal and opposite in sign
self.assertArrayAlmostEqual(-1 * site1[8:], site2[8:])
# Make sure the site-ones are as expected.
right_dist = 4.209 * np.sqrt(0.45 ** 2 + 2 * 0.5 ** 2)
right_xdist = 4.209 * 0.45
left_dist = 4.209 * np.sqrt(0.55 ** 2 + 2 * 0.5 ** 2)
left_xdist = 4.209 * 0.55
self.assertAlmostEqual(4 * (
right_xdist / right_dist * np.exp(-(right_dist / 0.8) ** 2) * 0.5 * (np.cos(np.pi * right_dist / 4) + 1) -
left_xdist / left_dist * np.exp(-(left_dist / 0.8) ** 2) * 0.5 * (np.cos(np.pi * left_dist / 4) + 1)),
site1[8])
def test_dataframe(self):
data = pd.DataFrame({'strc': [self.cscl, self.cscl, self.sc], 'site': [0, 1, 0]})
agni = AGNIFingerprints()
agni.featurize_dataframe(data, ['strc', 'site'])
def test_op_site_fingerprint(self):
opsf = OPSiteFingerprint()
l = opsf.feature_labels()
t = ['sgl_bd CN_1', 'L-shaped CN_2', 'water-like CN_2', \
'bent 120 degrees CN_2', 'bent 150 degrees CN_2', \
'linear CN_2', 'trigonal planar CN_3', \
'trigonal non-coplanar CN_3', 'T-shaped CN_3', \
'square co-planar CN_4', 'tetrahedral CN_4', \
'rectangular see-saw-like CN_4', 'see-saw-like CN_4', \
'trigonal pyramidal CN_4', 'pentagonal planar CN_5', \
'square pyramidal CN_5', 'trigonal bipyramidal CN_5', \
'hexagonal planar CN_6', 'octahedral CN_6', \
'pentagonal pyramidal CN_6', 'hexagonal pyramidal CN_7', \
'pentagonal bipyramidal CN_7', 'body-centered cubic CN_8', \
'hexagonal bipyramidal CN_8', 'q2 CN_9', 'q4 CN_9', 'q6 CN_9', \
'q2 CN_10', 'q4 CN_10', 'q6 CN_10', \
'q2 CN_11', 'q4 CN_11', 'q6 CN_11', \
'cuboctahedral CN_12', 'q2 CN_12', 'q4 CN_12', 'q6 CN_12']
for i in range(len(l)):
self.assertEqual(l[i], t[i])
ops = opsf.featurize(self.sc, 0)
self.assertEqual(len(ops), 37)
self.assertAlmostEqual(ops[opsf.feature_labels().index(
'octahedral CN_6')], 0.9995, places=7)
ops = opsf.featurize(self.cscl, 0)
self.assertAlmostEqual(ops[opsf.feature_labels().index(
'body-centered cubic CN_8')], 0.8955, places=7)
opsf = OPSiteFingerprint(dist_exp=0)
ops = opsf.featurize(self.cscl, 0)
self.assertAlmostEqual(ops[opsf.feature_labels().index(
'body-centered cubic CN_8')], 0.9555, places=7)
        # The following test aims at ensuring that copying of the OP dictionaries works.
opsfp = OPSiteFingerprint()
cnnfp = CrystalNNFingerprint.from_preset('ops')
self.assertEqual(len([1 for l in opsfp.feature_labels() if l.split()[0] == 'wt']), 0)
def test_crystal_site_fingerprint(self):
with self.assertWarns(FutureWarning):
csf = CrystalSiteFingerprint.from_preset('ops')
l = csf.feature_labels()
t = ['wt CN_1', 'wt CN_2', 'L-shaped CN_2', 'water-like CN_2',
'bent 120 degrees CN_2', 'bent 150 degrees CN_2', 'linear CN_2',
'wt CN_3', 'trigonal planar CN_3', 'trigonal non-coplanar CN_3',
'T-shaped CN_3', 'wt CN_4', 'square co-planar CN_4',
'tetrahedral CN_4', 'rectangular see-saw-like CN_4',
'see-saw-like CN_4', 'trigonal pyramidal CN_4', 'wt CN_5',
'pentagonal planar CN_5', 'square pyramidal CN_5',
'trigonal bipyramidal CN_5', 'wt CN_6', 'hexagonal planar CN_6',
'octahedral CN_6', 'pentagonal pyramidal CN_6', 'wt CN_7',
'hexagonal pyramidal CN_7', 'pentagonal bipyramidal CN_7',
'wt CN_8', 'body-centered cubic CN_8',
'hexagonal bipyramidal CN_8', 'wt CN_9', 'q2 CN_9', 'q4 CN_9',
'q6 CN_9', 'wt CN_10', 'q2 CN_10', 'q4 CN_10', 'q6 CN_10',
'wt CN_11', 'q2 CN_11', 'q4 CN_11', 'q6 CN_11', 'wt CN_12',
'cuboctahedral CN_12', 'q2 CN_12', 'q4 CN_12', 'q6 CN_12']
for i in range(len(l)):
self.assertEqual(l[i], t[i])
ops = csf.featurize(self.sc, 0)
self.assertEqual(len(ops), 48)
self.assertAlmostEqual(ops[csf.feature_labels().index(
'wt CN_6')], 1, places=4)
self.assertAlmostEqual(ops[csf.feature_labels().index(
'octahedral CN_6')], 1, places=4)
ops = csf.featurize(self.cscl, 0)
self.assertAlmostEqual(ops[csf.feature_labels().index(
'wt CN_8')], 0.5575257, places=4)
self.assertAlmostEqual(ops[csf.feature_labels().index(
'body-centered cubic CN_8')], 0.5329344, places=4)
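
# CrystalNNFingerprint: check the 'ops' preset labels and reference values, a custom
# op_types specification, and the optional chem_info local-difference features.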
def test_crystal_nn_fingerprint(self):
cnnfp = CrystalNNFingerprint.from_preset(
'ops', distance_cutoffs=None, x_diff_weight=None)
l = cnnfp.feature_labels()
t = ['wt CN_1', 'sgl_bd CN_1', 'wt CN_2', 'L-shaped CN_2',
'water-like CN_2', 'bent 120 degrees CN_2',
'bent 150 degrees CN_2', 'linear CN_2', 'wt CN_3',
'trigonal planar CN_3', 'trigonal non-coplanar CN_3',
'T-shaped CN_3', 'wt CN_4', 'square co-planar CN_4',
'tetrahedral CN_4', 'rectangular see-saw-like CN_4',
'see-saw-like CN_4', 'trigonal pyramidal CN_4', 'wt CN_5',
'pentagonal planar CN_5', 'square pyramidal CN_5',
'trigonal bipyramidal CN_5', 'wt CN_6', 'hexagonal planar CN_6',
'octahedral CN_6', 'pentagonal pyramidal CN_6', 'wt CN_7',
'hexagonal pyramidal CN_7', 'pentagonal bipyramidal CN_7',
'wt CN_8', 'body-centered cubic CN_8',
'hexagonal bipyramidal CN_8', 'wt CN_9', 'q2 CN_9', 'q4 CN_9',
'q6 CN_9', 'wt CN_10', 'q2 CN_10', 'q4 CN_10', 'q6 CN_10',
'wt CN_11', 'q2 CN_11', 'q4 CN_11', 'q6 CN_11', 'wt CN_12',
'cuboctahedral CN_12', 'q2 CN_12', 'q4 CN_12', 'q6 CN_12',
'wt CN_13', 'wt CN_14', 'wt CN_15', 'wt CN_16', 'wt CN_17',
'wt CN_18', 'wt CN_19', 'wt CN_20', 'wt CN_21', 'wt CN_22',
'wt CN_23', 'wt CN_24']
for i in range(len(l)):
self.assertEqual(l[i], t[i])
ops = cnnfp.featurize(self.sc, 0)
self.assertEqual(len(ops), 61)
self.assertAlmostEqual(ops[cnnfp.feature_labels().index(
'wt CN_6')], 1, places=7)
self.assertAlmostEqual(ops[cnnfp.feature_labels().index(
'octahedral CN_6')], 1, places=7)
ops = cnnfp.featurize(self.cscl, 0)
self.assertAlmostEqual(ops[cnnfp.feature_labels().index(
'wt CN_8')], 0.498099, places=3)
self.assertAlmostEqual(ops[cnnfp.feature_labels().index(
'body-centered cubic CN_8')], 0.47611, places=3)
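
# A custom op_types dict restricts the features to the requested coordination
# numbers and order parameters.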
op_types = {6: ["wt", "oct_max"], 8: ["wt", "bcc"]}
cnnfp = CrystalNNFingerprint(
op_types, distance_cutoffs=None, \
x_diff_weight=None)
labels = ['wt CN_6', 'oct_max CN_6', \
'wt CN_8', 'bcc CN_8']
for l1, l2 in zip(cnnfp.feature_labels(), labels):
self.assertEqual(l1, l2)
feats = cnnfp.featurize(self.sc, 0)
self.assertEqual(len(feats), 4)
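
# Supplying chem_info adds a 'local diff' feature for each property provided.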
chem_info = {"mass": {"Al": 26.9, "Cs+": 132.9,"Cl-": 35.4}, \
"Pauling scale": {"Al": 1.61, "Cs+": 0.79, "Cl-": 3.16}}
cnnchemfp = CrystalNNFingerprint(
op_types, chem_info=chem_info, distance_cutoffs=None, \
x_diff_weight=None)
labels = labels + ['mass local diff', \
'Pauling scale local diff']
for l1, l2 in zip(cnnchemfp.feature_labels(), labels):
self.assertEqual(l1, l2)
feats = cnnchemfp.featurize(self.sc, 0)
self.assertEqual(len(feats), 6)
self.assertAlmostEqual(feats[cnnchemfp.feature_labels().index(
'wt CN_6')], 1, places=7)
self.assertAlmostEqual(feats[cnnchemfp.feature_labels().index(
'oct_max CN_6')], 1, places=7)
self.assertAlmostEqual(feats[cnnchemfp.feature_labels().index(
'mass local diff')], 0, places=7)
self.assertAlmostEqual(feats[cnnchemfp.feature_labels().index(
'Pauling scale local diff')], 0, places=7)
feats = cnnchemfp.featurize(self.cscl, 0)
self.assertAlmostEqual(feats[cnnchemfp.feature_labels().index(
'bcc CN_8')], 0.4761107, places=3)
self.assertAlmostEqual(feats[cnnchemfp.feature_labels().index(
'mass local diff')], 97.5, places=3)
self.assertAlmostEqual(feats[cnnchemfp.feature_labels().index(
'Pauling scale local diff')], -2.37, places=3)
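
# ChemEnv site fingerprints: both the 'multi_weights' and 'simple' presets should
# identify the sc site as octahedral (O:6) and the CsCl site as cubic (C:8).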
def test_chemenv_site_fingerprint(self):
cefp = ChemEnvSiteFingerprint.from_preset('multi_weights')
l = cefp.feature_labels()
cevals = cefp.featurize(self.sc, 0)
self.assertEqual(len(cevals), 66)
self.assertAlmostEqual(cevals[l.index('O:6')], 1, places=7)
self.assertAlmostEqual(cevals[l.index('C:8')], 0, places=7)
cevals = cefp.featurize(self.cscl, 0)
self.assertAlmostEqual(cevals[l.index('C:8')], 0.9953721, places=7)
self.assertAlmostEqual(cevals[l.index('O:6')], 0, places=7)
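# The 'simple' preset should yield the same assignments for these high-symmetry structures.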
cefp = ChemEnvSiteFingerprint.from_preset('simple')
l = cefp.feature_labels()
cevals = cefp.featurize(self.sc, 0)
self.assertEqual(len(cevals), 66)
self.assertAlmostEqual(cevals[l.index('O:6')], 1, places=7)
self.assertAlmostEqual(cevals[l.index('C:8')], 0, places=7)
cevals = cefp.featurize(self.cscl, 0)
self.assertAlmostEqual(cevals[l.index('C:8')], 0.9953721, places=7)
self.assertAlmostEqual(cevals[l.index('O:6')], 0, places=7)
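
# Voronoi fingerprint of the simple cubic structure: the Voronoi cell has six square
# faces, so only the 4-edged indices are nonzero and all per-face statistics are degenerate.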
def test_voronoifingerprint(self):
df_sc = pd.DataFrame({'struct': [self.sc], 'site': [0]})
vorofp = VoronoiFingerprint(use_symm_weights=True)
vorofps = vorofp.featurize_dataframe(df_sc, ['struct', 'site'])
self.assertAlmostEqual(vorofps['Voro_index_3'][0], 0.0)
self.assertAlmostEqual(vorofps['Voro_index_4'][0], 6.0)
self.assertAlmostEqual(vorofps['Voro_index_5'][0], 0.0)
self.assertAlmostEqual(vorofps['Voro_index_6'][0], 0.0)
self.assertAlmostEqual(vorofps['Voro_index_7'][0], 0.0)
self.assertAlmostEqual(vorofps['Voro_index_8'][0], 0.0)
self.assertAlmostEqual(vorofps['Voro_index_9'][0], 0.0)
self.assertAlmostEqual(vorofps['Voro_index_10'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_index_3'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_index_4'][0], 1.0)
self.assertAlmostEqual(vorofps['Symmetry_index_5'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_index_6'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_index_7'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_index_8'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_index_9'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_index_10'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_weighted_index_3'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_weighted_index_4'][0], 1.0)
self.assertAlmostEqual(vorofps['Symmetry_weighted_index_5'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_weighted_index_6'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_weighted_index_7'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_weighted_index_8'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_weighted_index_9'][0], 0.0)
self.assertAlmostEqual(vorofps['Symmetry_weighted_index_10'][0], 0.0)
self.assertAlmostEqual(vorofps['Voro_vol_sum'][0], 43.614208)
self.assertAlmostEqual(vorofps['Voro_area_sum'][0], 74.3424)
self.assertAlmostEqual(vorofps['Voro_vol_mean'][0], 7.269034667)
self.assertAlmostEqual(vorofps['Voro_vol_std_dev'][0], 0.0)
self.assertAlmostEqual(vorofps['Voro_vol_minimum'][0], 7.269034667)
self.assertAlmostEqual(vorofps['Voro_vol_maximum'][0], 7.269034667)
self.assertAlmostEqual(vorofps['Voro_area_mean'][0], 12.3904)
self.assertAlmostEqual(vorofps['Voro_area_std_dev'][0], 0.0)
self.assertAlmostEqual(vorofps['Voro_area_minimum'][0], 12.3904)
self.assertAlmostEqual(vorofps['Voro_area_maximum'][0], 12.3904)
self.assertAlmostEqual(vorofps['Voro_dist_mean'][0], 3.52)
self.assertAlmostEqual(vorofps['Voro_dist_std_dev'][0], 0.0)
self.assertAlmostEqual(vorofps['Voro_dist_minimum'][0], 3.52)
self.assertAlmostEqual(vorofps['Voro_dist_maximum'][0], 3.52)
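
# Interstice distribution statistics for a site in bcc Li.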
def test_interstice_distribution_of_crystal(self):
bcc_li = Structure(Lattice([[3.51, 0, 0], [0, 3.51, 0], [0, 0, 3.51]]),
["Li"] * 2, [[0, 0, 0], [0.5, 0.5, 0.5]])
df_bcc_li = pd.DataFrame({'struct': [bcc_li], 'site': [1]})
interstice_distribution = IntersticeDistribution()
intersticefp = interstice_distribution.featurize_dataframe(
df_bcc_li, ['struct', 'site'])
self.assertAlmostEqual(intersticefp['Interstice_vol_mean'][0], 0.32, 2)
self.assertAlmostEqual(intersticefp['Interstice_vol_std_dev'][0], 0)
self.assertAlmostEqual(intersticefp['Interstice_vol_minimum'][0], 0.32, 2)
self.assertAlmostEqual(intersticefp['Interstice_vol_maximum'][0], 0.32, 2)
self.assertAlmostEqual(intersticefp['Interstice_area_mean'][0], 0.16682, 5)
self.assertAlmostEqual(intersticefp['Interstice_area_std_dev'][0], 0)
self.assertAlmostEqual(intersticefp['Interstice_area_minimum'][0], 0.16682, 5)
self.assertAlmostEqual(intersticefp['Interstice_area_maximum'][0], 0.16682, 5)
self.assertAlmostEqual(intersticefp['Interstice_dist_mean'][0], 0.06621, 5)
self.assertAlmostEqual(intersticefp['Interstice_dist_std_dev'][0], 0.07655, 5)
self.assertAlmostEqual(intersticefp['Interstice_dist_minimum'][0], 0, 3)
self.assertAlmostEqual(intersticefp['Interstice_dist_maximum'][0], 0.15461, 5)
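
# Interstice distribution statistics for the first Cu atom of a small Cu-Zr glass cluster.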
def test_interstice_distribution_of_glass(self):
cuzr_glass = Structure(Lattice([[25, 0, 0], [0, 25, 0], [0, 0, 25]]),
["Cu", "Cu", "Cu", "Cu", "Cu", "Zr", "Cu", "Zr",
"Cu", "Zr", "Cu", "Zr", "Cu", "Cu"],
[[11.81159679, 16.49480537, 21.69139442],
[11.16777208, 17.87850033, 18.57877144],
[12.22394796, 15.83218325, 19.37763412],
[13.07053548, 14.34025424, 21.77557646],
[10.78147725, 19.61647494, 20.77595531],
[10.87541011, 14.65986432, 23.61517624],
[12.76631002, 18.41479521, 20.46717947],
[14.63911675, 16.47487037, 20.52671362],
[14.2470256, 18.44215167, 22.56257566],
[9.38050168, 16.87974592, 20.51885879],
[10.66332986, 14.43900833, 20.545186],
[11.57096832, 18.79848982, 23.26073408],
[13.27048138, 16.38613795, 23.59697472],
[9.55774984, 17.09220537, 23.1856528]],
coords_are_cartesian=True)
df_glass = pd.DataFrame({'struct': [cuzr_glass], 'site': [0]})
interstice_distribution = IntersticeDistribution()
intersticefp = interstice_distribution.featurize_dataframe(
df_glass, ['struct', 'site'])
self.assertAlmostEqual(intersticefp['Interstice_vol_mean'][0], 0.28905, 5)
self.assertAlmostEqual(intersticefp['Interstice_vol_std_dev'][0], 0.04037, 5)
self.assertAlmostEqual(intersticefp['Interstice_vol_minimum'][0], 0.21672, 5)
self.assertAlmostEqual(intersticefp['Interstice_vol_maximum'][0], 0.39084, 5)
self.assertAlmostEqual(intersticefp['Interstice_area_mean'][0], 0.16070, 5)
self.assertAlmostEqual(intersticefp['Interstice_area_std_dev'][0], 0.05245, 5)
self.assertAlmostEqual(intersticefp['Interstice_area_minimum'][0], 0.07132, 5)
self.assertAlmostEqual(intersticefp['Interstice_area_maximum'][0], 0.26953, 5)
self.assertAlmostEqual(intersticefp['Interstice_dist_mean'][0], 0.08154, 5)
self.assertAlmostEqual(intersticefp['Interstice_dist_std_dev'][0], 0.14778, 5)
self.assertAlmostEqual(intersticefp['Interstice_dist_minimum'][0], -0.04668, 5)
self.assertAlmostEqual(intersticefp['Interstice_dist_maximum'][0], 0.37565, 5)
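
# Chemical short-range order (ChemicalSRO) featurizer on the simple cubic structure.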
def test_chemicalSRO(self):
df_sc = pd.DataFrame({'struct': [self.sc], 'site': [0]})